prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k)
---|---
<|file_name|>healthcheck.go<|end_file_name|><|fim▁begin|>// Copyright 2015 tsuru-autoscale authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.<|fim▁hole|>package api
import (
"fmt"
"net/http"
)
func healthcheck(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "WORKING")
}<|fim▁end|> | |
<|file_name|>entity.rs<|end_file_name|><|fim▁begin|>//! Common entity datatypes.
use std::default::Default;
use std::ops::Add;
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
/// Unique ID for a loaded entity.
pub struct EntityId(u32);
impl Default for EntityId {<|fim▁hole|> EntityId(0)
}
}
impl Add<u32> for EntityId {
type Output = EntityId;
fn add(self, rhs: u32) -> EntityId {
let EntityId(i) = self;
EntityId(i + rhs)
}
}<|fim▁end|> | fn default() -> EntityId { |
<|file_name|>YouTubeRecentVideos.tsx<|end_file_name|><|fim▁begin|>import * as React from 'react';
// Libs
import { Col, Container, Row } from 'react-bootstrap';
// Components
import {
ProjectTopics,
ProjectWrapper,
ProjectYear,
ResponsiveFrame,
Spacer,
Title,
} from '../components';
const YouTubeRecentVideos: React.FC = (): JSX.Element => (
<ProjectWrapper>
<Container>
<Row>
<Col lg={{ offset: 0, span: 7 }}>
<ResponsiveFrame
src="https://www.youtube.com/embed/qd0F5YgjtJk"
title="Ryders NYC"
/>
</Col>
<Col lg={{ offset: 0, span: 5 }}>
<Col lg={{ offset: 0, span: 8 }}>
<ResponsiveFrame
src="https://www.youtube.com/embed/N3N1V4KUbzo"
title="Trip to China 2017"
/>
</Col>
<Spacer size={7} />
<Col lg={{ offset: 0, span: 8 }}>
<ResponsiveFrame
src="https://www.youtube.com/embed/3MSpfgV3dfA"
title="Trip to China 2017"
/>
</Col>
</Col>
</Row>
<Row>
<Col lg={{ offset: 0, span: 12 }}>
<Title>YouTube Recent Videos</Title>
<ProjectTopics topics={['Video', 'Youtube']} />
<ProjectYear>2017—2020</ProjectYear>
</Col>
</Row>
</Container><|fim▁hole|>
export default YouTubeRecentVideos;<|fim▁end|> | </ProjectWrapper>
); |
<|file_name|>TimeUtils.ts<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License.
*/
/**
* Utility class which exposes functions for managing date and time operations.
*/
export class TimeUtils {
/**
* return the current time in Unix time (seconds).
*/
static nowSeconds(): number {
// Date.getTime() returns in milliseconds.
return Math.round(new Date().getTime() / 1000.0);
}
/**
* check if a token is expired based on given UTC time in seconds.
* @param expiresOn<|fim▁hole|> const offsetCurrentTimeSec = TimeUtils.nowSeconds() + offset;
// If current time + offset is greater than token expiration time, then token is expired.
return (offsetCurrentTimeSec > expirationSec);
}
/**
* If the current time is earlier than the time that a token was cached at, we must discard the token
* i.e. The system clock was turned back after acquiring the cached token
* @param cachedAt
* @param offset
*/
static wasClockTurnedBack(cachedAt: string): boolean {
const cachedAtSec = Number(cachedAt);
return cachedAtSec > TimeUtils.nowSeconds();
}
/**
* Waits for t number of milliseconds
* @param t number
* @param value T
*/
static delay<T>(t: number, value?: T): Promise<T | void> {
return new Promise((resolve) => setTimeout(() => resolve(value), t));
}
}<|fim▁end|> | */
static isTokenExpired(expiresOn: string, offset: number): boolean {
// check for access token expiry
const expirationSec = Number(expiresOn) || 0; |
<|file_name|>debugger-stepping-and-breakpoints.js<|end_file_name|><|fim▁begin|>// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
InspectorTest.log('Tests how multiple sessions interact while pausing, stepping, setting breakpoints and blackboxing.');
var contextGroup = new InspectorTest.ContextGroup();
contextGroup.addScript(`
function foo() {
return 1;
}
function baz() {
return 2;
}
function stepping() {
debugger;
var a = 1;
var b = 1;
}
//# sourceURL=test.js`, 9, 25);
contextGroup.addScript(`
function bar() {
debugger;
}
//# sourceURL=test2.js`, 23, 25);
(async function test() {
InspectorTest.log('Connecting session 1');
var session1 = contextGroup.connect();
await session1.Protocol.Debugger.enable();
InspectorTest.log('Pausing in 1');
session1.Protocol.Runtime.evaluate({expression: 'debugger;'});
await waitForPaused(session1, 1);
InspectorTest.log('Connecting session 2');
var session2 = contextGroup.connect();
var enabledPromise = session2.Protocol.Debugger.enable();
await waitForPaused(session2, 2);
await enabledPromise;
InspectorTest.log('Resuming in 2');<|fim▁hole|> await session1.Protocol.Debugger.setBreakpointByUrl({url: 'test.js', lineNumber: 11});
await session1.Protocol.Debugger.setBreakpointByUrl({url: 'test.js', lineNumber: 14});
InspectorTest.log('Setting breakpoints in 2');
await session2.Protocol.Debugger.setBreakpointByUrl({url: 'test.js', lineNumber: 11});
InspectorTest.log('Evaluating common breakpoint in 1');
session1.Protocol.Runtime.evaluate({expression: 'foo();'});
await waitForBothPaused();
InspectorTest.log('Resuming in 1');
session1.Protocol.Debugger.resume();
await waitForBothResumed();
InspectorTest.log('Evaluating debugger in 1');
session1.Protocol.Runtime.evaluate({expression: 'bar();'});
await waitForBothPaused();
InspectorTest.log('Resuming in 2');
session2.Protocol.Debugger.resume();
await waitForBothResumed();
InspectorTest.log('Evaluating exclusive breakpoint in 1');
session1.Protocol.Runtime.evaluate({expression: 'baz();'});
await waitForBothPaused();
InspectorTest.log('Resuming in 1');
session1.Protocol.Debugger.resume();
await waitForBothResumed();
InspectorTest.log('Evaluating common breakpoint in 2');
session2.Protocol.Runtime.evaluate({expression: 'foo();'});
await waitForBothPaused();
InspectorTest.log('Resuming in 2');
session2.Protocol.Debugger.resume();
await waitForBothResumed();
InspectorTest.log('Evaluating debugger in 2');
session2.Protocol.Runtime.evaluate({expression: 'bar();'});
await waitForBothPaused();
InspectorTest.log('Resuming in 2');
session2.Protocol.Debugger.resume();
await waitForBothResumed();
InspectorTest.log('Evaluating exclusive breakpoint in 2');
session2.Protocol.Runtime.evaluate({expression: 'baz();'});
await waitForBothPaused();
InspectorTest.log('Resuming in 1');
session1.Protocol.Debugger.resume();
await waitForBothResumed();
InspectorTest.log('Evaluating stepping in 1');
session1.Protocol.Runtime.evaluate({expression: 'stepping();'});
await waitForBothPaused();
InspectorTest.log('Stepping into in 2');
session2.Protocol.Debugger.stepInto();
await waitForBothResumed();
await waitForBothPaused();
InspectorTest.log('Stepping over in 1');
session1.Protocol.Debugger.stepOver();
await waitForBothResumed();
await waitForBothPaused();
InspectorTest.log('Stepping out in 2');
session2.Protocol.Debugger.stepOut();
await waitForBothResumed();
await waitForBothPaused();
InspectorTest.log('Resuming in 1');
session1.Protocol.Debugger.resume();
await waitForBothResumed();
InspectorTest.log('Pausing in next statement');
contextGroup.schedulePauseOnNextStatement('some-reason', JSON.stringify({a: 42}));
session2.Protocol.Runtime.evaluate({expression: 'var a = 1;'});
await waitForBothPaused();
InspectorTest.log('Resuming in 1');
session1.Protocol.Debugger.resume();
await waitForBothResumed();
InspectorTest.log('Pausing in next statement');
contextGroup.schedulePauseOnNextStatement('some-reason', JSON.stringify({a: 42}));
session2.Protocol.Runtime.evaluate({expression: 'var a = 1;'});
await waitForBothPaused();
InspectorTest.log('Resuming in 2');
session2.Protocol.Debugger.resume();
await waitForBothResumed();
InspectorTest.log('Blackboxing bar() in 2');
await session2.Protocol.Debugger.setBlackboxPatterns({patterns: ['test2.js']});
InspectorTest.log('Evaluating bar() in 2');
session2.Protocol.Runtime.evaluate({expression: 'bar();'});
await waitForPaused(session1, 1);
InspectorTest.log('Resuming in 1');
session1.Protocol.Debugger.resume();
await waitForResumed(session1, 1);
InspectorTest.log('Blackboxing bar() in 1');
await session1.Protocol.Debugger.setBlackboxPatterns({patterns: ['test2.js']});
InspectorTest.log('Evaluating bar() in 2');
await session2.Protocol.Runtime.evaluate({expression: 'bar();'});
InspectorTest.log('Skipping pauses in 1');
await session1.Protocol.Debugger.setSkipAllPauses({skip: true});
InspectorTest.log('Evaluating common breakpoint in 1');
session1.Protocol.Runtime.evaluate({expression: 'foo();'});
await waitForPaused(session2, 2);
InspectorTest.log('Resuming in 2');
session2.Protocol.Debugger.resume();
await waitForResumed(session2, 2);
InspectorTest.log('Skipping pauses in 2');
await session2.Protocol.Debugger.setSkipAllPauses({skip: true});
InspectorTest.log('Evaluating common breakpoint in 1');
await session1.Protocol.Runtime.evaluate({expression: 'foo();'});
InspectorTest.log('Unskipping pauses in 1');
await session1.Protocol.Debugger.setSkipAllPauses({skip: false});
InspectorTest.log('Unskipping pauses in 2');
await session2.Protocol.Debugger.setSkipAllPauses({skip: false});
InspectorTest.log('Deactivating breakpoints in 1');
await session1.Protocol.Debugger.setBreakpointsActive({active: false});
InspectorTest.log('Evaluating common breakpoint in 1');
session1.Protocol.Runtime.evaluate({expression: 'foo();'});
await waitForPaused(session2, 2);
InspectorTest.log('Resuming in 2');
session2.Protocol.Debugger.resume();
await waitForResumed(session2, 2);
InspectorTest.log('Deactivating breakpoints in 2');
await session2.Protocol.Debugger.setBreakpointsActive({active: false});
InspectorTest.log('Evaluating common breakpoint in 1');
await session1.Protocol.Runtime.evaluate({expression: 'foo();'});
InspectorTest.log('Activating breakpoints in 1');
await session1.Protocol.Debugger.setBreakpointsActive({active: true});
InspectorTest.log('Activating breakpoints in 2');
await session2.Protocol.Debugger.setBreakpointsActive({active: true});
InspectorTest.log('Disabling debugger agent in 1');
await session1.Protocol.Debugger.disable();
InspectorTest.log('Evaluating breakpoint in 1 (should not be triggered)');
session2.Protocol.Runtime.evaluate({expression: 'baz();\ndebugger;'});
await waitForPaused(session2, 2);
InspectorTest.completeTest();
function waitForBothPaused() {
return Promise.all([waitForPaused(session1, 1), waitForPaused(session2, 2)]);
}
function waitForBothResumed() {
return Promise.all([waitForResumed(session1, 1), waitForResumed(session2, 2)]);
}
})();
function waitForPaused(session, num) {
return session.Protocol.Debugger.oncePaused().then(message => {
InspectorTest.log(`Paused in ${num}:`);
InspectorTest.log(` reason: ${message.params.reason}`);
InspectorTest.log(` hit breakpoints: ${(message.params.hitBreakpoints || []).join(';')}`);
var callFrame = message.params.callFrames[0];
InspectorTest.log(` location: ${callFrame.functionName || '<anonymous>'}@${callFrame.location.lineNumber}`);
InspectorTest.log(` data: ${JSON.stringify(message.params.data || null)}`);
});
}
function waitForResumed(session, num) {
return session.Protocol.Debugger.onceResumed().then(message => {
InspectorTest.log(`Resumed in ${num}`);
});
}<|fim▁end|> | session2.Protocol.Debugger.resume();
await waitForBothResumed();
InspectorTest.log('Setting breakpoints in 1'); |
<|file_name|>mirror_gen.py<|end_file_name|><|fim▁begin|>"""
A "mirroring" ``stdout`` context manager.
While active, the context manager reverses text output to
``stdout``::
# BEGIN MIRROR_GEN_DEMO_1
>>> from mirror_gen import looking_glass
>>> with looking_glass() as what: # <1>
... print('Alice, Kitty and Snowdrop')
... print(what)
...
pordwonS dna yttiK ,ecilA
YKCOWREBBAJ
>>> what
'JABBERWOCKY'
# END MIRROR_GEN_DEMO_1
<|fim▁hole|># BEGIN MIRROR_GEN_DEMO_2
>>> from mirror_gen import looking_glass
>>> manager = looking_glass() # <1>
>>> manager # doctest: +ELLIPSIS
<contextlib._GeneratorContextManager object at 0x...>
>>> monster = manager.__enter__() # <2>
>>> monster == 'JABBERWOCKY' # <3>
eurT
>>> monster
'YKCOWREBBAJ'
>>> manager # doctest: +ELLIPSIS
>...x0 ta tcejbo reganaMtxetnoCrotareneG_.biltxetnoc<
>>> manager.__exit__(None, None, None) # <4>
>>> monster
'JABBERWOCKY'
# END MIRROR_GEN_DEMO_2
"""
# BEGIN MIRROR_GEN_EX
import contextlib
@contextlib.contextmanager # <1>
def looking_glass():
import sys
original_write = sys.stdout.write # <2>
def reverse_write(text): # <3>
original_write(text[::-1])
sys.stdout.write = reverse_write # <4>
yield 'JABBERWOCKY' # <5>
sys.stdout.write = original_write # <6>
# END MIRROR_GEN_EX<|fim▁end|> |
This exposes the context manager operation::
|
<|file_name|>panel.js<|end_file_name|><|fim▁begin|>'use strict';
chrome.devtools.panels.create('Luffa', '', 'devtool.html', function (panel) {
var reactPanel = null;
panel.onShown.addListener(function (window) {<|fim▁hole|> reactPanel.resumeTransfer();
});
panel.onHidden.addListener(function () {
if (reactPanel) {
reactPanel.hideHighlight();
reactPanel.pauseTransfer();
}
});
});
//# sourceMappingURL=panel.js.map<|fim▁end|> | // when the user switches to the panel, check for an elements tab
// selection
window.panel.getNewSelection();
reactPanel = window.panel; |
<|file_name|>bitcoin_fr.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="fr" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About FairQuark</source>
<translation>À propos de FairQuark</translation>
</message>
<message>
<location line="+39"/>
<source><b>FairQuark</b> version</source>
<translation><b>FairQuark</b> version</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Ce logiciel est en phase expérimentale.
Distribué sous licence MIT/X11, voir le fichier COPYING ou http://www.opensource.org/licenses/mit-license.php.
Ce produit comprend des fonctionnalités développées par le projet OpenSSL pour être utilisés dans la boîte à outils OpenSSL (http://www.openssl.org/), un logiciel cryptographique écrit par Eric Young ([email protected]), et des fonctionnalités développées pour le logiciel UPnP écrit par Thomas Bernard.</translation>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation>Droit d'auteur</translation>
</message>
<message>
<location line="+0"/>
<source>The Quarkcoin developers</source>
<translation>Les développeurs Quarkcoin</translation>
</message>
<message>
<location line="+0"/>
<source>The FairQuark developers</source>
<translation>Les développeurs FairQuark</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Carnet d'adresses</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>Double cliquez afin de modifier l'adresse ou l'étiquette</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Créer une nouvelle adresse</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Copier l'adresse sélectionnée dans le presse-papiers</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Nouvelle adresse</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your FairQuark addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Voici vos adresses FairQuark qui vous permettent de recevoir des paiements. Vous pouvez donner une adresse différente à chaque expéditeur afin de savoir qui vous paye.</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>&Copier l'adresse</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Afficher le &QR Code</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a FairQuark address</source>
<translation>Signer un message pour prouver que vous détenez une adresse FairQuark</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Signer un &message</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Effacer l'adresse actuellement sélectionnée de la liste</translation>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>Exporter les données de l'onglet courant vers un fichier</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation>&Exporter</translation>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified FairQuark address</source>
<translation>Vérifier un message pour vous assurer qu'il a bien été signé avec l'adresse FairQuark spécifiée</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>&Vérifier un message</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Supprimer</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your FairQuark addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>Ce sont vos adresses FairQuark pour émettre des paiements. Vérifiez toujours le montant et l'adresse du destinataire avant d'envoyer des pièces.</translation>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>Copier l'é&tiquette</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&Éditer</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation>Envoyer des Bit&coins</translation>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>Exporter les données du carnet d'adresses</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Valeurs séparées par des virgules (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Erreur lors de l'exportation</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Impossible d'écrire dans le fichier %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Étiquette</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(aucune étiquette)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Dialogue de phrase de passe</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Entrez la phrase de passe</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Nouvelle phrase de passe</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Répétez la phrase de passe</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Entrez une nouvelle phrase de passe pour le porte-monnaie.<br/>Veuillez utiliser une phrase composée de <b>10 caractères aléatoires ou plus</b>, ou bien de <b>huit mots ou plus</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Chiffrer le porte-monnaie</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Cette opération nécessite votre phrase de passe pour déverrouiller le porte-monnaie.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Déverrouiller le porte-monnaie</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Cette opération nécessite votre phrase de passe pour décrypter le porte-monnaie.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Déchiffrer le porte-monnaie</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Changer la phrase de passe</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Entrez l’ancienne phrase de passe pour le porte-monnaie ainsi que la nouvelle.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Confirmer le chiffrement du porte-monnaie</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR FairQuarkS</b>!</source>
<translation>Attention : Si vous chiffrez votre porte-monnaie et perdez votre phrase de passe, vous <b>PERDREZ ACCÈS À TOUS VOS FairQuarkS</b> !</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Êtes-vous sûr de vouloir chiffrer votre porte-monnaie ?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>IMPORTANT : Les sauvegardes précédentes de votre fichier de porte-monnaie devraient être remplacées par le nouveau fichier crypté de porte-monnaie. Pour des raisons de sécurité, les précédentes sauvegardes de votre fichier de porte-monnaie non chiffré deviendront inutilisables dès que vous commencerez à utiliser le nouveau porte-monnaie chiffré.</translation>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Attention : la touche Verr. Maj. est activée !</translation>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>Porte-monnaie chiffré</translation>
</message>
<message>
<location line="-56"/>
<source>FairQuark will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your FairQuarks from being stolen by malware infecting your computer.</source>
<translation>FairQuark va à présent se fermer pour terminer la procédure de cryptage. N'oubliez pas que le chiffrement de votre porte-monnaie ne peut pas fournir une protection totale contre le vol par des logiciels malveillants qui infecteraient votre ordinateur.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Le chiffrement du porte-monnaie a échoué</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Le chiffrement du porte-monnaie a échoué en raison d'une erreur interne. Votre porte-monnaie n'a pas été chiffré.</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>Les phrases de passe entrées ne correspondent pas.</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>Le déverrouillage du porte-monnaie a échoué</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>La phrase de passe entrée pour décrypter le porte-monnaie était incorrecte.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Le déchiffrage du porte-monnaie a échoué</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>La phrase de passe du porte-monnaie a été modifiée avec succès.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>Signer un &message...</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>Synchronisation avec le réseau…</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>&Vue d'ensemble</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Afficher une vue d’ensemble du porte-monnaie</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&Transactions</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Parcourir l'historique des transactions</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Éditer la liste des adresses et des étiquettes stockées</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Afficher la liste des adresses pour recevoir des paiements</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>Q&uitter</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Quitter l’application</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about FairQuark</source>
<translation>Afficher des informations à propos de FairQuark</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>À propos de &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Afficher des informations sur Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Options…</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&Chiffrer le porte-monnaie...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&Sauvegarder le porte-monnaie...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Modifier la phrase de passe...</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation>Importation des blocs depuis le disque...</translation>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation>Réindexation des blocs sur le disque...</translation>
</message>
<message>
<location line="-347"/>
<source>Send coins to a FairQuark address</source>
<translation>Envoyer des pièces à une adresse FairQuark</translation>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for FairQuark</source>
<translation>Modifier les options de configuration de FairQuark</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>Sauvegarder le porte-monnaie à un autre emplacement</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Modifier la phrase de passe utilisée pour le chiffrement du porte-monnaie</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation>Fenêtre de &débogage</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Ouvrir une console de débogage et de diagnostic</translation>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>&Vérifier un message...</translation>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>FairQuark</source>
<translation>FairQuark</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>Porte-monnaie</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation>&Envoyer</translation>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation>&Recevoir</translation>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation>&Adresses</translation>
</message>
<message>
<location line="+22"/>
<source>&About FairQuark</source>
<translation>À &propos de FairQuark</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&Afficher / Cacher</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation>Afficher ou masquer la fenêtre principale</translation>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>Crypter les clefs privées de votre porte-monnaie</translation>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your FairQuark addresses to prove you own them</source>
<translation>Signer les messages avec vos adresses FairQuark pour prouver que vous les détenez</translation>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified FairQuark addresses</source>
<translation>Vérifier les messages pour vous assurer qu'ils ont bien été signés avec les adresses FairQuark spécifiées</translation>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>&Fichier</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>&Réglages</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>&Aide</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Barre d'outils des onglets</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+47"/>
<source>FairQuark client</source>
<translation>Client FairQuark</translation>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to FairQuark network</source>
<translation><numerusform>%n connexion active avec le réseau FairQuark</numerusform><numerusform>%n connexions actives avec le réseau FairQuark</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation>Aucune source de bloc disponible...</translation>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation>%1 blocs sur %2 (estimés) de l'historique des transactions traités.</translation>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation>%1 blocs de l'historique des transactions traités.</translation>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation><numerusform>%n heure</numerusform><numerusform>%n heures</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n jour</numerusform><numerusform>%n jours</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation><numerusform>%n semaine</numerusform><numerusform>%n semaines</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation>%1 en arrière</translation>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation>Le dernier bloc reçu avait été généré il y a %1.</translation>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation>Les transactions après cela ne seront pas encore visibles.</translation>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation>Erreur</translation>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation>Avertissement</translation>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation>Information</translation>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>Cette transaction dépasse la limite de taille. Vous pouvez quand même l'envoyer en vous acquittant de frais d'un montant de %1 qui iront aux nœuds qui traiteront la transaction et aideront à soutenir le réseau. Voulez-vous payer les frais ?</translation>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>À jour</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>Rattrapage…</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation>Confirmer les frais de transaction</translation>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>Transaction envoyée</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>Transaction entrante</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Date : %1
Montant : %2
Type : %3
Adresse : %4
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation>Gestion des URI</translation>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid FairQuark address or malformed URI parameters.</source>
<translation>L'URI ne peut être analysé ! Cela peut être causé par une adresse FairQuark invalide ou par des paramètres d'URI malformés.</translation>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Le porte-monnaie est <b>chiffré</b> et est actuellement <b>déverrouillé</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Le porte-monnaie est <b>chiffré</b> et est actuellement <b>verrouillé</b></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. FairQuark can no longer continue safely and will quit.</source>
<translation>Une erreur fatale est survenue. FairQuark ne peut plus continuer à fonctionner de façon sûre et va s'arrêter.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>Alerte réseau</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Éditer l'adresse</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Étiquette</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>L’étiquette associée à cette entrée du carnet d'adresses</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Adresse</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>L’adresse associée à cette entrée du carnet d'adresses. Seules les adresses d’envoi peuvent être modifiées.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>Nouvelle adresse de réception</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Nouvelle adresse d’envoi</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Éditer l’adresse de réception</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Éditer l’adresse d'envoi</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>L’adresse fournie « %1 » est déjà présente dans le carnet d'adresses.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid FairQuark address.</source>
<translation>L'adresse fournie « %1 » n'est pas une adresse FairQuark valide.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Impossible de déverrouiller le porte-monnaie.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Échec de la génération de la nouvelle clef.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>FairQuark-Qt</source>
<translation>FairQuark-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>version</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Utilisation :</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>options de ligne de commande</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>Options Interface Utilisateur</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Définir la langue, par exemple « de_DE » (par défaut : la langue du système)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>Démarrer sous forme minimisée</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Afficher l'écran d'accueil au démarrage (par défaut : 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Options</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>Réglages &principaux</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation>Frais de transaction optionnel par ko qui aident à garantir un traitement rapide des transactions. La plupart des transactions utilisent 1 ko.</translation>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Payer des &frais de transaction</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start FairQuark after logging in to the system.</source>
<translation>Démarrer FairQuark automatiquement lors de l'ouverture d'une session sur l'ordinateur.</translation>
</message>
<message>
<location line="+3"/>
<source>&Start FairQuark on system login</source>
<translation>&Démarrer FairQuark lors de l'ouverture d'une session</translation>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation>Remettre toutes les options du client aux valeurs par défaut.</translation>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation>&Remise à zéro des options</translation>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation>&Réseau</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the FairQuark client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Ouvrir le port du client FairQuark automatiquement sur le routeur. Cela ne fonctionne que si votre routeur supporte l'UPnP et si la fonctionnalité est activée.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Ouvrir le port avec l'&UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the FairQuark network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>Connexion au réseau FairQuark à travers un proxy SOCKS (par ex. lors d'une connexion via Tor).</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&Connexion à travers un proxy SOCKS :</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>&IP du proxy :</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>Adresse IP du proxy (par ex. 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Port :</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Port du proxy (par ex. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>&Version SOCKS :</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Version SOCKS du serveur mandataire (par ex. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Fenêtre</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Afficher uniquement une icône système après minimisation.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimiser dans la barre système au lieu de la barre des tâches</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimiser au lieu de quitter l'application lorsque la fenêtre est fermée. Lorsque cette option est activée, l'application ne pourra être fermée qu'en sélectionnant Quitter dans le menu déroulant.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>M&inimiser lors de la fermeture</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Affichage</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>&Langue de l'interface utilisateur :</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting FairQuark.</source>
<translation>La langue de l'interface utilisateur peut être définie ici. Ce réglage sera pris en compte après redémarrage de FairQuark.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&Unité d'affichage des montants :</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Choisissez la sous-unité par défaut pour l'affichage dans l'interface et lors de l'envoi de pièces.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show FairQuark addresses in the transaction list or not.</source>
<translation>Détermine si les adresses FairQuark seront affichées sur la liste des transactions.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>&Afficher les adresses sur la liste des transactions</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&Valider</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>A&nnuler</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Appliquer</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>par défaut</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation>Confirmer la remise à zéro des options</translation>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation>La prise en compte de certains réglages peut nécessiter un redémarrage du client.</translation>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation>Voulez-vous continuer ?</translation>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation>Avertissement</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting FairQuark.</source>
<translation>Ce réglage sera pris en compte après un redémarrage de FairQuark.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>L'adresse de proxy fournie est invalide.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Formulaire</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the FairQuark network after a connection is established, but this process has not completed yet.</source>
<translation>Les informations affichées peuvent être obsolètes. Votre porte-monnaie est automatiquement synchronisé avec le réseau FairQuark lorsque la connexion s'établit, or ce processus n'est pas encore terminé.</translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation>Solde :</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Non confirmé :</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>Porte-monnaie</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation>Immature :</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Le solde généré n'est pas encore mûr</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Transactions récentes</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>Votre solde actuel</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>Total des transactions qui doivent encore être confirmées et qui ne sont pas prises en compte dans le solde actuel</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>désynchronisé</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start FairQuark: click-to-pay handler</source>
<translation>Impossible de démarrer FairQuark : gestionnaire de cliquer-pour-payer</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>Dialogue de QR Code</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Demande de paiement</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Montant :</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Étiquette :</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Message :</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Enregistrer sous...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>Erreur de l'encodage de l'URI dans le QR Code.</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>Le montant entré est invalide, veuillez le vérifier.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>L'URI résultant est trop long, essayez avec un texte d'étiquette ou de message plus court.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Sauvegarder le QR Code</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>Images PNG (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Nom du client</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation>Indisponible</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Version du client</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Informations</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Version d'OpenSSL utilisée</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Date de démarrage</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Réseau</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Nombre de connexions</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>Sur testnet</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Chaîne de blocs</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Nombre actuel de blocs</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Nombre total estimé de blocs</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Horodatage du dernier bloc</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Ouvrir</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>Options de ligne de commande</translation>
</message>
<message>
<location line="+7"/>
<source>Show the FairQuark-Qt help message to get a list with possible FairQuark command-line options.</source>
<translation>Afficher le message d'aide de FairQuark-Qt pour obtenir la liste des options de ligne de commande disponibles pour FairQuark.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&Afficher</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Console</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Date de compilation</translation>
</message>
<message>
<location line="-104"/>
<source>FairQuark - Debug window</source>
<translation>FairQuark - Fenêtre de débogage</translation>
</message>
<message>
<location line="+25"/>
<source>FairQuark Core</source>
<translation>Noyau FairQuark</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Journal de débogage</translation>
</message>
<message>
<location line="+7"/>
<source>Open the FairQuark debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Ouvrir le journal de débogage de FairQuark depuis le répertoire de données actuel. Cela peut prendre quelques secondes pour les journaux de grande taille.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Nettoyer la console</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the FairQuark RPC console.</source>
<translation>Bienvenue sur la console RPC de FairQuark.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Utilisez les touches de curseur pour naviguer dans l'historique et <b>Ctrl-L</b> pour effacer l'écran.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Tapez <b>help</b> pour afficher une vue générale des commandes disponibles.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Envoyer des pièces</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>Envoyer des pièces à plusieurs destinataires à la fois</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Ajouter un &destinataire</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Enlever tous les champs de transaction</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>&Tout nettoyer</translation>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>Solde :</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation>123.456 BTC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Confirmer l’action d'envoi</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>E&nvoyer</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> à %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Confirmer l’envoi des pièces</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>Êtes-vous sûr de vouloir envoyer %1 ?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> et </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Cette adresse de destinataire n’est pas valide, veuillez la vérifier.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Le montant à payer doit être supérieur à 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Le montant dépasse votre solde.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Le montant dépasse votre solde lorsque les frais de transaction de %1 sont inclus.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Adresse dupliquée trouvée, il n'est possible d'envoyer qu'une fois à chaque adresse par opération d'envoi.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation>Erreur : Échec de la création de la transaction !</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Erreur : la transaction a été rejetée. Cela peut arriver si certaines pièces de votre porte-monnaie ont déjà été dépensées, par exemple si vous avez utilisé une copie de wallet.dat avec laquelle les pièces ont été dépensées mais pas marquées comme telles ici.</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Formulaire</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>&Montant :</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Payer &à :</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</source>
<translation>L'adresse à laquelle le paiement sera envoyé (par ex. qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</translation>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Entrez une étiquette pour cette adresse afin de l’ajouter à votre carnet d’adresses</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&Étiquette :</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>Choisir une adresse dans le carnet d'adresses</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Coller une adresse depuis le presse-papiers</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Enlever ce destinataire</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a FairQuark address (e.g. qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</source>
<translation>Entrez une adresse FairQuark (par ex. qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Signatures - Signer / Vérifier un message</translation>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>&Signer un message</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Vous pouvez signer des messages avec vos adresses pour prouver que vous les détenez. Faites attention à ne pas signer quoi que ce soit de vague car des attaques d'hameçonnage peuvent essayer d'usurper votre identité par votre signature. Ne signez que des déclarations entièrement détaillées et avec lesquelles vous êtes d'accord.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</source>
<translation>L'adresse avec laquelle le message sera signé (par ex. qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>Choisir une adresse depuis le carnet d'adresses</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>Coller une adresse depuis le presse-papiers</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Entrez ici le message que vous désirez signer</translation>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation>Signature</translation>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Copier la signature actuelle dans le presse-papiers</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this FairQuark address</source>
<translation>Signer le message pour prouver que vous détenez cette adresse FairQuark</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Signer le &message</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation>Remettre à zéro tous les champs de signature de message</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>&Tout nettoyer</translation>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation>&Vérifier un message</translation>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Entrez ci-dessous l'adresse ayant servi à signer, le message (assurez-vous d'avoir copié exactement les retours à la ligne, les espacements, tabulations etc.) et la signature pour vérifier le message. Faites attention à ne pas déduire davantage de la signature que ce qui est contenu dans le message signé lui-même pour éviter d'être trompé par une attaque d'homme du milieu.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</source>
<translation>L'adresse avec laquelle le message a été signé (par ex. qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified FairQuark address</source>
<translation>Vérifier le message pour vous assurer qu'il a bien été signé par l'adresse FairQuark spécifiée</translation>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation>Vérifier un &message</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation>Remettre à zéro tous les champs de vérification de message</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a FairQuark address (e.g. qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</source>
<translation>Entrez une adresse FairQuark (par ex. qi1NooNjQySQLDJ643HWfZZ7UN2EmLEvix)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Cliquez sur « Signer le message » pour générer la signature</translation>
</message>
<message>
<location line="+3"/>
<source>Enter FairQuark signature</source>
<translation>Entrer une signature FairQuark</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>L'adresse entrée est invalide.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Veuillez vérifier l'adresse et réessayer.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>L'adresse entrée ne fait pas référence à une clef.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Le déverrouillage du porte-monnaie a été annulé.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>La clef privée pour l'adresse indiquée n'est pas disponible.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>La signature du message a échoué.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Le message a été signé.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>La signature n'a pu être décodée.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Veuillez vérifier la signature et réessayer.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>La signature ne correspond pas au hachage du message.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Échec de la vérification du message.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Message vérifié.</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Quarkcoin developers</source>
<translation>Les développeurs Quarkcoin</translation>
</message>
<message>
<location line="+1"/>
<source>The FairQuark developers</source>
<translation>Les développeurs FairQuark</translation>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>Ouvert jusqu'à %1</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation>%1/hors ligne</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/non confirmée</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 confirmations</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>État</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, diffusée à travers %n nœud</numerusform><numerusform>, diffusée à travers %n nœuds</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Date</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Source</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Génération</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>De</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>À</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>votre propre adresse</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>étiquette</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Crédit</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>arrive à maturité dans %n bloc de plus</numerusform><numerusform>arrive à maturité dans %n blocs de plus</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>non accepté</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Débit</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Frais de transaction</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Montant net</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Message</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Commentaire</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>ID de la transaction</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 240 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Les pièces générées doivent mûrir pendant 240 blocs avant de pouvoir être dépensées. Lorsque vous avez généré ce bloc, il a été diffusé sur le réseau pour être ajouté à la chaîne de blocs. S’il échoue à intégrer la chaîne, son état sera modifié en « non accepté » et il ne sera pas possible de le dépenser. Cela peut arriver occasionnellement si un autre nœud génère un bloc quelques secondes avant ou après le vôtre.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Informations de débogage</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transaction</translation>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation>Entrées</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Montant</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>vrai</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>faux</translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, n’a pas encore été diffusée avec succès</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Ouvert pour %n bloc de plus</numerusform><numerusform>Ouvert pour %n blocs de plus</numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>inconnu</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Détails de la transaction</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Ce panneau affiche une description détaillée de la transaction</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>Date</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Montant</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Ouvert pour %n bloc de plus</numerusform><numerusform>Ouvert pour %n blocs de plus</numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>Ouvert jusqu'à %1</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>Hors ligne (%1 confirmations)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>Non confirmée (%1 confirmations sur un total de %2)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Confirmée (%1 confirmations)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation><numerusform>Le solde généré (mined) sera disponible quand il aura mûri dans %n bloc</numerusform><numerusform>Le solde généré (mined) sera disponible quand il aura mûri dans %n blocs</numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Ce bloc n’a été reçu par aucun autre nœud et ne sera probablement pas accepté !</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Généré mais pas accepté</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>Reçue avec</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Reçue de</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Envoyée à</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Paiement à vous-même</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Extraction</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(indisponible)</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>État de la transaction. Laissez le pointeur de la souris sur ce champ pour voir le nombre de confirmations.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Date et heure de réception de la transaction.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Type de transaction.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>L’adresse de destination de la transaction.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Montant ajouté au, ou enlevé du, solde.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>Toutes</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Aujourd’hui</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Cette semaine</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Ce mois-ci</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Mois dernier</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Cette année</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Intervalle…</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Reçues avec</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Envoyées à</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>À vous-même</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Extraction</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Autres</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Entrez une adresse ou une étiquette à rechercher</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Montant min</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Copier l’adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Copier l’étiquette</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Copier le montant</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Copier l'ID de la transaction</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Éditer l’étiquette</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Afficher les détails de la transaction</translation>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>Exporter les données des transactions</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Valeurs séparées par des virgules (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Confirmée</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Date</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Étiquette</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Adresse</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Montant</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Erreur lors de l’exportation</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Impossible d'écrire dans le fichier %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Intervalle :</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>à</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>Envoyer des pièces</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation>&Exporter</translation>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>Exporter les données de l'onglet courant vers un fichier</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation>Sauvegarder le porte-monnaie</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>Données de porte-monnaie (*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>Échec de la sauvegarde</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>Une erreur est survenue lors de l'enregistrement des données de porte-monnaie à un nouvel endroit.</translation>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation>Sauvegarde réussie</translation>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation>Les données de porte-monnaie ont été enregistrées avec succès sur le nouvel emplacement.</translation>
</message>
</context>
<context>
<name>FairQuark-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>FairQuark version</source>
<translation>Version de FairQuark</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>Utilisation :</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or FairQuarkd</source>
<translation>Envoyer une commande à -server ou à FairQuarkd</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>Lister les commandes</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>Obtenir de l’aide pour une commande</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>Options :</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: FairQuark.conf)</source>
<translation>Spécifier le fichier de configuration (par défaut : FairQuark.conf)</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: FairQuarkd.pid)</source>
<translation>Spécifier le fichier PID (par défaut : FairQuarkd.pid)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Spécifier le répertoire de données</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Définir la taille du cache de la base de données en mégaoctets (par défaut : 25)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 8333 or testnet: 18333)</source>
<translation>Écouter les connexions sur le <port> (par défaut : 8333 ou testnet : 18333)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Garder au plus <n> connexions avec les pairs (par défaut : 125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Se connecter à un nœud pour obtenir des adresses de pairs puis se déconnecter</translation>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation>Spécifier votre propre adresse publique</translation>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Seuil de déconnexion des pairs de mauvaise qualité (par défaut : 100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Délai en secondes de refus de reconnexion aux pairs de mauvaise qualité (par défaut : 86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Une erreur est survenue lors de la mise en place du port RPC %u pour écouter sur IPv4 : %s</translation>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 8332 or testnet: 18332)</source>
<translation>Écouter les connexions JSON-RPC sur le <port> (par défaut : 8332 ou testnet : 18332)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Accepter les commandes de JSON-RPC et de la ligne de commande</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Fonctionner en arrière-plan en tant que démon et accepter les commandes</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>Utiliser le réseau de test</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Accepter les connexions entrantes (par défaut : 1 si -proxy ou -connect ne sont pas présents)</translation>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=FairQuarkrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "FairQuark Alert" [email protected]
</source>
<translation>%s, vous devez définir un mot de passe rpc dans le fichier de configuration :
%s
Il vous est conseillé d'utiliser le mot de passe aléatoire suivant :
rpcuser=FairQuarkrpc
rpcpassword=%s
(vous n'avez pas besoin de retenir ce mot de passe)
Le nom d'utilisateur et le mot de passe NE DOIVENT PAS être identiques.
Si le fichier n'existe pas, créez-le avec les droits de lecture accordés au propriétaire.
Il est aussi conseillé de régler alertnotify pour être prévenu des problèmes ;
par exemple : alertnotify=echo %%s | mail -s "FairQuark Alert" [email protected]
</translation>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Une erreur est survenue lors de la mise en place du port RPC %u pour écouter sur IPv6, retour à IPv4 : %s</translation>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>Se lier à l'adresse donnée et toujours l'écouter. Utilisez la notation [host]:port pour l'IPv6</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. FairQuark is probably already running.</source>
<translation>Impossible d’obtenir un verrou sur le répertoire de données %s. FairQuark fonctionne probablement déjà.</translation>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Erreur : la transaction a été rejetée ! Cela peut arriver si certaines pièces de votre porte-monnaie étaient déjà dépensées, par exemple si vous avez utilisé une copie de wallet.dat et les pièces ont été dépensées avec cette copie sans être marquées comme telles ici.</translation>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation>Erreur : cette transaction nécessite des frais de transaction d'au moins %s en raison de son montant, de sa complexité ou parce que des fonds reçus récemment sont utilisés !</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation>Exécuter une commande lorsqu'une alerte correspondante est reçue (%s dans la commande sera remplacé par le message)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Exécuter la commande lorsqu'une transaction de porte-monnaie change (%s dans la commande est remplacée par TxID)</translation>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Définir la taille maximale en octets des transactions prioritaires/à frais modiques (par défaut : 27000)</translation>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation>Ceci est une pré-version de test - utilisez à vos risques et périls - ne l'utilisez pas pour miner ou pour des applications marchandes</translation>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Attention : -paytxfee est réglée sur un montant très élevé ! Il s'agit des frais de transaction que vous payerez si vous émettez une transaction.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>Avertissement : les transactions affichées pourraient être incorrectes ! Vous ou d'autres nœuds du réseau pourriez avoir besoin d'effectuer une mise à jour.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong FairQuark will not work properly.</source>
<translation>Attention : veuillez vérifier que l'heure et la date de votre ordinateur sont correctes ! Si votre horloge n'est pas à l'heure, FairQuark ne fonctionnera pas correctement.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Avertissement : une erreur est survenue lors de la lecture de wallet.dat ! Toutes les clefs ont été lues correctement mais les données de transaction ou les entrées du carnet d'adresses pourraient être incorrectes ou manquantes.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Avertissement : wallet.dat corrompu, données récupérées ! Le fichier wallet.dat original a été enregistré en tant que wallet.{horodatage}.bak dans %s ; si votre solde ou transactions sont incorrects vous devriez effectuer une restauration depuis une sauvegarde.</translation>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Tenter de récupérer les clefs privées d'un wallet.dat corrompu</translation>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation>Options de création des blocs :</translation>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation>Ne se connecter qu'au(x) nœud(s) spécifié(s)</translation>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation>Base de données des blocs corrompue détectée</translation>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Découvrir sa propre adresse IP (par défaut : 1 lors de l'écoute et si -externalip n'est pas présent)</translation>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation>Voulez-vous reconstruire la base de données des blocs maintenant ?</translation>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation>Erreur lors de l'initialisation de la base de données des blocs</translation>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation>Erreur lors de l'initialisation de l'environnement de la base de données du porte-monnaie %s !</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation>Erreur du chargement de la base de données des blocs</translation>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation>Erreur lors de l'ouverture de la base de données des blocs</translation>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation>Erreur : l'espace disque est faible !</translation>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation>Erreur : Porte-monnaie verrouillé, impossible de créer la transaction !</translation>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation>Erreur : erreur système :</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Échec de l'écoute sur un port quelconque. Utilisez -listen=0 si vous voulez cela.</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation>La lecture des informations de bloc a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation>La lecture du bloc a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation>La synchronisation de l'index des blocs a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation>L'écriture de l'index des blocs a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation>L'écriture des informations du bloc a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation>L'écriture du bloc a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation>L'écriture des informations de fichier a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation>L'écriture dans la base de données des pièces a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation>L'écriture de l'index des transactions a échoué</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation>L'écriture des données d'annulation a échoué</translation>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation>Trouver des pairs en utilisant la recherche DNS (par défaut : 1 sauf si -connect est utilisé)</translation>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation>Générer des pièces (par défaut : 0)</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation>Nombre de blocs à vérifier au démarrage (par défaut : 288, 0 = tout)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation>Niveau d'approfondissement de la vérification des blocs (0-4, par défaut : 3)</translation>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation>Pas assez de descripteurs de fichiers disponibles.</translation>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation>Reconstruire l'index de la chaîne des blocs à partir des fichiers blk000??.dat actuels</translation>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation>Définir le nombre de fils d'exécution pour desservir les appels RPC (par défaut : 4)</translation>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation>Vérification des blocs...</translation>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation>Vérification du porte-monnaie...</translation>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation>Importe des blocs depuis un fichier blk000??.dat externe</translation>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation>Définir le nombre de fils d’exécution pour la vérification des scripts (maximum 16, 0 = auto, < 0 = laisser ce nombre de cœurs libres, par défaut : 0)</translation>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation>Informations</translation>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation>Adresse -tor invalide : « %s »</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation>Montant invalide pour -minrelaytxfee=<montant> : « %s »</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation>Montant invalide pour -mintxfee=<montant> : « %s »</translation>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation>Maintenir un index complet des transactions (par défaut : 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Tampon maximal de réception par connexion, <n>*1000 octets (par défaut : 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Tampon maximal d'envoi par connexion, <n>*1000 octets (par défaut : 1000)</translation>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation>N'accepter que la chaîne de blocs correspondant aux points de vérification internes (par défaut : 1)</translation>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Se connecter uniquement aux nœuds du réseau <net> (IPv4, IPv6 ou Tor)</translation>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Afficher des informations de débogage supplémentaires. Cela implique toutes les autres options -debug*</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Afficher des informations de débogage réseau supplémentaires</translation>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>Faire précéder les données de débogage par un horodatage</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the FairQuark Wiki for SSL setup instructions)</source>
<translation>Options SSL : (cf. le wiki de FairQuark pour les instructions de configuration du SSL)</translation>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Sélectionner la version du proxy socks à utiliser (4-5, 5 étant la valeur par défaut)</translation>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Envoyer les informations de débogage/trace à la console au lieu du fichier debug.log</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Envoyer les informations de débogage/trace au débogueur</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>Définir la taille maximale des blocs en octets (par défaut : 250000)</translation>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Définir la taille minimale des blocs en octets (par défaut : 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Réduire le fichier debug.log lors du démarrage du client (par défaut : 1 lorsque -debug n'est pas présent)</translation>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation>La signature de la transaction a échoué</translation>
</message>
<message>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Spécifier le délai d'expiration de la connexion en millisecondes (par défaut : 5000)</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation>Erreur système :</translation>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation>Montant de la transaction trop bas</translation>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation>Les montants de la transaction doivent être positifs</translation>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation>Transaction trop volumineuse</translation>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Utiliser l'UPnP pour rediriger le port d'écoute (par défaut : 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Utiliser l'UPnP pour rediriger le port d'écoute (par défaut : 1 lors de l'écoute)</translation>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Utiliser un proxy pour atteindre les services cachés de Tor (par défaut : même valeur que -proxy)</translation>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>Nom d'utilisateur pour les connexions JSON-RPC</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation>Avertissement</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Avertissement : cette version est obsolète, une mise à jour est nécessaire !</translation>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation>Vous devez reconstruire les bases de données avec -reindex pour modifier -txindex</translation>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat corrompu, la récupération a échoué</translation>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>Mot de passe pour les connexions JSON-RPC</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Autoriser les connexions JSON-RPC depuis l'adresse IP spécifiée</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Envoyer des commandes au nœud fonctionnant à <ip> (par défaut : 127.0.0.1)</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Exécuter la commande lorsque le meilleur bloc change (%s est remplacé par le hachage du bloc dans cmd)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>Mettre à jour le format du porte-monnaie</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Régler la taille de la plage de clefs sur <n> (par défaut : 100)</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Réanalyser la chaîne de blocs pour les transactions de porte-monnaie manquantes</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Utiliser OpenSSL (https) pour les connexions JSON-RPC</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Fichier de certificat serveur (par défaut : server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Clef privée du serveur (par défaut : server.pem)</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Algorithmes de chiffrement acceptables (par défaut : TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>Ce message d'aide</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Impossible de se lier à %s sur cet ordinateur (bind a retourné l'erreur %d, %s)</translation>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation>Connexion via un proxy socks</translation>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Autoriser les recherches DNS pour -addnode, -seednode et -connect</translation>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>Chargement des adresses…</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Erreur lors du chargement de wallet.dat : porte-monnaie corrompu</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of FairQuark</source>
<translation>Erreur lors du chargement de wallet.dat : le porte-monnaie nécessite une version plus récente de FairQuark</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart FairQuark to complete</source>
<translation>Le porte-monnaie nécessitait une réécriture : veuillez redémarrer FairQuark pour terminer l'opération</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>Erreur lors du chargement de wallet.dat</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Adresse -proxy invalide : « %s »</translation>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Réseau inconnu spécifié dans -onlynet : « %s »</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Version inconnue de proxy -socks demandée : %i</translation>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>Impossible de résoudre l'adresse -bind : « %s »</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>Impossible de résoudre l'adresse -externalip : « %s »</translation>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Montant invalide pour -paytxfee=<montant> : « %s »</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>Montant invalide</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>Fonds insuffisants</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>Chargement de l’index des blocs…</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Ajouter un nœud auquel se connecter et tenter de garder la connexion ouverte</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. FairQuark is probably already running.</source>
<translation>Impossible de se lier à %s sur cet ordinateur. FairQuark fonctionne probablement déjà.</translation>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Frais par Ko à ajouter aux transactions que vous enverrez</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>Chargement du porte-monnaie…</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>Impossible de revenir à une version antérieure du porte-monnaie</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>Impossible d'écrire l'adresse par défaut</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>Nouvelle analyse…</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>Chargement terminé</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>Pour utiliser l'option %s</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>Erreur</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Vous devez ajouter la ligne rpcpassword=<mot-de-passe> au fichier de configuration :
%s
Si le fichier n'existe pas, créez-le avec les droits de lecture seule accordés au propriétaire.</translation>
</message>
</context>
</TS><|fim▁end|> | <message>
<location line="+2"/> |
<|file_name|>col_to_cluster.py<|end_file_name|><|fim▁begin|># -*- coding: utf8
from __future__ import division, print_function
from collections import defaultdict
from matplotlib import pyplot as plt
from radar import radar_factory
from scipy import stats
from scripts import initialize_matplotlib
import numpy as np
import plac
import sys
REFERRER_ABBRV = {
'EXTERNAL':'EXT.',
'FEATURED':'FEAT.',
'INTERNAL':'INT.',
'MOBILE':'MOBI.',
'SEARCH':'SEAR.',
'SOCIAL':'SOC.',
'VIRAL':'VIR.'}
CATEG_ABBRV = {
'Autos&Vehicles':'Vehi.',
'Autos':'Vehi.',
'Comedy':'Com.',
'Education':'Edu.',
'Entertainment':'Ent.',
'Film':'Film',
'Film&Animation':'Film',
'Games':'Game',
'Gaming':'Game',
'Howto':'Howto',
'Howto&Style':'Howto',
'Movies':'Film',
'Music':'Music',
'NULL':'-',
'News':'News',
'News&Politics':'News',
'Nonprofit':'Nonprof.',
'Nonprofits&Activism':'Nonprof.',
'People&Blogs':'People',
'People':'People',
'Pets&Animals':'Pets',
'Pets':'Pets',
'Animals':'Pets',
'Science&Technology':'Sci.',
'Science':'Sci.',
'Tech':'Sci.',
'Shows':'Show',
'Sports':'Sport',
'Trailers':'Film',
'Travel&Events':'Travel',
'Travel':'Travel'}
def load_text_file(features_fpath, col_to_use, classes):
to_plot = defaultdict(lambda: defaultdict(float))
sum_classes = defaultdict(float)
labels = set()
with open(features_fpath) as features_file:
for curr_line, line in enumerate(features_file):
spl = line.split()
if col_to_use >= len(spl):
continue
            data = CATEG_ABBRV[spl[col_to_use].strip()]
class_num = classes[curr_line]
labels.add(data)
sum_classes[class_num] += 1
to_plot[class_num][data] += 1
return to_plot, sum_classes, sorted(labels)
def load_svm_file(features_fpath, classes):
col_dict = {
'EXTERNAL':13,
'FEATURED':14,
'INTERNAL':15,
'MOBILE':16,
'SEARCH':17,
'SOCIAL':18,
'VIRAL':19
}
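    # The referrer columns (13-19) hold per-referrer view counts; the last column
    # of each line is accumulated as the cluster total used for normalization.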
to_plot = defaultdict(lambda: defaultdict(float))
sum_classes = defaultdict(float)
labels = set()
with open(features_fpath) as features_file:
curr_line = 0
for line in features_file:
if '#' in line:
for key, id_ in col_dict.items():
print(id_, key, line.split()[id_])
continue
class_num = classes[curr_line]
sum_classes[class_num] += float(line.split()[-1])
for ref_name, col_id in col_dict.items():
ref_abbrv = REFERRER_ABBRV[ref_name]
val = float(line.split()[col_id])
present = val > 0
if present:
labels.add(ref_abbrv)
to_plot[class_num][ref_abbrv] += val
<|fim▁hole|> return to_plot, sum_classes, sorted(labels)
def generate_data_plot(to_plot, sum_classes, labels, classes):
num_classes = len(set(classes))
colors = ['b', 'g', 'm', 'y']
total = 0
for class_num in xrange(num_classes):
color = colors[class_num]
data_plot = []
for label in labels:
total += to_plot[class_num][label]
data_plot.append(to_plot[class_num][label] / sum_classes[class_num])
yield data_plot, color, class_num
def radar_plot(labels, data_plots, out_fpath):
theta = radar_factory(len(labels))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='radar')
for data_plot, color, class_num in data_plots:
ax.plot(theta, data_plot, color=color, label='C%d'%class_num)
ax.fill(theta, data_plot, facecolor=color, alpha=0.25)
ax.set_varlabels(labels)
plt.legend(frameon=False, ncol=4, bbox_to_anchor=(0.5, -0.15),
loc='lower center')
plt.savefig(out_fpath)
def chisq(counts, expected_prob):
counts = np.array(counts)
expected = np.array(expected_prob) * counts.sum()
return stats.chisquare(counts, expected)[1]
def allchisq(to_plot, sum_classes, labels, classes):
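    # Chi-square test of each cluster's label counts against the pooled label
    # distribution across all clusters.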
num_classes = len(set(classes))
totals = []
for label in labels:
sum_ = 0
for class_num in xrange(num_classes):
sum_ += to_plot[class_num][label]
totals.append(sum_)
probs = []
sum_totals = sum(totals)
for i, t in enumerate(totals):
probs.append( t / sum_totals)
for class_num in xrange(num_classes):
counts = []
for label in labels:
counts.append(to_plot[class_num][label])
chisq(counts, probs)
def stacked_bars(labels, data_plots, out_fpath, label_translation, ref=True):
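    # Bars are laid out four per cluster; the skipped x positions (5, 10, 15)
    # leave a gap between cluster groups.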
x_locations = [1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19]
data_class = {}
data_label = {}
for data, _, class_num in data_plots:
best_idx = np.argsort(data)[::-1][:4]
best_cls = np.array(data)[best_idx]
best_lbl = np.array(labels)[best_idx]
data_class[label_translation[class_num]] = best_cls
data_label[label_translation[class_num]] = best_lbl
bar_data = []
bar_labels = []
for cls in sorted(data_class):
bar_data.extend(data_class[cls])
bar_labels.extend(data_label[cls])
colors = ['b', 'g', 'm', 'r', 'y', 'c', '#A617A1', '#2B5700', 'w',
'#FF7300', 'k'] * 3
colored={}
if ref:
to_use = set(REFERRER_ABBRV.values())
else:
to_use = set(CATEG_ABBRV.values())
for i, l in enumerate(to_use):
colored[l] = colors[i]
for x, y, l in zip(x_locations, bar_data, bar_labels):
c = colored[l]
plt.bar(left=x, height=y, color=c, width=1, alpha=0.5)
plt.text(x + .75, y, l, va='bottom', ha='center', rotation=45)
plt.xlim(xmin=0, xmax=21)
plt.xlabel('Cluster')
if ref:
plt.ylim(ymin=0, ymax=.31)
plt.ylabel('Fraction of Views in Cluster')
else:
plt.ylim(ymin=0, ymax=.4)
plt.ylabel('Fraction of Videos in Cluster')
    plt.xticks([3, 8, 13, 18], ['$C0$', '$C1$', '$C2$', '$C3$'])
plt.savefig(out_fpath)
@plac.annotations(features_fpath=plac.Annotation('Features file', type=str),
classes_fpath=plac.Annotation('Video classes file', type=str),
out_fpath=plac.Annotation('Plot file', type=str),
                  trans_fpath=plac.Annotation('Translation of cluster num to label',
type=str),
col_to_use=plac.Annotation('Column number to use', type=int,
kind='option', abbrev='c'),
is_text_features=plac.Annotation('Indicates file type',
kind='flag', abbrev='t',
type=bool))
def main(features_fpath, classes_fpath, out_fpath,
trans_fpath, col_to_use=2, is_text_features=False):
initialize_matplotlib()
classes = np.loadtxt(classes_fpath)
if is_text_features:
to_plot, sum_classes, labels = \
load_text_file(features_fpath, col_to_use, classes)
ref=False
else:
to_plot, sum_classes, labels = \
load_svm_file(features_fpath, classes)
ref=True
trans = {}
with open(trans_fpath) as f:
for l in f:
spl = l.split()
trans[int(spl[0])] = int(spl[1])
data = generate_data_plot(to_plot, sum_classes, labels, classes)
stacked_bars(labels, data, out_fpath, trans, ref)
#allchisq(to_plot, sum_classes, labels, classes)
if __name__ == '__main__':
sys.exit(plac.call(main))<|fim▁end|> | curr_line += 1
|
<|file_name|>vacuum.py<|end_file_name|><|fim▁begin|>"""Shark IQ Wrapper."""
from __future__ import annotations
import logging
from typing import Iterable
from sharkiqpy import OperatingModes, PowerModes, Properties, SharkIqVacuum
from homeassistant.components.vacuum import (
STATE_CLEANING,
STATE_DOCKED,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STATUS,
SUPPORT_STOP,
StateVacuumEntity,
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, SHARK
from .update_coordinator import SharkIqUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
# Supported features
SUPPORT_SHARKIQ = (
SUPPORT_BATTERY
| SUPPORT_FAN_SPEED
| SUPPORT_PAUSE
| SUPPORT_RETURN_HOME
| SUPPORT_START
| SUPPORT_STATE
| SUPPORT_STATUS<|fim▁hole|> | SUPPORT_STOP
| SUPPORT_LOCATE
)
OPERATING_STATE_MAP = {
OperatingModes.PAUSE: STATE_PAUSED,
OperatingModes.START: STATE_CLEANING,
OperatingModes.STOP: STATE_IDLE,
OperatingModes.RETURN: STATE_RETURNING,
}
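# STATE_DOCKED does not appear in this map; SharkVacuumEntity.state derives it
# from the DOCKED_STATUS property instead.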
FAN_SPEEDS_MAP = {
"Eco": PowerModes.ECO,
"Normal": PowerModes.NORMAL,
"Max": PowerModes.MAX,
}
STATE_RECHARGING_TO_RESUME = "recharging_to_resume"
# Attributes to expose
ATTR_ERROR_CODE = "last_error_code"
ATTR_ERROR_MSG = "last_error_message"
ATTR_LOW_LIGHT = "low_light"
ATTR_RECHARGE_RESUME = "recharge_and_resume"
ATTR_RSSI = "rssi"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Shark IQ vacuum cleaner."""
coordinator: SharkIqUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
devices: Iterable[SharkIqVacuum] = coordinator.shark_vacs.values()
device_names = [d.name for d in devices]
_LOGGER.debug(
"Found %d Shark IQ device(s): %s",
len(device_names),
", ".join([d.name for d in devices]),
)
async_add_entities([SharkVacuumEntity(d, coordinator) for d in devices])
class SharkVacuumEntity(CoordinatorEntity, StateVacuumEntity):
"""Shark IQ vacuum entity."""
def __init__(self, sharkiq: SharkIqVacuum, coordinator: SharkIqUpdateCoordinator):
"""Create a new SharkVacuumEntity."""
super().__init__(coordinator)
self.sharkiq = sharkiq
def clean_spot(self, **kwargs):
"""Clean a spot. Not yet implemented."""
raise NotImplementedError()
def send_command(self, command, params=None, **kwargs):
"""Send a command to the vacuum. Not yet implemented."""
raise NotImplementedError()
@property
def is_online(self) -> bool:
"""Tell us if the device is online."""
return self.coordinator.device_is_online(self.sharkiq.serial_number)
@property
def name(self) -> str:
"""Device name."""
return self.sharkiq.name
@property
def serial_number(self) -> str:
"""Vacuum API serial number (DSN)."""
return self.sharkiq.serial_number
@property
def model(self) -> str:
"""Vacuum model number."""
if self.sharkiq.vac_model_number:
return self.sharkiq.vac_model_number
return self.sharkiq.oem_model_number
@property
def device_info(self) -> dict:
"""Device info dictionary."""
return {
"identifiers": {(DOMAIN, self.serial_number)},
"name": self.name,
"manufacturer": SHARK,
"model": self.model,
"sw_version": self.sharkiq.get_property_value(
Properties.ROBOT_FIRMWARE_VERSION
),
}
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_SHARKIQ
@property
def is_docked(self) -> bool | None:
"""Is vacuum docked."""
return self.sharkiq.get_property_value(Properties.DOCKED_STATUS)
@property
def error_code(self) -> int | None:
"""Return the last observed error code (or None)."""
return self.sharkiq.error_code
@property
def error_message(self) -> str | None:
"""Return the last observed error message (or None)."""
if not self.error_code:
return None
return self.sharkiq.error_text
@property
def operating_mode(self) -> str | None:
"""Operating mode.."""
op_mode = self.sharkiq.get_property_value(Properties.OPERATING_MODE)
return OPERATING_STATE_MAP.get(op_mode)
@property
def recharging_to_resume(self) -> int | None:
"""Return True if vacuum set to recharge and resume cleaning."""
return self.sharkiq.get_property_value(Properties.RECHARGING_TO_RESUME)
@property
def state(self):
"""
Get the current vacuum state.
NB: Currently, we do not return an error state because they can be very, very stale.
In the app, these are (usually) handled by showing the robot as stopped and sending the
user a notification.
"""
if self.is_docked:
return STATE_DOCKED
return self.operating_mode
@property
def unique_id(self) -> str:
"""Return the unique id of the vacuum cleaner."""
return self.serial_number
@property
def available(self) -> bool:
"""Determine if the sensor is available based on API results."""
# If the last update was successful...
return self.coordinator.last_update_success and self.is_online
@property
def battery_level(self):
"""Get the current battery level."""
return self.sharkiq.get_property_value(Properties.BATTERY_CAPACITY)
async def async_return_to_base(self, **kwargs):
"""Have the device return to base."""
await self.sharkiq.async_set_operating_mode(OperatingModes.RETURN)
await self.coordinator.async_refresh()
async def async_pause(self):
"""Pause the cleaning task."""
await self.sharkiq.async_set_operating_mode(OperatingModes.PAUSE)
await self.coordinator.async_refresh()
async def async_start(self):
"""Start the device."""
await self.sharkiq.async_set_operating_mode(OperatingModes.START)
await self.coordinator.async_refresh()
async def async_stop(self, **kwargs):
"""Stop the device."""
await self.sharkiq.async_set_operating_mode(OperatingModes.STOP)
await self.coordinator.async_refresh()
async def async_locate(self, **kwargs):
"""Cause the device to generate a loud chirp."""
await self.sharkiq.async_find_device()
@property
def fan_speed(self) -> str:
"""Return the current fan speed."""
fan_speed = None
speed_level = self.sharkiq.get_property_value(Properties.POWER_MODE)
for k, val in FAN_SPEEDS_MAP.items():
if val == speed_level:
fan_speed = k
return fan_speed
async def async_set_fan_speed(self, fan_speed: str, **kwargs):
"""Set the fan speed."""
await self.sharkiq.async_set_property_value(
Properties.POWER_MODE, FAN_SPEEDS_MAP.get(fan_speed.capitalize())
)
await self.coordinator.async_refresh()
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return list(FAN_SPEEDS_MAP)
# Various attributes we want to expose
@property
def recharge_resume(self) -> bool | None:
"""Recharge and resume mode active."""
return self.sharkiq.get_property_value(Properties.RECHARGE_RESUME)
@property
def rssi(self) -> int | None:
"""Get the WiFi RSSI."""
return self.sharkiq.get_property_value(Properties.RSSI)
@property
def low_light(self):
"""Let us know if the robot is operating in low-light mode."""
return self.sharkiq.get_property_value(Properties.LOW_LIGHT_MISSION)
@property
def extra_state_attributes(self) -> dict:
"""Return a dictionary of device state attributes specific to sharkiq."""
data = {
ATTR_ERROR_CODE: self.error_code,
ATTR_ERROR_MSG: self.sharkiq.error_text,
ATTR_LOW_LIGHT: self.low_light,
ATTR_RECHARGE_RESUME: self.recharge_resume,
}
return data<|fim▁end|> | |
<|file_name|>client.py<|end_file_name|><|fim▁begin|>from emburse.resource import (
EmburseObject,
Account,
Allowance,
Card,
Category,
Company,
Department,
Label,
Location,
Member,
SharedLink,
Statement,
Transaction
)
class Client(EmburseObject):
"""
Emburse API Client
API enables for the creation of expense cards at scale for custom business solutions as well as for
third-party app integrations. Cards can be created with set spending limits and assigned with just an email.
Some use cases include vendor payments, employee expense control, and fleet card management.
API Version:
v1
API Docs:
https://www.emburse.com/api/v1/docs#getting-started
Authors:
Marc Ford <[email protected]>
"""
@property
def Account(self):
"""
Emburse Account Object,<|fim▁hole|> """
return Account(auth_token=self.auth_token)
@property
def Allowance(self):
"""
Emburse Allowance Object,
configured with the auth token from the client
:return: A configured emburse.resource.Allowance
:rtype: Allowance
"""
return Allowance(auth_token=self.auth_token)
@property
def Card(self):
"""
Emburse Card Object,
configured with the auth token from the client
:return: A configured emburse.resource.Card
:rtype: Card
"""
return Card(auth_token=self.auth_token)
@property
def Category(self):
"""
Emburse Category Object,
configured with the auth token from the client
:return: A configured emburse.resource.Category
:rtype: Category
"""
return Category(auth_token=self.auth_token)
@property
def Company(self):
"""
Emburse Company Object,
configured with the auth token from the client
:return: A configured emburse.resource.Company
:rtype: Company
"""
return Company(auth_token=self.auth_token)
@property
def Department(self):
"""
Emburse Department Object,
configured with the auth token from the client
:return: A configured emburse.resource.Department
:rtype: Department
"""
return Department(auth_token=self.auth_token)
@property
def Label(self):
"""
Emburse Label Object,
configured with the auth token from the client
:return: A configured emburse.resource.Label
:rtype: Label
"""
return Label(auth_token=self.auth_token)
@property
def Location(self):
"""
Emburse Location Object,
configured with the auth token from the client
:return: A configured emburse.resource.Location
:rtype: Location
"""
return Location(auth_token=self.auth_token)
@property
def Member(self):
"""
Emburse Member Object,
configured with the auth token from the client
:return: A configured emburse.resource.Member
:rtype: Member
"""
return Member(auth_token=self.auth_token)
@property
def SharedLink(self):
"""
Emburse SharedLink Object,
configured with the auth token from the client
:return: A configured emburse.resource.SharedLink
:rtype: SharedLink
"""
return SharedLink(auth_token=self.auth_token)
@property
def Statement(self):
"""
Emburse Statement Object,
configured with the auth token from the client
:return: A configured emburse.resource.Statement
:rtype: Statement
"""
return Statement(auth_token=self.auth_token)
@property
def Transaction(self):
"""
Emburse Transaction Object,
configured with the auth token from the client
:return: A configured emburse.resource.Transaction
:rtype: Transaction
"""
return Transaction(auth_token=self.auth_token)<|fim▁end|> | configured with the auth token from the client
:return: A configured emburse.resource.Account
:rtype: Account |
<|file_name|>imports.rs<|end_file_name|><|fim▁begin|>use std::path::PathBuf;
use std::fs::File;
use std::collections::HashMap;
use errors::*;
use ast::*;
use symbols::*;
use parser;
use semantics;
#[derive(Debug)]
pub struct ModuleImporter {
modules: HashMap<PathBuf, Module>,
symbol_tables: HashMap<PathBuf, Scope>,
search_paths: Vec<PathBuf>,
}
impl ModuleImporter {
pub fn new(search_paths: Vec<PathBuf>) -> ModuleImporter {
ModuleImporter {
modules: HashMap::new(),
symbol_tables: HashMap::new(),
search_paths: search_paths,
}
}
fn resolve_import(&self, module_name: &str) -> Result<PathBuf> {
// generate file name from search path
let mut module_path_buf = PathBuf::new();
for module_part in module_name.split('.') {
module_path_buf.push(module_part);
}
module_path_buf.set_extension("silver");
// for all the search paths, check if the file exists
for search_path in &self.search_paths {
let module_file = search_path.join(&module_path_buf);
if module_file.exists() {
return Ok(module_file);
}
}
// if we get here, we can't find any module
Err(ErrorKind::ModuleNotFound(String::from(module_name)).into())
}
fn check_import_self(&self, module: &mut Module) -> Result<()> {
for import in &module.imports {
if import.module_id == module.module_id {
bail!(ErrorKind::ImportSelf(module.module_id.clone()));
}
}
Ok(())
}
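    // Every module other than the Prelude implicitly imports the Prelude,
    // unless it already lists it explicitly.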
fn inject_prelude(&self, module: &mut Module) {
if module.module_id == "Prelude" {
return ();
}
for import in &module.imports {
if import.module_id == "Prelude" {
return ();
}
}
module.imports.push(Import::new(String::from("Prelude"), Vec::new(), None))
}
pub fn resolve_imports(&self, module: &mut Module) -> Result<Vec<PathBuf>> {
try!(self.check_import_self(module));
self.inject_prelude(module);
let mut file_paths = Vec::new();
for import in &module.imports {
let import_file_path = try!(self.resolve_import(&import.module_id));
file_paths.push(import_file_path);
}
Ok(file_paths)
}
<|fim▁hole|>
if !self.symbol_tables.contains_key(&file_path) {
// parse
let source_file = try!(File::open(&file_path));
let mut ast = try!(parser::parse(&source_file));
// recursively import modules
let mut root_scope: Scope = Default::default();
let resolved_module_paths = try!(self.resolve_imports(&mut ast));
for module_path in resolved_module_paths {
try!(self.import_module(module_path.clone()));
try!(root_scope.merge(&self.symbol_tables[&module_path]));
}
// semantics
let child_symbol_table = Scope::new(Some(Box::new(root_scope)));
let symbol_table = try!(semantics::is_ast_valid(&ast, child_symbol_table));
// inject into self
self.modules.insert(file_path.clone(), ast);
self.symbol_tables.insert(file_path.clone(), symbol_table);
}
Ok(())
}
pub fn resolve_symbol_tables(&mut self, module: &mut Module) -> Result<Vec<&Scope>> {
let resolved_module_paths = try!(self.resolve_imports(module));
for module_path in resolved_module_paths {
try!(self.import_module(module_path));
}
Ok(self.symbol_tables.values().collect::<Vec<_>>())
}
}<|fim▁end|> | pub fn import_module(&mut self, file_path: PathBuf) -> Result<()> { |
<|file_name|>hello.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# hello.py
# A Hello World program using Tkinter package.
#
# Author: Billy Wilson Arante
# Created: 2016/10/29 EDT
#
# Attribution: http://effbot.org/tkinterbook/tkinter-hello-tkinter.htm<|fim▁hole|>from Tkinter import *
def main():
"""Main"""
root = Tk()
label = Label(root, text="Hello, world!")
label.pack()
root.mainloop()
if __name__ == "__main__":
# Executes only if run as script
main()<|fim▁end|> | |
<|file_name|>dreamer_model.py<|end_file_name|><|fim▁begin|>import numpy as np
from typing import Any, List, Tuple
from ray.rllib.models.torch.misc import Reshape
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.framework import TensorType
torch, nn = try_import_torch()
if torch:
from torch import distributions as td
from ray.rllib.agents.dreamer.utils import Linear, Conv2d, \
ConvTranspose2d, GRUCell, TanhBijector
ActFunc = Any
# Encoder, part of PlaNET
class ConvEncoder(nn.Module):
"""Standard Convolutional Encoder for Dreamer. This encoder is used
to encode images frm an enviornment into a latent state for the
RSSM model in PlaNET.
"""
def __init__(self,
depth: int = 32,
act: ActFunc = None,
shape: Tuple[int] = (3, 64, 64)):
"""Initializes Conv Encoder
Args:
depth (int): Number of channels in the first conv layer
act (Any): Activation for Encoder, default ReLU
shape (List): Shape of observation input
"""
super().__init__()
self.act = act
if not act:
self.act = nn.ReLU
self.depth = depth
self.shape = shape
init_channels = self.shape[0]
self.layers = [
Conv2d(init_channels, self.depth, 4, stride=2),
self.act(),
Conv2d(self.depth, 2 * self.depth, 4, stride=2),
self.act(),
Conv2d(2 * self.depth, 4 * self.depth, 4, stride=2),
self.act(),
Conv2d(4 * self.depth, 8 * self.depth, 4, stride=2),
self.act(),
]
self.model = nn.Sequential(*self.layers)
def forward(self, x):
# Flatten to [batch*horizon, 3, 64, 64] in loss function
orig_shape = list(x.size())
x = x.view(-1, *(orig_shape[-3:]))
x = self.model(x)
new_shape = orig_shape[:-3] + [32 * self.depth]
x = x.view(*new_shape)
return x
# Decoder, part of PlaNET
class ConvDecoder(nn.Module):
"""Standard Convolutional Decoder for Dreamer.
This decoder is used to decode images from the latent state generated
by the transition dynamics model. This is used in calculating loss and
logging gifs for imagined trajectories.
"""
def __init__(self,
input_size: int,
depth: int = 32,
act: ActFunc = None,
shape: Tuple[int] = (3, 64, 64)):
"""Initializes a ConvDecoder instance.
Args:
input_size (int): Input size, usually feature size output from
RSSM.
depth (int): Number of channels in the first conv layer
act (Any): Activation for Encoder, default ReLU
shape (List): Shape of observation input
"""
super().__init__()
self.act = act
if not act:
self.act = nn.ReLU
self.depth = depth
self.shape = shape
self.layers = [
Linear(input_size, 32 * self.depth),
Reshape([-1, 32 * self.depth, 1, 1]),
ConvTranspose2d(32 * self.depth, 4 * self.depth, 5, stride=2),
self.act(),
ConvTranspose2d(4 * self.depth, 2 * self.depth, 5, stride=2),
self.act(),
ConvTranspose2d(2 * self.depth, self.depth, 6, stride=2),
self.act(),
ConvTranspose2d(self.depth, self.shape[0], 6, stride=2),
]
self.model = nn.Sequential(*self.layers)
def forward(self, x):
# x is [batch, hor_length, input_size]
orig_shape = list(x.size())
x = self.model(x)
reshape_size = orig_shape[:-1] + self.shape
mean = x.view(*reshape_size)
# Equivalent to making a multivariate diag
return td.Independent(td.Normal(mean, 1), len(self.shape))
# Reward Model (PlaNET), and Value Function
class DenseDecoder(nn.Module):
"""FC network that outputs a distribution for calculating log_prob.
Used later in DreamerLoss.
"""
def __init__(self,
input_size: int,
output_size: int,
layers: int,
units: int,
dist: str = "normal",
act: ActFunc = None):
"""Initializes FC network
Args:
input_size (int): Input size to network
output_size (int): Output size to network
layers (int): Number of layers in network
units (int): Size of the hidden layers
dist (str): Output distribution, parameterized by FC output
logits.
act (Any): Activation function
"""
super().__init__()
self.layrs = layers
self.units = units
self.act = act
if not act:
self.act = nn.ELU
self.dist = dist
self.input_size = input_size
self.output_size = output_size
self.layers = []
cur_size = input_size
for _ in range(self.layrs):
self.layers.extend([Linear(cur_size, self.units), self.act()])
cur_size = units
self.layers.append(Linear(cur_size, output_size))
self.model = nn.Sequential(*self.layers)
def forward(self, x):
x = self.model(x)
if self.output_size == 1:
x = torch.squeeze(x)
if self.dist == "normal":
output_dist = td.Normal(x, 1)
elif self.dist == "binary":
output_dist = td.Bernoulli(logits=x)
else:
raise NotImplementedError("Distribution type not implemented!")
return td.Independent(output_dist, 0)
# Represents dreamer policy
class ActionDecoder(nn.Module):
"""ActionDecoder is the policy module in Dreamer.
It outputs a distribution parameterized by mean and std, later to be<|fim▁hole|> transformed by a custom TanhBijector in utils.py for Dreamer.
"""
def __init__(self,
input_size: int,
action_size: int,
layers: int,
units: int,
dist: str = "tanh_normal",
act: ActFunc = None,
min_std: float = 1e-4,
init_std: float = 5.0,
mean_scale: float = 5.0):
"""Initializes Policy
Args:
input_size (int): Input size to network
action_size (int): Action space size
layers (int): Number of layers in network
units (int): Size of the hidden layers
dist (str): Output distribution, with tanh_normal implemented
act (Any): Activation function
min_std (float): Minimum std for output distribution
init_std (float): Intitial std
mean_scale (float): Augmenting mean output from FC network
"""
super().__init__()
self.layrs = layers
self.units = units
self.dist = dist
self.act = act
if not act:
self.act = nn.ReLU
self.min_std = min_std
self.init_std = init_std
self.mean_scale = mean_scale
self.action_size = action_size
self.layers = []
self.softplus = nn.Softplus()
# MLP Construction
cur_size = input_size
for _ in range(self.layrs):
self.layers.extend([Linear(cur_size, self.units), self.act()])
cur_size = self.units
if self.dist == "tanh_normal":
self.layers.append(Linear(cur_size, 2 * action_size))
elif self.dist == "onehot":
self.layers.append(Linear(cur_size, action_size))
self.model = nn.Sequential(*self.layers)
# Returns distribution
def forward(self, x):
raw_init_std = np.log(np.exp(self.init_std) - 1)
x = self.model(x)
if self.dist == "tanh_normal":
mean, std = torch.chunk(x, 2, dim=-1)
mean = self.mean_scale * torch.tanh(mean / self.mean_scale)
std = self.softplus(std + raw_init_std) + self.min_std
dist = td.Normal(mean, std)
transforms = [TanhBijector()]
dist = td.transformed_distribution.TransformedDistribution(
dist, transforms)
dist = td.Independent(dist, 1)
elif self.dist == "onehot":
dist = td.OneHotCategorical(logits=x)
raise NotImplementedError("Atari not implemented yet!")
return dist
# Represents TD model in PlaNET
class RSSM(nn.Module):
"""RSSM is the core recurrent part of the PlaNET module. It consists of
two networks, one (obs) to calculate posterior beliefs and states and
the second (img) to calculate prior beliefs and states. The prior network
takes in the previous state and action, while the posterior network takes
in the previous state, action, and a latent embedding of the most recent
observation.
"""
def __init__(self,
action_size: int,
embed_size: int,
stoch: int = 30,
deter: int = 200,
hidden: int = 200,
act: ActFunc = None):
"""Initializes RSSM
Args:
action_size (int): Action space size
embed_size (int): Size of ConvEncoder embedding
stoch (int): Size of the distributional hidden state
deter (int): Size of the deterministic hidden state
hidden (int): General size of hidden layers
act (Any): Activation function
"""
super().__init__()
self.stoch_size = stoch
self.deter_size = deter
self.hidden_size = hidden
self.act = act
if act is None:
self.act = nn.ELU
self.obs1 = Linear(embed_size + deter, hidden)
self.obs2 = Linear(hidden, 2 * stoch)
self.cell = GRUCell(self.hidden_size, hidden_size=self.deter_size)
self.img1 = Linear(stoch + action_size, hidden)
self.img2 = Linear(deter, hidden)
self.img3 = Linear(hidden, 2 * stoch)
self.softplus = nn.Softplus
self.device = (torch.device("cuda")
if torch.cuda.is_available() else torch.device("cpu"))
def get_initial_state(self, batch_size: int) -> List[TensorType]:
"""Returns the inital state for the RSSM, which consists of mean,
std for the stochastic state, the sampled stochastic hidden state
(from mean, std), and the deterministic hidden state, which is
pushed through the GRUCell.
Args:
batch_size (int): Batch size for initial state
Returns:
List of tensors
"""
return [
torch.zeros(batch_size, self.stoch_size).to(self.device),
torch.zeros(batch_size, self.stoch_size).to(self.device),
torch.zeros(batch_size, self.stoch_size).to(self.device),
torch.zeros(batch_size, self.deter_size).to(self.device),
]
def observe(self,
embed: TensorType,
action: TensorType,
state: List[TensorType] = None
) -> Tuple[List[TensorType], List[TensorType]]:
"""Returns the corresponding states from the embedding from ConvEncoder
and actions. This is accomplished by rolling out the RNN from the
        starting state through each index of embed and action, saving all
intermediate states between.
Args:
embed (TensorType): ConvEncoder embedding
action (TensorType): Actions
state (List[TensorType]): Initial state before rollout
Returns:
Posterior states and prior states (both List[TensorType])
"""
if state is None:
state = self.get_initial_state(action.size()[0])
embed = embed.permute(1, 0, 2)
action = action.permute(1, 0, 2)
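        # Iterate in time-major order: tensors are now [time, batch, feature].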
priors = [[] for i in range(len(state))]
posts = [[] for i in range(len(state))]
last = (state, state)
for index in range(len(action)):
# Tuple of post and prior
last = self.obs_step(last[0], action[index], embed[index])
[o.append(s) for s, o in zip(last[0], posts)]
[o.append(s) for s, o in zip(last[1], priors)]
prior = [torch.stack(x, dim=0) for x in priors]
post = [torch.stack(x, dim=0) for x in posts]
prior = [e.permute(1, 0, 2) for e in prior]
post = [e.permute(1, 0, 2) for e in post]
return post, prior
def imagine(self, action: TensorType,
state: List[TensorType] = None) -> List[TensorType]:
"""Imagines the trajectory starting from state through a list of actions.
Similar to observe(), requires rolling out the RNN for each timestep.
Args:
action (TensorType): Actions
state (List[TensorType]): Starting state before rollout
Returns:
Prior states
"""
if state is None:
state = self.get_initial_state(action.size()[0])
action = action.permute(1, 0, 2)
indices = range(len(action))
priors = [[] for _ in range(len(state))]
last = state
for index in indices:
last = self.img_step(last, action[index])
[o.append(s) for s, o in zip(last, priors)]
prior = [torch.stack(x, dim=0) for x in priors]
prior = [e.permute(1, 0, 2) for e in prior]
return prior
def obs_step(
self, prev_state: TensorType, prev_action: TensorType,
embed: TensorType) -> Tuple[List[TensorType], List[TensorType]]:
"""Runs through the posterior model and returns the posterior state
Args:
prev_state (TensorType): The previous state
prev_action (TensorType): The previous action
embed (TensorType): Embedding from ConvEncoder
Returns:
Post and Prior state
"""
prior = self.img_step(prev_state, prev_action)
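        # prior[3] is the deterministic GRU state (see the
        # [mean, std, stoch, deter] ordering noted above).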
x = torch.cat([prior[3], embed], dim=-1)
x = self.obs1(x)
x = self.act()(x)
x = self.obs2(x)
mean, std = torch.chunk(x, 2, dim=-1)
std = self.softplus()(std) + 0.1
stoch = self.get_dist(mean, std).rsample()
post = [mean, std, stoch, prior[3]]
return post, prior
def img_step(self, prev_state: TensorType,
prev_action: TensorType) -> List[TensorType]:
"""Runs through the prior model and returns the prior state
Args:
prev_state (TensorType): The previous state
prev_action (TensorType): The previous action
Returns:
Prior state
"""
x = torch.cat([prev_state[2], prev_action], dim=-1)
x = self.img1(x)
x = self.act()(x)
deter = self.cell(x, prev_state[3])
x = deter
x = self.img2(x)
x = self.act()(x)
x = self.img3(x)
mean, std = torch.chunk(x, 2, dim=-1)
std = self.softplus()(std) + 0.1
stoch = self.get_dist(mean, std).rsample()
return [mean, std, stoch, deter]
def get_feature(self, state: List[TensorType]) -> TensorType:
# Constructs feature for input to reward, decoder, actor, critic
return torch.cat([state[2], state[3]], dim=-1)
def get_dist(self, mean: TensorType, std: TensorType) -> TensorType:
return td.Normal(mean, std)
# Represents all models in Dreamer, unifies them all into a single interface
class DreamerModel(TorchModelV2, nn.Module):
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
super().__init__(obs_space, action_space, num_outputs, model_config,
name)
nn.Module.__init__(self)
self.depth = model_config["depth_size"]
self.deter_size = model_config["deter_size"]
self.stoch_size = model_config["stoch_size"]
self.hidden_size = model_config["hidden_size"]
self.action_size = action_space.shape[0]
self.encoder = ConvEncoder(self.depth)
self.decoder = ConvDecoder(
self.stoch_size + self.deter_size, depth=self.depth)
self.reward = DenseDecoder(self.stoch_size + self.deter_size, 1, 2,
self.hidden_size)
self.dynamics = RSSM(
self.action_size,
32 * self.depth,
stoch=self.stoch_size,
deter=self.deter_size)
self.actor = ActionDecoder(self.stoch_size + self.deter_size,
self.action_size, 4, self.hidden_size)
self.value = DenseDecoder(self.stoch_size + self.deter_size, 1, 3,
self.hidden_size)
self.state = None
self.device = (torch.device("cuda")
if torch.cuda.is_available() else torch.device("cpu"))
def policy(self, obs: TensorType, state: List[TensorType], explore=True
) -> Tuple[TensorType, List[float], List[TensorType]]:
"""Returns the action. Runs through the encoder, recurrent model,
and policy to obtain action.
"""
if state is None:
self.initial_state()
else:
self.state = state
post = self.state[:4]
action = self.state[4]
embed = self.encoder(obs)
post, _ = self.dynamics.obs_step(post, action, embed)
feat = self.dynamics.get_feature(post)
action_dist = self.actor(feat)
if explore:
action = action_dist.sample()
else:
action = action_dist.mean
logp = action_dist.log_prob(action)
self.state = post + [action]
return action, logp, self.state
def imagine_ahead(self, state: List[TensorType],
horizon: int) -> TensorType:
"""Given a batch of states, rolls out more state of length horizon.
"""
start = []
for s in state:
s = s.contiguous().detach()
shpe = [-1] + list(s.size())[2:]
start.append(s.view(*shpe))
def next_state(state):
feature = self.dynamics.get_feature(state).detach()
action = self.actor(feature).rsample()
next_state = self.dynamics.img_step(state, action)
return next_state
last = start
outputs = [[] for i in range(len(start))]
for _ in range(horizon):
last = next_state(last)
[o.append(s) for s, o in zip(last, outputs)]
outputs = [torch.stack(x, dim=0) for x in outputs]
imag_feat = self.dynamics.get_feature(outputs)
return imag_feat
def get_initial_state(self) -> List[TensorType]:
self.state = self.dynamics.get_initial_state(1) + [
torch.zeros(1, self.action_space.shape[0]).to(self.device)
]
return self.state
def value_function(self) -> TensorType:
return None<|fim▁end|> | |
<|file_name|>audio_renderer_impl_unittest.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/gtest_prod_util.h"
#include "base/message_loop.h"
#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/data_buffer.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/mock_audio_renderer_sink.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
#include "media/filters/audio_renderer_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::base::Time;
using ::base::TimeDelta;
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::NiceMock;
using ::testing::StrictMock;
namespace media {
// Constants for distinguishing between muted audio and playing audio when using
// ConsumeBufferedData().
static uint8 kMutedAudio = 0x00;
static uint8 kPlayingAudio = 0x99;
class AudioRendererImplTest : public ::testing::Test {
public:
// Give the decoder some non-garbage media properties.
AudioRendererImplTest()
: renderer_(new AudioRendererImpl(
message_loop_.message_loop_proxy(),
new NiceMock<MockAudioRendererSink>(),
SetDecryptorReadyCB())),
demuxer_stream_(new MockDemuxerStream()),
decoder_(new MockAudioDecoder()),
audio_config_(kCodecVorbis, kSampleFormatPlanarF32,
CHANNEL_LAYOUT_STEREO, 44100, NULL, 0, false) {
EXPECT_CALL(*demuxer_stream_, type())
.WillRepeatedly(Return(DemuxerStream::AUDIO));
EXPECT_CALL(*demuxer_stream_, audio_decoder_config())
.WillRepeatedly(ReturnRef(audio_config_));
// Stub out time.
renderer_->set_now_cb_for_testing(base::Bind(
&AudioRendererImplTest::GetTime, base::Unretained(this)));
// Used to save callbacks and run them at a later time.
EXPECT_CALL(*decoder_, Read(_))
.WillRepeatedly(Invoke(this, &AudioRendererImplTest::ReadDecoder));
// Set up audio properties.
EXPECT_CALL(*decoder_, bits_per_channel())
.WillRepeatedly(Return(audio_config_.bits_per_channel()));
EXPECT_CALL(*decoder_, channel_layout())
.WillRepeatedly(Return(CHANNEL_LAYOUT_MONO));
EXPECT_CALL(*decoder_, samples_per_second())
.WillRepeatedly(Return(audio_config_.samples_per_second()));
}
virtual ~AudioRendererImplTest() {
SCOPED_TRACE("~AudioRendererImplTest()");
WaitableMessageLoopEvent event;
renderer_->Stop(event.GetClosure());
event.RunAndWait();
}
void ExpectUnsupportedAudioDecoder() {
EXPECT_CALL(*decoder_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(DECODER_ERROR_NOT_SUPPORTED));
}
void ExpectUnsupportedAudioDecoderConfig() {
EXPECT_CALL(*decoder_, bits_per_channel())
.WillRepeatedly(Return(3));
EXPECT_CALL(*decoder_, channel_layout())
.WillRepeatedly(Return(CHANNEL_LAYOUT_UNSUPPORTED));
EXPECT_CALL(*decoder_, samples_per_second())
.WillRepeatedly(Return(0));
EXPECT_CALL(*decoder_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
}
MOCK_METHOD1(OnStatistics, void(const PipelineStatistics&));
MOCK_METHOD0(OnUnderflow, void());
MOCK_METHOD0(OnDisabled, void());
MOCK_METHOD1(OnError, void(PipelineStatus));
void OnAudioTimeCallback(TimeDelta current_time, TimeDelta max_time) {
CHECK(current_time <= max_time);
}
void Initialize() {
EXPECT_CALL(*decoder_, Initialize(_, _, _))
.WillOnce(RunCallback<1>(PIPELINE_OK));
InitializeWithStatus(PIPELINE_OK);
int channels = ChannelLayoutToChannelCount(decoder_->channel_layout());
int bytes_per_frame = decoder_->bits_per_channel() * channels / 8;
next_timestamp_.reset(new AudioTimestampHelper(
bytes_per_frame, decoder_->samples_per_second()));
}
void InitializeWithStatus(PipelineStatus expected) {
SCOPED_TRACE(base::StringPrintf("InitializeWithStatus(%d)", expected));
AudioRendererImpl::AudioDecoderList decoders;
decoders.push_back(decoder_);
WaitableMessageLoopEvent event;
renderer_->Initialize(
demuxer_stream_,
decoders,
event.GetPipelineStatusCB(),
base::Bind(&AudioRendererImplTest::OnStatistics,
base::Unretained(this)),
base::Bind(&AudioRendererImplTest::OnUnderflow,
base::Unretained(this)),
base::Bind(&AudioRendererImplTest::OnAudioTimeCallback,
base::Unretained(this)),
ended_event_.GetClosure(),
base::Bind(&AudioRendererImplTest::OnDisabled,
base::Unretained(this)),
base::Bind(&AudioRendererImplTest::OnError,
base::Unretained(this)));
event.RunAndWaitForStatus(expected);
// We should have no reads.
EXPECT_TRUE(read_cb_.is_null());
}
void Preroll() {
Preroll(0, PIPELINE_OK);
}
void Preroll(int timestamp_ms, PipelineStatus expected) {
SCOPED_TRACE(base::StringPrintf("Preroll(%d, %d)", timestamp_ms, expected));
TimeDelta timestamp = TimeDelta::FromMilliseconds(timestamp_ms);
next_timestamp_->SetBaseTimestamp(timestamp);
// Fill entire buffer to complete prerolling.
WaitableMessageLoopEvent event;
renderer_->Preroll(timestamp, event.GetPipelineStatusCB());
WaitForPendingRead();
DeliverRemainingAudio();
event.RunAndWaitForStatus(PIPELINE_OK);
// We should have no reads.
EXPECT_TRUE(read_cb_.is_null());
}
void Play() {
SCOPED_TRACE("Play()");
WaitableMessageLoopEvent event;
renderer_->Play(event.GetClosure());
renderer_->SetPlaybackRate(1.0f);
event.RunAndWait();
}
void WaitForEnded() {
SCOPED_TRACE("WaitForEnded()");
ended_event_.RunAndWait();
}
void WaitForPendingRead() {
SCOPED_TRACE("WaitForPendingRead()");
if (!read_cb_.is_null())
return;
DCHECK(wait_for_pending_read_cb_.is_null());
WaitableMessageLoopEvent event;
wait_for_pending_read_cb_ = event.GetClosure();
event.RunAndWait();
DCHECK(!read_cb_.is_null());
DCHECK(wait_for_pending_read_cb_.is_null());
}
// Delivers |size| bytes with value kPlayingAudio to |renderer_|.
void SatisfyPendingRead(size_t size) {
CHECK(!read_cb_.is_null());
scoped_refptr<DataBuffer> buffer = new DataBuffer(size);
buffer->SetDataSize(size);
memset(buffer->GetWritableData(), kPlayingAudio, buffer->GetDataSize());
buffer->SetTimestamp(next_timestamp_->GetTimestamp());
buffer->SetDuration(next_timestamp_->GetDuration(buffer->GetDataSize()));
next_timestamp_->AddBytes(buffer->GetDataSize());
DeliverBuffer(AudioDecoder::kOk, buffer);
}
void AbortPendingRead() {
DeliverBuffer(AudioDecoder::kAborted, NULL);
}
void DeliverEndOfStream() {
DeliverBuffer(AudioDecoder::kOk, DataBuffer::CreateEOSBuffer());
}
// Delivers bytes until |renderer_|'s internal buffer is full and no longer
// has pending reads.
void DeliverRemainingAudio() {
SatisfyPendingRead(bytes_remaining_in_buffer());
}
// Attempts to consume |size| bytes from |renderer_|'s internal buffer,
// returning true if all |size| bytes were consumed, false if less than
// |size| bytes were consumed.
//
// |muted| is optional and if passed will get set if the byte value of
// the consumed data is muted audio.
bool ConsumeBufferedData(uint32 size, bool* muted) {
scoped_array<uint8> buffer(new uint8[size]);
uint32 bytes_per_frame = (decoder_->bits_per_channel() / 8) *
ChannelLayoutToChannelCount(decoder_->channel_layout());
uint32 requested_frames = size / bytes_per_frame;
uint32 frames_read = renderer_->FillBuffer(
buffer.get(), requested_frames, 0);
if (frames_read > 0 && muted) {
*muted = (buffer[0] == kMutedAudio);
}
return (frames_read == requested_frames);
}
// Attempts to consume all data available from the renderer. Returns the
  // number of bytes read. Since time is frozen, the audio delay will increase
// as frames come in.
int ConsumeAllBufferedData() {
renderer_->DisableUnderflowForTesting();
int frames_read = 0;
int total_frames_read = 0;
const int kRequestFrames = 1024;
const uint32 bytes_per_frame = (decoder_->bits_per_channel() / 8) *
ChannelLayoutToChannelCount(decoder_->channel_layout());
scoped_array<uint8> buffer(new uint8[kRequestFrames * bytes_per_frame]);
do {
TimeDelta audio_delay = TimeDelta::FromMicroseconds(
total_frames_read * Time::kMicrosecondsPerSecond /
static_cast<float>(decoder_->samples_per_second()));
frames_read = renderer_->FillBuffer(
buffer.get(), kRequestFrames, audio_delay.InMilliseconds());
total_frames_read += frames_read;
} while (frames_read > 0);
return total_frames_read * bytes_per_frame;
}
uint32 bytes_buffered() {
return renderer_->algorithm_->bytes_buffered();
}
uint32 buffer_capacity() {
return renderer_->algorithm_->QueueCapacity();
}
uint32 bytes_remaining_in_buffer() {
// This can happen if too much data was delivered, in which case the buffer
// will accept the data but not increase capacity.
if (bytes_buffered() > buffer_capacity()) {
return 0;
}
return buffer_capacity() - bytes_buffered();
}
void CallResumeAfterUnderflow() {
renderer_->ResumeAfterUnderflow(false);
}
TimeDelta CalculatePlayTime(int bytes_filled) {
return TimeDelta::FromMicroseconds(
bytes_filled * Time::kMicrosecondsPerSecond /
renderer_->audio_parameters_.GetBytesPerSecond());
}
void EndOfStreamTest(float playback_rate) {
Initialize();
Preroll();
Play();
renderer_->SetPlaybackRate(playback_rate);
// Drain internal buffer, we should have a pending read.
int total_bytes = bytes_buffered();
int bytes_filled = ConsumeAllBufferedData();
WaitForPendingRead();
// Due to how the cross-fade algorithm works we won't get an exact match
// between the ideal and expected number of bytes consumed. In the faster
// than normal playback case, more bytes are created than should exist and
// vice versa in the slower than normal playback case.
const float kEpsilon = 0.10 * (total_bytes / playback_rate);
EXPECT_NEAR(bytes_filled, total_bytes / playback_rate, kEpsilon);
// Figure out how long until the ended event should fire.
TimeDelta audio_play_time = CalculatePlayTime(bytes_filled);
// Fulfill the read with an end-of-stream packet. We shouldn't report ended
// nor have a read until we drain the internal buffer.
DeliverEndOfStream();
// Advance time half way without an ended expectation.
AdvanceTime(audio_play_time / 2);
ConsumeBufferedData(bytes_buffered(), NULL);
// Advance time by other half and expect the ended event.
AdvanceTime(audio_play_time / 2);
ConsumeBufferedData(bytes_buffered(), NULL);
WaitForEnded();
}
void AdvanceTime(TimeDelta time) {
base::AutoLock auto_lock(lock_);
time_ += time;
}
// Fixture members.
MessageLoop message_loop_;
scoped_refptr<AudioRendererImpl> renderer_;
private:
Time GetTime() {
base::AutoLock auto_lock(lock_);
return time_;
}
void ReadDecoder(const AudioDecoder::ReadCB& read_cb) {
// TODO(scherkus): Make this a DCHECK after threading semantics are fixed.
if (MessageLoop::current() != &message_loop_) {
message_loop_.PostTask(FROM_HERE, base::Bind(
&AudioRendererImplTest::ReadDecoder,
base::Unretained(this), read_cb));
return;
}
CHECK(read_cb_.is_null()) << "Overlapping reads are not permitted";
read_cb_ = read_cb;<|fim▁hole|> base::ResetAndReturn(&wait_for_pending_read_cb_).Run();
}
void DeliverBuffer(AudioDecoder::Status status,
const scoped_refptr<DataBuffer>& buffer) {
CHECK(!read_cb_.is_null());
base::ResetAndReturn(&read_cb_).Run(status, buffer);
}
scoped_refptr<MockDemuxerStream> demuxer_stream_;
scoped_refptr<MockAudioDecoder> decoder_;
// Used for stubbing out time in the audio callback thread.
base::Lock lock_;
Time time_;
// Used for satisfying reads.
AudioDecoder::ReadCB read_cb_;
scoped_ptr<AudioTimestampHelper> next_timestamp_;
AudioDecoderConfig audio_config_;
WaitableMessageLoopEvent ended_event_;
// Run during ReadDecoder() to unblock WaitForPendingRead().
base::Closure wait_for_pending_read_cb_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererImplTest);
};
TEST_F(AudioRendererImplTest, Initialize_Failed) {
ExpectUnsupportedAudioDecoderConfig();
InitializeWithStatus(PIPELINE_ERROR_INITIALIZATION_FAILED);
}
TEST_F(AudioRendererImplTest, Initialize_Successful) {
Initialize();
}
TEST_F(AudioRendererImplTest, Initialize_DecoderInitFailure) {
ExpectUnsupportedAudioDecoder();
InitializeWithStatus(DECODER_ERROR_NOT_SUPPORTED);
}
TEST_F(AudioRendererImplTest, Preroll) {
Initialize();
Preroll();
}
TEST_F(AudioRendererImplTest, Play) {
Initialize();
Preroll();
Play();
// Drain internal buffer, we should have a pending read.
EXPECT_TRUE(ConsumeBufferedData(bytes_buffered(), NULL));
WaitForPendingRead();
}
TEST_F(AudioRendererImplTest, EndOfStream) {
EndOfStreamTest(1.0);
}
TEST_F(AudioRendererImplTest, EndOfStream_FasterPlaybackSpeed) {
EndOfStreamTest(2.0);
}
TEST_F(AudioRendererImplTest, EndOfStream_SlowerPlaybackSpeed) {
EndOfStreamTest(0.5);
}
TEST_F(AudioRendererImplTest, Underflow) {
Initialize();
Preroll();
Play();
// Drain internal buffer, we should have a pending read.
EXPECT_TRUE(ConsumeBufferedData(bytes_buffered(), NULL));
WaitForPendingRead();
// Verify the next FillBuffer() call triggers the underflow callback
// since the decoder hasn't delivered any data after it was drained.
const size_t kDataSize = 1024;
EXPECT_CALL(*this, OnUnderflow());
EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
renderer_->ResumeAfterUnderflow(false);
// Verify after resuming that we're still not getting data.
//
// NOTE: FillBuffer() satisfies the read but returns muted audio, which
// is crazy http://crbug.com/106600
bool muted = false;
EXPECT_EQ(0u, bytes_buffered());
EXPECT_TRUE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_TRUE(muted);
// Deliver data, we should get non-muted audio.
DeliverRemainingAudio();
EXPECT_TRUE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_FALSE(muted);
}
TEST_F(AudioRendererImplTest, Underflow_EndOfStream) {
Initialize();
Preroll();
Play();
// Figure out how long until the ended event should fire. Since
// ConsumeBufferedData() doesn't provide audio delay information, the time
// until the ended event fires is equivalent to the longest buffered section,
// which is the initial bytes_buffered() read.
TimeDelta time_until_ended = CalculatePlayTime(bytes_buffered());
// Drain internal buffer, we should have a pending read.
EXPECT_TRUE(ConsumeBufferedData(bytes_buffered(), NULL));
WaitForPendingRead();
// Verify the next FillBuffer() call triggers the underflow callback
// since the decoder hasn't delivered any data after it was drained.
const size_t kDataSize = 1024;
EXPECT_CALL(*this, OnUnderflow());
EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
// Deliver a little bit of data.
SatisfyPendingRead(kDataSize);
WaitForPendingRead();
// Verify we're getting muted audio during underflow.
//
// NOTE: FillBuffer() satisfies the read but returns muted audio, which
// is crazy http://crbug.com/106600
bool muted = false;
EXPECT_EQ(kDataSize, bytes_buffered());
EXPECT_TRUE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_TRUE(muted);
// Now deliver end of stream, we should get our little bit of data back.
DeliverEndOfStream();
EXPECT_EQ(kDataSize, bytes_buffered());
EXPECT_TRUE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_FALSE(muted);
// Deliver another end of stream buffer and attempt to read to make sure
// we're truly at the end of stream.
//
// TODO(scherkus): fix AudioRendererImpl and AudioRendererAlgorithmBase to
// stop reading after receiving an end of stream buffer. It should have also
// fired the ended callback http://crbug.com/106641
WaitForPendingRead();
DeliverEndOfStream();
AdvanceTime(time_until_ended);
EXPECT_FALSE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_FALSE(muted);
WaitForEnded();
}
TEST_F(AudioRendererImplTest, Underflow_ResumeFromCallback) {
Initialize();
Preroll();
Play();
// Drain internal buffer, we should have a pending read.
EXPECT_TRUE(ConsumeBufferedData(bytes_buffered(), NULL));
WaitForPendingRead();
// Verify the next FillBuffer() call triggers the underflow callback
// since the decoder hasn't delivered any data after it was drained.
const size_t kDataSize = 1024;
EXPECT_CALL(*this, OnUnderflow())
.WillOnce(Invoke(this, &AudioRendererImplTest::CallResumeAfterUnderflow));
EXPECT_FALSE(ConsumeBufferedData(kDataSize, NULL));
// Verify after resuming that we're still not getting data.
bool muted = false;
EXPECT_EQ(0u, bytes_buffered());
EXPECT_TRUE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_TRUE(muted);
// Deliver data, we should get non-muted audio.
DeliverRemainingAudio();
EXPECT_TRUE(ConsumeBufferedData(kDataSize, &muted));
EXPECT_FALSE(muted);
}
TEST_F(AudioRendererImplTest, AbortPendingRead_Preroll) {
Initialize();
// Start prerolling and wait for a read.
WaitableMessageLoopEvent event;
renderer_->Preroll(TimeDelta(), event.GetPipelineStatusCB());
WaitForPendingRead();
// Simulate the decoder aborting the pending read.
AbortPendingRead();
event.RunAndWaitForStatus(PIPELINE_OK);
// Preroll again to a different timestamp and verify it completed normally.
Preroll(1000, PIPELINE_OK);
}
TEST_F(AudioRendererImplTest, AbortPendingRead_Pause) {
Initialize();
Preroll();
Play();
// Partially drain internal buffer so we get a pending read.
EXPECT_TRUE(ConsumeBufferedData(bytes_buffered() / 2, NULL));
WaitForPendingRead();
// Start pausing.
WaitableMessageLoopEvent event;
renderer_->Pause(event.GetClosure());
// Simulate the decoder aborting the pending read.
AbortPendingRead();
event.RunAndWait();
// Preroll again to a different timestamp and verify it completed normally.
Preroll(1000, PIPELINE_OK);
}
} // namespace media<|fim▁end|> |
// Wake up WaitForPendingRead() if needed.
if (!wait_for_pending_read_cb_.is_null()) |
<|file_name|>namespaces.test.js<|end_file_name|><|fim▁begin|>describe('Manual namespace managment', function() {
describe('Namespace#addNamespace', function() {
it('inserts a namespace for a given key', function() {
nsr.addNamespace('users').should.eq('test:users');
});
});
<|fim▁hole|> describe('Namespace#removeNamespace', function() {
it('removes a namespace for a given key', function() {
nsr.removeNamespace('test:users').should.eq('users');
});
});
});<|fim▁end|> | |
<|file_name|>0007_auto_20180813_1604.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ibms', '0006_auto_20180813_1603'),
]
operations = [
migrations.RenameField(
model_name='serviceprioritymappings',
old_name='costcentreName',
new_name='costCentreName',
),
]<|fim▁end|> | # Generated by Django 2.1 on 2018-08-13 08:04 |
<|file_name|>client_basics.rs<|end_file_name|><|fim▁begin|>extern crate riak;
use riak::Client;
use riak::bucket::{BucketProps, BucketTypeProps};
use riak::object::{DeleteObjectReq, FetchObjectReq, ObjectContent, StoreObjectReq};
use riak::yokozuna::{SearchQuery, YokozunaIndex};
use std::fs::File;
use std::io::Read;
#[test]
fn test_basics() {
// connect and ping
let mut riak = Client::new("10.0.0.2:8087").unwrap();
riak.ping().unwrap();
// get the server info
let (node, version) = riak.server_info().unwrap();
println!("connected to node {} running Riak version {}",
node,
version);
// set bucket properties
let mut bucket_props = BucketProps::new("testbucket");
bucket_props.set_backend("leveldb");
riak.set_bucket_properties(bucket_props).unwrap();
// get the properties back from the server
let bucket_props = riak.get_bucket_properties("testbucket").unwrap();
let found_backend = bucket_props.get_backend().unwrap();
assert_eq!(found_backend, "leveldb".as_bytes());
// store an object
let contents = ObjectContent::new("this is a test".as_bytes());
let mut req = StoreObjectReq::new("testbucket", contents);
req.set_key("testkey");
riak.store_object(req).unwrap();
// fetch an object
let req = FetchObjectReq::new("testbucket", "testkey");
let object = riak.fetch_object(req).unwrap();
let contents = object.content;
let content = contents.first().unwrap();
assert_eq!(content.get_value(), "this is a test".as_bytes());
// delete an object
let req = DeleteObjectReq::new("testbucket", "testkey");
riak.delete_object(req).unwrap();
// make sure deleted object is gone
let req = FetchObjectReq::new("testbucket", "testkey");
let object = riak.fetch_object(req).unwrap();
assert_eq!(object.content.len(), 0);
// list the available buckets
let buckets = riak.list_buckets().unwrap();
let mut bucket_exists = false;
for bucket in buckets.iter() {
if *bucket == "testbucket".as_bytes() {
bucket_exists = true;
}
}
assert!(bucket_exists);
// list the available keys
let keys = riak.list_keys("testbucket").unwrap();
let mut key_exists = false;
for key in keys.iter() {
if *key == "testkey".as_bytes() {
key_exists = true;
}
}
assert!(key_exists);
// fetch the preflist for testbucket/testkey
let preflist = riak.fetch_preflist("testbucket", "testkey").unwrap();
let mut lives_on_nodes: u8 = 0;
let mut has_primary_node = false;
for preflist_item in preflist.iter() {
lives_on_nodes = lives_on_nodes + 1;
if preflist_item.is_primary {
has_primary_node = true;
}
}
assert_eq!(lives_on_nodes, 3);
assert!(has_primary_node);
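    // With Riak's default n_val of 3 this preflist should list three vnodes for
    // testbucket/testkey, at least one of which is a primary replica.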
// set properties for a bucket type<|fim▁hole|> bucket_props.set_backend("leveldb");
riak.set_bucket_type_properties(bucket_props).unwrap();
// get the properties back for a bucket type and verify them
let bucket_props = riak.get_bucket_type_properties("testbuckettype").unwrap();
assert_eq!(bucket_props.get_backend().expect("could not get backend"),
"leveldb".as_bytes());
// set a search schema
let mut xml: Vec<u8> = Vec::new();
let mut file = File::open("/tmp/riak-rust-client-default-schema.xml").unwrap();
let _ = file.read_to_end(&mut xml).unwrap();
let schema_name = "schedule".to_string().into_bytes();
riak.set_yokozuna_schema(schema_name.clone(), xml.clone()).unwrap();
// retrieve the search schema
let schema = riak.get_yokozuna_schema(schema_name.clone()).unwrap();
assert_eq!(schema, xml);
// set a search index
let index_name = "myindex".to_string().into_bytes();
let mut index = YokozunaIndex::new(index_name.clone());
index.set_schema(schema_name);
index.set_n_val(3);
riak.set_yokozuna_index(index).unwrap();
// get the search index
let index = riak.get_yokozuna_index(index_name.clone()).unwrap();
assert_eq!(index[0].get_name(), index_name);
// run a search
let mut query = SearchQuery::new("test*", "myindex");
query.set_df("_yz_id");
riak.search(query).unwrap();
// run a MapReduce job
let job = r#"
{"inputs": "bucket_501653", "query": [
{"map": {
"arg": null,
"name": "Riak.mapValuesJson",
"language": "javascript",
"keep": false
}},
{"reduce": {
"arg": null,
"name": "Riak.reduceSum",
"language": "javascript",
"keep": true
}}
]}
"#;
riak.mapreduce(job, "application/json").unwrap();
}<|fim▁end|> | let mut bucket_props = BucketTypeProps::new("testbuckettype"); |
<|file_name|>linktastic.py<|end_file_name|><|fim▁begin|># Linktastic Module
# - A python2/3 compatible module that can create hardlinks/symlinks on windows-based systems
#
# Linktastic is distributed under the MIT License. The following are the terms and conditions of using Linktastic.
#
# The MIT License (MIT)
# Copyright (c) 2012 Solipsis Development
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import subprocess
from subprocess import CalledProcessError
import os
# Prevent spaces from messing with us!
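# e.g. (hypothetical path) _escape_param('C:\\My Files\\video.mkv') returns '"C:\\My Files\\video.mkv"',
# so arguments containing spaces survive interpolation into the mklink command lines below.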
def _escape_param(param):
return '"%s"' % param
# Private function to create link on nt-based systems
def _link_windows(src, dest):
try:
subprocess.check_output(
'cmd /C mklink /H %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good
def _symlink_windows(src, dest):
try:
subprocess.check_output(
'cmd /C mklink %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good<|fim▁hole|> 'cmd /C mklink /J %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good
def _junctionlink_windows(src, dest):
try:
subprocess.check_output(
'cmd /C mklink /D %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good
# Create a hard link to src named as dest
# This version of link, unlike os.link, supports nt systems as well
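# Example (hypothetical paths): link("downloads/movie.mkv", "library/movie.mkv") leaves both
# names pointing at the same file contents, via os.link on POSIX and "mklink /H" on Windows.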
def link(src, dest):
if os.name == 'nt':
_link_windows(src, dest)
else:
os.link(src, dest)
# Create a symlink to src named as dest, but don't fail if you're on nt
def symlink(src, dest):
if os.name == 'nt':
_symlink_windows(src, dest)
else:
os.symlink(src, dest)
# Create a directory link to src named as dest, but don't fail if you're on nt
def dirlink(src, dest):
if os.name == 'nt':
_dirlink_windows(src, dest)
else:
os.symlink(src, dest)
# Create a junction link to src named as dest, but don't fail if you're on nt
def junctionlink(src, dest):
if os.name == 'nt':
_junctionlink_windows(src, dest)
else:
os.symlink(src, dest)<|fim▁end|> |
def _dirlink_windows(src, dest):
try:
subprocess.check_output( |
<|file_name|>pl.js<|end_file_name|><|fim▁begin|>/*<|fim▁hole|>CKEDITOR.plugins.setLang( 'autoembed', 'pl', {
embeddingInProgress: 'Osadzanie wklejonego adresu URL...',
embeddingFailed: 'Ten adres URL multimediów nie może być automatycznie osadzony.'
} );<|fim▁end|> | Copyright (c) 2003-2015, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or http://ckeditor.com/license
*/ |
<|file_name|>createpool.py<|end_file_name|><|fim▁begin|>#
# Copyright (C) 2008, 2013 Red Hat, Inc.
# Copyright (C) 2008 Cole Robinson <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
# pylint: disable=E0611
from gi.repository import Gtk
from gi.repository import Gdk
# pylint: enable=E0611
import logging
from virtManager.baseclass import vmmGObjectUI
from virtManager.asyncjob import vmmAsyncJob
from virtManager import uiutil
from virtinst import StoragePool
PAGE_NAME = 0
PAGE_FORMAT = 1
class vmmCreatePool(vmmGObjectUI):
def __init__(self, conn):
vmmGObjectUI.__init__(self, "createpool.ui", "vmm-create-pool")
self.conn = conn
self._pool = None
self.builder.connect_signals({
"on_pool_forward_clicked" : self.forward,
"on_pool_back_clicked" : self.back,
"on_pool_cancel_clicked" : self.close,
"on_vmm_create_pool_delete_event" : self.close,
"on_pool_finish_clicked" : self.forward,
"on_pool_pages_change_page" : self.page_changed,
"on_pool_source_button_clicked" : self.browse_source_path,
"on_pool_target_button_clicked" : self.browse_target_path,
"on_pool_name_activate": self.forward,
"on_pool_hostname_activate" : self.hostname_changed,
"on_pool_iqn_chk_toggled": self.iqn_toggled,
})
self.bind_escape_key_close()
self.set_initial_state()
self.set_page(PAGE_NAME)
def show(self, parent):
logging.debug("Showing new pool wizard")
self.reset_state()
self.topwin.set_transient_for(parent)
self.topwin.present()
def close(self, ignore1=None, ignore2=None):
logging.debug("Closing new pool wizard")
self.topwin.hide()
return 1
def _cleanup(self):
self.conn = None
self._pool = None
def set_initial_state(self):
self.widget("pool-pages").set_show_tabs(False)
blue = Gdk.Color.parse("#0072A8")[1]
self.widget("header").modify_bg(Gtk.StateType.NORMAL, blue)
type_list = self.widget("pool-type")
type_model = Gtk.ListStore(str, str)
type_list.set_model(type_model)
uiutil.set_combo_text_column(type_list, 1)
format_list = self.widget("pool-format")
format_model = Gtk.ListStore(str, str)
format_list.set_model(format_model)
uiutil.set_combo_text_column(format_list, 1)
# Target path combo box entry
target_list = self.widget("pool-target-path")
# target_path, Label, pool class instance
target_model = Gtk.ListStore(str, str, object)
target_model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
target_list.set_model(target_model)
target_list.set_entry_text_column(0)
# Source path combo box entry
source_list = self.widget("pool-source-path")
# source_path, Label, pool class instance
source_model = Gtk.ListStore(str, str, object)
source_model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
source_list.set_model(source_model)
source_list.set_entry_text_column(0)
self.populate_pool_type()
def reset_state(self):
self.widget("pool-pages").set_current_page(0)
self.widget("pool-forward").show()
self.widget("pool-finish").hide()
self.widget("pool-back").set_sensitive(False)
self.widget("pool-name").set_text("")
self.widget("pool-name").grab_focus()
self.widget("pool-type").set_active(0)
self.widget("pool-target-path").get_child().set_text("")
self.widget("pool-source-path").get_child().set_text("")
self.widget("pool-hostname").set_text("")
self.widget("pool-iqn-chk").set_active(False)
self.widget("pool-iqn-chk").toggled()
self.widget("pool-iqn").set_text("")
self.widget("pool-format").set_active(-1)
self.widget("pool-build").set_sensitive(True)
self.widget("pool-build").set_active(False)
self.widget("pool-details-grid").set_visible(False)
def hostname_changed(self, ignore):
# If a hostname was entered, try to lookup valid pool sources.
self.populate_pool_sources()
def iqn_toggled(self, src):
self.widget("pool-iqn").set_sensitive(src.get_active())
def populate_pool_type(self):
model = self.widget("pool-type").get_model()
model.clear()
types = StoragePool.get_pool_types()
types.sort()
for typ in types:
model.append([typ, "%s: %s" %
(typ, StoragePool.get_pool_type_desc(typ))])
def populate_pool_format(self, formats):
model = self.widget("pool-format").get_model()
model.clear()
for f in formats:
model.append([f, f])
def populate_pool_sources(self):
source_list = self.widget("pool-source-path")
source_model = source_list.get_model()
source_model.clear()
target_list = self.widget("pool-target-path")
target_model = target_list.get_model()
target_model.clear()
use_list = source_list
use_model = source_model
entry_list = []
if self._pool.type == StoragePool.TYPE_SCSI:
entry_list = self.list_scsi_adapters()
use_list = source_list
use_model = source_model
elif self._pool.type == StoragePool.TYPE_LOGICAL:
pool_list = self.list_pool_sources()
entry_list = [[p.target_path, p.target_path, p]
for p in pool_list]
use_list = target_list
use_model = target_model
elif self._pool.type == StoragePool.TYPE_DISK:
entry_list = self.list_disk_devs()
use_list = source_list
use_model = source_model
elif self._pool.type == StoragePool.TYPE_NETFS:
host = self.get_config_host()
if host:
pool_list = self.list_pool_sources(host=host)
entry_list = [[p.source_path, p.source_path, p]
for p in pool_list]
use_list = source_list
use_model = source_model
for e in entry_list:
use_model.append(e)
if entry_list:
use_list.set_active(0)
def list_scsi_adapters(self):
scsi_hosts = self.conn.get_nodedevs("scsi_host")
host_list = [dev.host for dev in scsi_hosts]
clean_list = []
for h in host_list:
name = "host%s" % h
tmppool = self._make_stub_pool()
tmppool.source_path = name
entry = [name, name, tmppool]
if name not in [l[0] for l in clean_list]:
clean_list.append(entry)
return clean_list
def list_disk_devs(self):
devs = self.conn.get_nodedevs("storage")
devlist = []
for dev in devs:
if dev.drive_type != "disk" or not dev.block:
continue
devlist.append(dev.block)
devlist.sort()
clean_list = []
for dev in devlist:
tmppool = self._make_stub_pool()
tmppool.source_path = dev
entry = [dev, dev, tmppool]
if dev not in [l[0] for l in clean_list]:
clean_list.append(entry)
return clean_list
def list_pool_sources(self, host=None):
pool_type = self._pool.type
plist = []
try:
plist = StoragePool.pool_list_from_sources(
self.conn.get_backend(),
pool_type,
host=host)
except Exception:
logging.exception("Pool enumeration failed")
return plist
def show_options_by_pool(self):
def show_row(base, do_show):
widget = self.widget(base + "-label")
uiutil.set_grid_row_visible(widget, do_show)
src = self._pool.supports_property("source_path")
src_b = src and not self.conn.is_remote()
src_name = self._pool.type == StoragePool.TYPE_GLUSTER
tgt = self._pool.supports_property("target_path")
tgt_b = tgt and not self.conn.is_remote()
host = self._pool.supports_property("host")
fmt = self._pool.supports_property("formats")
iqn = self._pool.supports_property("iqn")
builddef, buildsens = self.get_build_default()
        # Source path browsing is meaningless for net pools
if self._pool.type in [StoragePool.TYPE_NETFS,
StoragePool.TYPE_ISCSI,
StoragePool.TYPE_SCSI]:
src_b = False
show_row("pool-target", tgt)
show_row("pool-source", src)
show_row("pool-hostname", host)
show_row("pool-format", fmt)
show_row("pool-build", buildsens)
show_row("pool-iqn", iqn)
show_row("pool-source-name", src_name)
if tgt:
self.widget("pool-target-path").get_child().set_text(
self._pool.target_path)
self.widget("pool-target-button").set_sensitive(tgt_b)
self.widget("pool-source-button").set_sensitive(src_b)
self.widget("pool-build").set_active(builddef)
if src_name:
self.widget("pool-source-name").get_child().set_text(
self._pool.source_name)
self.widget("pool-format").set_active(-1)
if fmt:
self.populate_pool_format(self._pool.list_formats("formats"))
self.widget("pool-format").set_active(0)
self.populate_pool_sources()
def get_config_type(self):
return uiutil.get_list_selection(self.widget("pool-type"), 0)
def get_config_name(self):
return self.widget("pool-name").get_text()
def get_config_target_path(self):
src = self.widget("pool-target-path")
if not src.get_sensitive():
return None
ret = uiutil.get_list_selection(src, 1)
if ret is not None:
return ret
return src.get_child().get_text()
def get_config_source_path(self):
src = self.widget("pool-source-path")
if not src.get_sensitive():
return None
ret = uiutil.get_list_selection(src, 1)
if ret is not None:
return ret
return src.get_child().get_text().strip()
def get_config_host(self):
host = self.widget("pool-hostname")
if host.get_sensitive():
return host.get_text().strip()
return None
def get_config_source_name(self):
name = self.widget("pool-source-name")
if name.get_sensitive():
return name.get_text().strip()
return None
def get_config_format(self):
return uiutil.get_list_selection(self.widget("pool-format"), 0)
def get_config_iqn(self):
iqn = self.widget("pool-iqn")
if iqn.get_sensitive() and iqn.get_visible():
return iqn.get_text().strip()
return None
def get_build_default(self):
""" Return (default value, whether build option can be changed)"""
if not self._pool:
return (False, False)
if self._pool.type in [StoragePool.TYPE_DIR,<|fim▁hole|> elif self._pool.type in [StoragePool.TYPE_LOGICAL,
StoragePool.TYPE_DISK]:
# This is a dangerous operation, anything (False, True)
# should be assumed to be one.
return (False, True)
else:
return (False, False)
def browse_source_path(self, ignore1=None):
source = self._browse_file(_("Choose source path"),
startfolder="/dev", foldermode=False)
if source:
self.widget("pool-source-path").get_child().set_text(source)
def browse_target_path(self, ignore1=None):
target = self._browse_file(_("Choose target directory"),
startfolder="/var/lib/libvirt",
foldermode=True)
if target:
self.widget("pool-target-path").get_child().set_text(target)
def forward(self, ignore=None):
notebook = self.widget("pool-pages")
try:
if self.validate(notebook.get_current_page()) is not True:
return
if notebook.get_current_page() == PAGE_FORMAT:
self.finish()
else:
notebook.next_page()
except Exception, e:
self.err.show_err(_("Uncaught error validating input: %s") % str(e))
return
def back(self, ignore=None):
self.widget("pool-pages").prev_page()
def _finish_cb(self, error, details):
self.topwin.set_sensitive(True)
self.topwin.get_window().set_cursor(
Gdk.Cursor.new(Gdk.CursorType.TOP_LEFT_ARROW))
if error:
error = _("Error creating pool: %s") % error
self.err.show_err(error,
details=details)
else:
self.conn.schedule_priority_tick(pollpool=True)
self.close()
def finish(self):
self.topwin.set_sensitive(False)
self.topwin.get_window().set_cursor(
Gdk.Cursor.new(Gdk.CursorType.WATCH))
build = self.widget("pool-build").get_active()
progWin = vmmAsyncJob(self._async_pool_create, [build],
self._finish_cb, [],
_("Creating storage pool..."),
_("Creating the storage pool may take a "
"while..."),
self.topwin)
progWin.run()
def _async_pool_create(self, asyncjob, build):
meter = asyncjob.get_meter()
logging.debug("Starting backround pool creation.")
poolobj = self._pool.install(create=True, meter=meter, build=build)
poolobj.setAutostart(True)
logging.debug("Pool creation succeeded")
def set_page(self, page_number):
# Update page number
page_lbl = ("<span color='#59B0E2'>%s</span>" %
_("Step %(current_page)d of %(max_page)d") %
{'current_page': page_number + 1,
'max_page': PAGE_FORMAT + 1})
self.widget("header-pagenum").set_markup(page_lbl)
isfirst = (page_number == PAGE_NAME)
islast = (page_number == PAGE_FORMAT)
self.widget("pool-back").set_sensitive(not isfirst)
self.widget("pool-finish").set_visible(islast)
self.widget("pool-forward").set_visible(not islast)
self.widget(islast and "pool-finish" or "pool-forward").grab_focus()
self.widget("pool-details-grid").set_visible(islast)
if islast:
self.show_options_by_pool()
def page_changed(self, notebook_ignore, page_ignore, page_number):
self.set_page(page_number)
def get_pool_to_validate(self):
"""
Return a pool instance to use for parameter assignment validation.
For most pools this will be the one we built after step 1, but for
pools we find via FindPoolSources, this will be different
"""
source_list = self.widget("pool-source-path")
target_list = self.widget("pool-target-path")
pool = uiutil.get_list_selection(source_list, 2)
if pool is None:
pool = uiutil.get_list_selection(target_list, 2)
return pool
def _make_stub_pool(self):
pool = StoragePool(self.conn.get_backend())
pool.type = self.get_config_type()
return pool
def _validate_page_name(self, usepool=None):
try:
if usepool:
self._pool = usepool
else:
self._pool = self._make_stub_pool()
self._pool.name = self.get_config_name()
except ValueError, e:
return self.err.val_err(_("Pool Parameter Error"), e)
return True
def _validate_page_format(self):
target = self.get_config_target_path()
host = self.get_config_host()
source = self.get_config_source_path()
fmt = self.get_config_format()
iqn = self.get_config_iqn()
source_name = self.get_config_source_name()
if not self._validate_page_name(self.get_pool_to_validate()):
return
try:
self._pool.target_path = target
if host:
self._pool.host = host
if source:
self._pool.source_path = source
if fmt:
self._pool.format = fmt
if iqn:
self._pool.iqn = iqn
if source_name:
self._pool.source_name = source_name
self._pool.validate()
except ValueError, e:
return self.err.val_err(_("Pool Parameter Error"), e)
buildval = self.widget("pool-build").get_active()
buildsen = (self.widget("pool-build").get_sensitive() and
self.widget("pool-build").get_visible())
if buildsen and buildval:
ret = self.err.yes_no(_("Building a pool of this type will "
"format the source device. Are you "
"sure you want to 'build' this pool?"))
if not ret:
return ret
return True
def validate(self, page):
if page == PAGE_NAME:
return self._validate_page_name()
elif page == PAGE_FORMAT:
return self._validate_page_format()
def _browse_file(self, dialog_name, startfolder=None, foldermode=False):
mode = Gtk.FileChooserAction.OPEN
if foldermode:
mode = Gtk.FileChooserAction.SELECT_FOLDER
return self.err.browse_local(self.conn, dialog_name,
dialog_type=mode, start_folder=startfolder)<|fim▁end|> | StoragePool.TYPE_FS,
StoragePool.TYPE_NETFS]:
# Building for these simply entails creating a directory
return (True, False) |
<|file_name|>AdminRouter.js<|end_file_name|><|fim▁begin|>define([
'jquery',
'underscore',
'backbone',
'views/AdminView',
'authentication',
'models/Beach'
], function ( $, _, Backbone, AdminView, Authentication, BeachModel) {
var AdminRouter = Backbone.Router.extend({
routes: {<|fim▁hole|> 'admin' : 'index'
},
index: function () {
Authentication.authorize(function () {
$('#content').html("<p style='display: block; font-size: 15%; text-align: center; line-height: 100vh; margin: 0;'>LOADING</p>");
beaches = new BeachModel.Collection();
beaches.fetch( {
success: function( collection, response, options) {
var adminView = new AdminView({ collection: collection });
$('#content').html(adminView.el);
},
failure: function( collection, response, options) {
$('#content').html("An error has occured.");
}
});
}, true);
},
});
return AdminRouter;
});<|fim▁end|> | |
<|file_name|>0002_transaction_response.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transactions', '0001_initial'),
]
<|fim▁hole|> migrations.AddField(
model_name='transaction',
name='response',
field=models.CharField(default=b'', max_length=4, null=True, blank=True),
),
]<|fim▁end|> | operations = [ |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.extensions import PageExtensionAdmin, TitleExtensionAdmin
from django.conf import settings
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .forms import TitleMetaAdminForm
from .models import PageMeta, TitleMeta
class PageMetaAdmin(PageExtensionAdmin):
raw_id_fields = ('og_author',)
fieldsets = (
(None, {'fields': ('image',)}),
(_('OpenGraph'), {
'fields': (
'og_type', ('og_author', 'og_author_url', 'og_author_fbid'),
('og_publisher', 'og_app_id')
),
'classes': ('collapse',)
}),
(_('Twitter Cards'), {
'fields': ('twitter_type', 'twitter_author'),
'classes': ('collapse',)
}),
(_('Google+ Snippets'), {
'fields': ('gplus_type', 'gplus_author'),
'classes': ('collapse',)
}),
)
class Media:
css = {
'all': ('%sdjangocms_page_meta/css/%s' % (
settings.STATIC_URL, 'djangocms_page_meta_admin.css'),)
}
def get_model_perms(self, request):
"""
Return empty perms dict thus hiding the model from admin index.
"""
return {}
admin.site.register(PageMeta, PageMetaAdmin)
class TitleMetaAdmin(TitleExtensionAdmin):
form = TitleMetaAdminForm
class Media:
css = {
'all': ('%sdjangocms_page_meta/css/%s' % (
settings.STATIC_URL, 'djangocms_page_meta_admin.css'),)
}
def get_model_perms(self, request):
"""
Return empty perms dict thus hiding the model from admin index.
"""
return {}
<|fim▁hole|><|fim▁end|> | admin.site.register(TitleMeta, TitleMetaAdmin) |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>import os
import sys
import logging
import inspect
from inspect import getmembers, isfunction
from commands import command
import handlers
logger = logging.getLogger(__name__)
class tracker:
def __init__(self):
self.bot = None
self.list = []
self.reset()
def set_bot(self, bot):
self.bot = bot
def reset(self):
self._current = {
"commands": {
"admin": [],
"user": [],
"all": None
},
"handlers": [],
"shared": [],
"metadata": None
}
def start(self, metadata):
self.reset()
self._current["metadata"] = metadata
def current(self):
self._current["commands"]["all"] = list(
set(self._current["commands"]["admin"] +
self._current["commands"]["user"]))
return self._current
def end(self):
self.list.append(self.current())
def register_command(self, type, command_names):
"""call during plugin init to register commands"""
self._current["commands"][type].extend(command_names)
self._current["commands"][type] = list(set(self._current["commands"][type]))
def register_handler(self, function, type, priority):
self._current["handlers"].append((function, type, priority))
def register_shared(self, id, objectref, forgiving):
self._current["shared"].append((id, objectref, forgiving))
tracking = tracker()
"""helpers"""
def register_user_command(command_names):
"""user command registration"""
if not isinstance(command_names, list):
command_names = [command_names]
tracking.register_command("user", command_names)
def register_admin_command(command_names):
"""admin command registration, overrides user command registration"""
if not isinstance(command_names, list):
command_names = [command_names]
tracking.register_command("admin", command_names)
def register_handler(function, type="message", priority=50):
"""register external handler"""
bot_handlers = tracking.bot._handlers
bot_handlers.register_handler(function, type, priority)
def register_shared(id, objectref, forgiving=True):
"""register shared object"""
bot = tracking.bot
bot.register_shared(id, objectref, forgiving=forgiving)
"""plugin loader"""
def retrieve_all_plugins(plugin_path=None, must_start_with=False):
"""recursively loads all plugins from the standard plugins path
* a plugin file or folder must not begin with . or _
* a subfolder containing a plugin must have an __init__.py file
* sub-plugin files (additional plugins inside a subfolder) must be prefixed with the
plugin/folder name for it to be automatically loaded
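    e.g. (hypothetical names) plugins/foo.py, plugins/bar/__init__.py and plugins/bar/bar_extra.py
      would be reported as ["foo", "bar", "bar.bar_extra"]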
"""
if not plugin_path:
plugin_path = os.path.dirname(os.path.realpath(sys.argv[0])) + os.sep + "plugins"
plugin_list = []
nodes = os.listdir(plugin_path)
for node_name in nodes:
full_path = os.path.join(plugin_path, node_name)
module_names = [ os.path.splitext(node_name)[0] ] # node_name without .py extension
<|fim▁hole|>
if must_start_with and not node_name.startswith(must_start_with):
continue
if os.path.isfile(full_path):
if not node_name.endswith(".py"):
continue
else:
if not os.path.isfile(os.path.join(full_path, "__init__.py")):
continue
for sm in retrieve_all_plugins(full_path, must_start_with=node_name):
module_names.append(module_names[0] + "." + sm)
plugin_list.extend(module_names)
logger.debug("retrieved {}: {}.{}".format(len(plugin_list), must_start_with or "plugins", plugin_list))
return plugin_list
def get_configured_plugins(bot):
all_plugins = retrieve_all_plugins()
config_plugins = bot.get_config_option('plugins')
if config_plugins is None: # must be unset in config or null
logger.info("plugins is not defined, using ALL")
plugin_list = all_plugins
else:
"""perform fuzzy matching with actual retrieved plugins, e.g. "abc" matches "xyz.abc"
if more than one match found, don't load plugin
"""
plugins_included = []
plugins_excluded = all_plugins
plugin_name_ambiguous = []
plugin_name_not_found = []
for configured in config_plugins:
dotconfigured = "." + configured
matches = []
for found in plugins_excluded:
fullfound = "plugins." + found
if fullfound.endswith(dotconfigured):
matches.append(found)
num_matches = len(matches)
if num_matches <= 0:
logger.debug("{} no match".format(configured))
plugin_name_not_found.append(configured)
elif num_matches == 1:
logger.debug("{} matched to {}".format(configured, matches[0]))
plugins_included.append(matches[0])
plugins_excluded.remove(matches[0])
else:
logger.debug("{} ambiguous, matches {}".format(configured, matches))
plugin_name_ambiguous.append(configured)
if plugins_excluded:
logger.info("excluded {}: {}".format(len(plugins_excluded), plugins_excluded))
if plugin_name_ambiguous:
logger.warning("ambiguous plugin names: {}".format(plugin_name_ambiguous))
if plugin_name_not_found:
logger.warning("plugin not found: {}".format(plugin_name_not_found))
plugin_list = plugins_included
logger.info("included {}: {}".format(len(plugin_list), plugin_list))
return plugin_list
def load(bot, command_dispatcher):
"""load plugins and perform any initialisation required to set them up"""
tracking.set_bot(bot)
command_dispatcher.set_tracking(tracking)
plugin_list = get_configured_plugins(bot)
for module in plugin_list:
module_path = "plugins.{}".format(module)
tracking.start({ "module": module, "module.path": module_path })
try:
exec("import {}".format(module_path))
except Exception as e:
logger.exception("EXCEPTION during plugin import: {}".format(module_path))
continue
public_functions = [o for o in getmembers(sys.modules[module_path], isfunction)]
candidate_commands = []
"""pass 1: run optional callable: _initialise, _initialize
* performs house-keeping tasks (e.g. migration, tear-up, pre-init, etc)
* registers user and/or admin commands
"""
available_commands = False # default: ALL
try:
for function_name, the_function in public_functions:
if function_name == "_initialise" or function_name == "_initialize":
"""accepted function signatures:
CURRENT
version >= 2.4 | function()
version >= 2.4 | function(bot) - parameter must be named "bot"
LEGACY
version <= 2.4 | function(handlers, bot)
ancient | function(handlers)
"""
_expected = list(inspect.signature(the_function).parameters)
if len(_expected) == 0:
the_function()
_return = []
elif len(_expected) == 1 and _expected[0] == "bot":
the_function(bot)
_return = []
else:
try:
# legacy support, pre-2.4
_return = the_function(bot._handlers, bot)
except TypeError as e:
# legacy support, ancient plugins
_return = the_function(bot._handlers)
if type(_return) is list:
available_commands = _return
elif function_name.startswith("_"):
pass
else:
candidate_commands.append((function_name, the_function))
if available_commands is False:
# implicit init, legacy support: assume all candidate_commands are user-available
register_user_command([function_name for function_name, function in candidate_commands])
            elif available_commands == []:
# explicit init, no user-available commands
pass
else:
# explicit init, legacy support: _initialise() returned user-available commands
register_user_command(available_commands)
except Exception as e:
logger.exception("EXCEPTION during plugin init: {}".format(module_path))
continue # skip this, attempt next plugin
"""
pass 2: register filtered functions
tracking.current() and the CommandDispatcher registers might be out of sync if a
combination of decorators and register_user_command/register_admin_command is used since
decorators execute immediately upon import
"""
plugin_tracking = tracking.current()
explicit_admin_commands = plugin_tracking["commands"]["admin"]
all_commands = plugin_tracking["commands"]["all"]
registered_commands = []
for function_name, the_function in candidate_commands:
if function_name in all_commands:
is_admin = False
text_function_name = function_name
if function_name in explicit_admin_commands:
is_admin = True
text_function_name = "*" + text_function_name
command_dispatcher.register(the_function, admin=is_admin)
registered_commands.append(text_function_name)
if registered_commands:
logger.info("{} - {}".format(module, ", ".join(registered_commands)))
else:
logger.info("{} - no commands".format(module))
tracking.end()
@command.register(admin=True)
def plugininfo(bot, event, *args):
"""dumps plugin information"""
lines = []
for plugin in tracking.list:
if len(args) == 0 or args[0] in plugin["metadata"]["module"]:
lines.append("<b>{}</b>".format(plugin["metadata"]["module.path"]))
"""admin commands"""
if len(plugin["commands"]["admin"]) > 0:
lines.append("<i>admin commands:</i> {}".format(", ".join(plugin["commands"]["admin"])))
"""user-only commands"""
user_only_commands = list(set(plugin["commands"]["user"]) - set(plugin["commands"]["admin"]))
if len(user_only_commands) > 0:
lines.append("<i>user commands:</i> {}".format(", ".join(user_only_commands)))
"""handlers"""
if len(plugin["handlers"]) > 0:
lines.append("<i>handlers:</i>" + ", ".join([ "{} ({}, p={})".format(f[0].__name__, f[1], str(f[2])) for f in plugin["handlers"]]))
"""shared"""
if len(plugin["shared"]) > 0:
lines.append("<i>shared:</i>" + ", ".join([f[1].__name__ for f in plugin["shared"]]))
lines.append("")
bot.send_html_to_conversation(event.conv_id, "<br />".join(lines))<|fim▁end|> | if node_name.startswith(("_", ".")):
continue |
<|file_name|>zookeeper.go<|end_file_name|><|fim▁begin|>package zookeeper
import (
"strings"
"time"
"github.com/docker/libkv"
"github.com/docker/libkv/store"
zk "github.com/samuel/go-zookeeper/zk"
)
const (
// SOH control character
SOH = "\x01"
defaultTimeout = 10 * time.Second
)
// Zookeeper is the receiver type for
// the Store interface
type Zookeeper struct {
timeout time.Duration
client *zk.Conn
}
type zookeeperLock struct {
client *zk.Conn
lock *zk.Lock
key string
value []byte
}
// Register registers zookeeper to libkv
func Register() {
libkv.AddStore(store.ZK, New)
}
// New creates a new Zookeeper client given a
// list of endpoints and an optional tls config
func New(endpoints []string, options *store.Config) (store.Store, error) {
s := &Zookeeper{}
s.timeout = defaultTimeout
// Set options
if options != nil {
if options.ConnectionTimeout != 0 {
s.setTimeout(options.ConnectionTimeout)
}
}
// Connect to Zookeeper
conn, _, err := zk.Connect(endpoints, s.timeout)
if err != nil {
return nil, err
}
s.client = conn
return s, nil
}
// setTimeout sets the timeout for connecting to Zookeeper
func (s *Zookeeper) setTimeout(time time.Duration) {
s.timeout = time
}
// Get the value at "key", returns the last modified index
// to use in conjunction to Atomic calls
func (s *Zookeeper) Get(key string) (pair *store.KVPair, err error) {
resp, meta, err := s.client.Get(s.normalize(key))
if err != nil {
if err == zk.ErrNoNode {
return nil, store.ErrKeyNotFound
}
return nil, err
}
// FIXME handle very rare cases where Get returns the
// SOH control character instead of the actual value
if string(resp) == SOH {
return s.Get(store.Normalize(key))
}
pair = &store.KVPair{
Key: key,
Value: resp,
LastIndex: uint64(meta.Version),
}
return pair, nil
}
// createFullPath creates the entire path for a directory
// that does not exist
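// For example (hypothetical key), createFullPath([]string{"docker", "nodes", "n1"}, false)
// issues Create calls for /docker, /docker/nodes and /docker/nodes/n1 in order, treating
// ErrNodeExists on already-present znodes as success; with ephemeral=true only the leaf is ephemeral.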
func (s *Zookeeper) createFullPath(path []string, ephemeral bool) error {
for i := 1; i <= len(path); i++ {
newpath := "/" + strings.Join(path[:i], "/")
if i == len(path) && ephemeral {
_, err := s.client.Create(newpath, []byte{}, zk.FlagEphemeral, zk.WorldACL(zk.PermAll))
return err
}
_, err := s.client.Create(newpath, []byte{}, 0, zk.WorldACL(zk.PermAll))
if err != nil {
// Skip if node already exists
if err != zk.ErrNodeExists {
return err
}
}
}
return nil
}
// Put a value at "key"
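// If the key does not exist yet, the full path is created first via createFullPath; with
// opts.TTL > 0 the leaf znode is ephemeral, so it disappears when this client's session ends.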
func (s *Zookeeper) Put(key string, value []byte, opts *store.WriteOptions) error {
fkey := s.normalize(key)
exists, err := s.Exists(key)
if err != nil {
return err
}
if !exists {
if opts != nil && opts.TTL > 0 {
s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), true)
} else {
s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), false)
}
}
_, err = s.client.Set(fkey, value, -1)
return err
}
// Delete a value at "key"
func (s *Zookeeper) Delete(key string) error {
err := s.client.Delete(s.normalize(key), -1)
if err == zk.ErrNoNode {
return store.ErrKeyNotFound
}
return err
}
// Exists checks if the key exists inside the store
func (s *Zookeeper) Exists(key string) (bool, error) {
exists, _, err := s.client.Exists(s.normalize(key))
if err != nil {
return false, err
}
return exists, nil
}
// Watch for changes on a "key"
// It returns a channel that will receive changes or pass
// on errors. Upon creation, the current value will first
// be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Zookeeper) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
// Get the key first
pair, err := s.Get(key)
if err != nil {
return nil, err
}
// Catch zk notifications and fire changes into the channel.
watchCh := make(chan *store.KVPair)
go func() {
defer close(watchCh)
// Get returns the current value to the channel prior
// to listening to any event that may occur on that key
watchCh <- pair
for {
_, _, eventCh, err := s.client.GetW(s.normalize(key))
if err != nil {
return
}
select {
case e := <-eventCh:
if e.Type == zk.EventNodeDataChanged {
if entry, err := s.Get(key); err == nil {
watchCh <- entry
}
}
case <-stopCh:
// There is no way to stop GetW so just quit
return
}
}
}()
return watchCh, nil
}
<|fim▁hole|>// on errors. Upon creating a watch, the current childs values
// will be sent to the channel. Providing a non-nil stopCh can
// be used to stop watching.
func (s *Zookeeper) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
// List the childrens first
entries, err := s.List(directory)
if err != nil {
return nil, err
}
// Catch zk notifications and fire changes into the channel.
watchCh := make(chan []*store.KVPair)
go func() {
defer close(watchCh)
// List returns the children values to the channel
// prior to listening to any events that may occur
// on those keys
watchCh <- entries
for {
_, _, eventCh, err := s.client.ChildrenW(s.normalize(directory))
if err != nil {
return
}
select {
case e := <-eventCh:
if e.Type == zk.EventNodeChildrenChanged {
if kv, err := s.List(directory); err == nil {
watchCh <- kv
}
}
case <-stopCh:
// There is no way to stop GetW so just quit
return
}
}
}()
return watchCh, nil
}
// List child nodes of a given directory
func (s *Zookeeper) List(directory string) ([]*store.KVPair, error) {
keys, stat, err := s.client.Children(s.normalize(directory))
if err != nil {
if err == zk.ErrNoNode {
return nil, store.ErrKeyNotFound
}
return nil, err
}
kv := []*store.KVPair{}
// FIXME Costly Get request for each child key..
for _, key := range keys {
pair, err := s.Get(strings.TrimSuffix(directory, "/") + s.normalize(key))
if err != nil {
// If node is not found: List is out of date, retry
if err == zk.ErrNoNode {
return s.List(directory)
}
return nil, err
}
kv = append(kv, &store.KVPair{
Key: key,
Value: []byte(pair.Value),
LastIndex: uint64(stat.Version),
})
}
return kv, nil
}
// DeleteTree deletes a range of keys under a given directory
func (s *Zookeeper) DeleteTree(directory string) error {
pairs, err := s.List(directory)
if err != nil {
return err
}
var reqs []interface{}
for _, pair := range pairs {
reqs = append(reqs, &zk.DeleteRequest{
Path: s.normalize(directory + "/" + pair.Key),
Version: -1,
})
}
_, err = s.client.Multi(reqs...)
return err
}
// AtomicPut put a value at "key" if the key has not been
// modified in the meantime, throws an error if this is the case
func (s *Zookeeper) AtomicPut(key string, value []byte, previous *store.KVPair, _ *store.WriteOptions) (bool, *store.KVPair, error) {
var lastIndex uint64
if previous != nil {
meta, err := s.client.Set(s.normalize(key), value, int32(previous.LastIndex))
if err != nil {
// Compare Failed
if err == zk.ErrBadVersion {
return false, nil, store.ErrKeyModified
}
return false, nil, err
}
lastIndex = uint64(meta.Version)
} else {
// Interpret previous == nil as create operation.
_, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll))
if err != nil {
// Zookeeper will complain if the directory doesn't exist.
if err == zk.ErrNoNode {
// Create the directory
parts := store.SplitKey(strings.TrimSuffix(key, "/"))
parts = parts[:len(parts)-1]
if err = s.createFullPath(parts, false); err != nil {
// Failed to create the directory.
return false, nil, err
}
if _, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll)); err != nil {
return false, nil, err
}
} else {
// Unhandled error
return false, nil, err
}
}
lastIndex = 0 // Newly created nodes have version 0.
}
pair := &store.KVPair{
Key: key,
Value: value,
LastIndex: lastIndex,
}
return true, pair, nil
}
// AtomicDelete deletes a value at "key" if the key
// has not been modified in the meantime, throws an
// error if this is the case
func (s *Zookeeper) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
if previous == nil {
return false, store.ErrPreviousNotSpecified
}
err := s.client.Delete(s.normalize(key), int32(previous.LastIndex))
if err != nil {
if err == zk.ErrBadVersion {
return false, store.ErrKeyModified
}
return false, err
}
return true, nil
}
// NewLock returns a handle to a lock struct which can
// be used to provide mutual exclusion on a key
func (s *Zookeeper) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) {
value := []byte("")
// Apply options
if options != nil {
if options.Value != nil {
value = options.Value
}
}
lock = &zookeeperLock{
client: s.client,
key: s.normalize(key),
value: value,
lock: zk.NewLock(s.client, s.normalize(key), zk.WorldACL(zk.PermAll)),
}
return lock, err
}
// Lock attempts to acquire the lock and blocks while
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *zookeeperLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
err := l.lock.Lock()
if err == nil {
// We hold the lock, we can set our value
// FIXME: The value is left behind
// (problematic for leader election)
_, err = l.client.Set(l.key, l.value, -1)
}
return make(chan struct{}), err
}
// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *zookeeperLock) Unlock() error {
return l.lock.Unlock()
}
// Close closes the client connection
func (s *Zookeeper) Close() {
s.client.Close()
}
// Normalize the key for usage in Zookeeper
func (s *Zookeeper) normalize(key string) string {
key = store.Normalize(key)
return strings.TrimSuffix(key, "/")
}<|fim▁end|> | // WatchTree watches for changes on a "directory"
// It returns a channel that will receive changes or pass |
<|file_name|>test_ptrace.rs<|end_file_name|><|fim▁begin|>use nix::errno::Errno;
use nix::unistd::getpid;
use nix::sys::ptrace;
#[cfg(any(target_os = "android", target_os = "linux"))]
use nix::sys::ptrace::Options;
#[cfg(any(target_os = "android", target_os = "linux"))]
use std::mem;
use crate::*;
#[test]
fn test_ptrace() {
// Just make sure ptrace can be called at all, for now.
// FIXME: qemu-user doesn't implement ptrace on all arches, so permit ENOSYS
require_capability!("test_ptrace", CAP_SYS_PTRACE);
let err = ptrace::attach(getpid()).unwrap_err();
assert!(err == Errno::EPERM || err == Errno::EINVAL ||
err == Errno::ENOSYS);
}
// Just make sure ptrace_setoptions can be called at all, for now.
#[test]
#[cfg(any(target_os = "android", target_os = "linux"))]
fn test_ptrace_setoptions() {
require_capability!("test_ptrace_setoptions", CAP_SYS_PTRACE);
let err = ptrace::setoptions(getpid(), Options::PTRACE_O_TRACESYSGOOD).unwrap_err();
assert!(err != Errno::EOPNOTSUPP);
}
// Just make sure ptrace_getevent can be called at all, for now.
#[test]
#[cfg(any(target_os = "android", target_os = "linux"))]
fn test_ptrace_getevent() {
require_capability!("test_ptrace_getevent", CAP_SYS_PTRACE);
let err = ptrace::getevent(getpid()).unwrap_err();
assert!(err != Errno::EOPNOTSUPP);
}
// Just make sure ptrace_getsiginfo can be called at all, for now.
#[test]
#[cfg(any(target_os = "android", target_os = "linux"))]
fn test_ptrace_getsiginfo() {
require_capability!("test_ptrace_getsiginfo", CAP_SYS_PTRACE);
if let Err(Errno::EOPNOTSUPP) = ptrace::getsiginfo(getpid()) {
panic!("ptrace_getsiginfo returns Errno::EOPNOTSUPP!");
}
}
// Just make sure ptrace_setsiginfo can be called at all, for now.
#[test]
#[cfg(any(target_os = "android", target_os = "linux"))]
fn test_ptrace_setsiginfo() {
require_capability!("test_ptrace_setsiginfo", CAP_SYS_PTRACE);
let siginfo = unsafe { mem::zeroed() };
if let Err(Errno::EOPNOTSUPP) = ptrace::setsiginfo(getpid(), &siginfo) {
panic!("ptrace_setsiginfo returns Errno::EOPNOTSUPP!");
}
}
#[test]
fn test_ptrace_cont() {
use nix::sys::ptrace;
use nix::sys::signal::{raise, Signal};
use nix::sys::wait::{waitpid, WaitPidFlag, WaitStatus};
use nix::unistd::fork;
use nix::unistd::ForkResult::*;
require_capability!("test_ptrace_cont", CAP_SYS_PTRACE);
let _m = crate::FORK_MTX.lock();
// FIXME: qemu-user doesn't implement ptrace on all architectures
// and returns ENOSYS in this case.
// We (ab)use this behavior to detect the affected platforms
// and skip the test then.
// On valid platforms the ptrace call should return Errno::EPERM, this
// is already tested by `test_ptrace`.
let err = ptrace::attach(getpid()).unwrap_err();
if err == Errno::ENOSYS {
return;
}
match unsafe{fork()}.expect("Error: Fork Failed") {
Child => {
ptrace::traceme().unwrap();
// As recommended by ptrace(2), raise SIGTRAP to pause the child
// until the parent is ready to continue
loop {
raise(Signal::SIGTRAP).unwrap();
}
},<|fim▁hole|> assert_eq!(waitpid(child, None), Ok(WaitStatus::Stopped(child, Signal::SIGTRAP)));
ptrace::cont(child, None).unwrap();
assert_eq!(waitpid(child, None), Ok(WaitStatus::Stopped(child, Signal::SIGTRAP)));
ptrace::cont(child, Some(Signal::SIGKILL)).unwrap();
match waitpid(child, None) {
Ok(WaitStatus::Signaled(pid, Signal::SIGKILL, _)) if pid == child => {
// FIXME It's been observed on some systems (apple) the
// tracee may not be killed but remain as a zombie process
// affecting other wait based tests. Add an extra kill just
// to make sure there are no zombies.
let _ = waitpid(child, Some(WaitPidFlag::WNOHANG));
while ptrace::cont(child, Some(Signal::SIGKILL)).is_ok() {
let _ = waitpid(child, Some(WaitPidFlag::WNOHANG));
}
}
_ => panic!("The process should have been killed"),
}
},
}
}
#[cfg(target_os = "linux")]
#[test]
fn test_ptrace_interrupt() {
use nix::sys::ptrace;
use nix::sys::signal::Signal;
use nix::sys::wait::{waitpid, WaitPidFlag, WaitStatus};
use nix::unistd::fork;
use nix::unistd::ForkResult::*;
use std::thread::sleep;
use std::time::Duration;
require_capability!("test_ptrace_interrupt", CAP_SYS_PTRACE);
let _m = crate::FORK_MTX.lock();
match unsafe{fork()}.expect("Error: Fork Failed") {
Child => {
loop {
sleep(Duration::from_millis(1000));
}
},
Parent { child } => {
ptrace::seize(child, ptrace::Options::PTRACE_O_TRACESYSGOOD).unwrap();
ptrace::interrupt(child).unwrap();
assert_eq!(waitpid(child, None), Ok(WaitStatus::PtraceEvent(child, Signal::SIGTRAP, 128)));
ptrace::syscall(child, None).unwrap();
assert_eq!(waitpid(child, None), Ok(WaitStatus::PtraceSyscall(child)));
ptrace::detach(child, Some(Signal::SIGKILL)).unwrap();
match waitpid(child, None) {
Ok(WaitStatus::Signaled(pid, Signal::SIGKILL, _)) if pid == child => {
let _ = waitpid(child, Some(WaitPidFlag::WNOHANG));
while ptrace::cont(child, Some(Signal::SIGKILL)).is_ok() {
let _ = waitpid(child, Some(WaitPidFlag::WNOHANG));
}
}
_ => panic!("The process should have been killed"),
}
},
}
}
// ptrace::{setoptions, getregs} are only available in these platforms
#[cfg(all(target_os = "linux",
any(target_arch = "x86_64",
target_arch = "x86"),
target_env = "gnu"))]
#[test]
fn test_ptrace_syscall() {
use nix::sys::signal::kill;
use nix::sys::ptrace;
use nix::sys::signal::Signal;
use nix::sys::wait::{waitpid, WaitStatus};
use nix::unistd::fork;
use nix::unistd::getpid;
use nix::unistd::ForkResult::*;
require_capability!("test_ptrace_syscall", CAP_SYS_PTRACE);
let _m = crate::FORK_MTX.lock();
match unsafe{fork()}.expect("Error: Fork Failed") {
Child => {
ptrace::traceme().unwrap();
// first sigstop until parent is ready to continue
let pid = getpid();
kill(pid, Signal::SIGSTOP).unwrap();
kill(pid, Signal::SIGTERM).unwrap();
unsafe { ::libc::_exit(0); }
},
Parent { child } => {
assert_eq!(waitpid(child, None), Ok(WaitStatus::Stopped(child, Signal::SIGSTOP)));
// set this option to recognize syscall-stops
ptrace::setoptions(child, ptrace::Options::PTRACE_O_TRACESYSGOOD).unwrap();
#[cfg(target_arch = "x86_64")]
let get_syscall_id = || ptrace::getregs(child).unwrap().orig_rax as libc::c_long;
#[cfg(target_arch = "x86")]
let get_syscall_id = || ptrace::getregs(child).unwrap().orig_eax as libc::c_long;
// kill entry
ptrace::syscall(child, None).unwrap();
assert_eq!(waitpid(child, None), Ok(WaitStatus::PtraceSyscall(child)));
assert_eq!(get_syscall_id(), ::libc::SYS_kill);
// kill exit
ptrace::syscall(child, None).unwrap();
assert_eq!(waitpid(child, None), Ok(WaitStatus::PtraceSyscall(child)));
assert_eq!(get_syscall_id(), ::libc::SYS_kill);
// receive signal
ptrace::syscall(child, None).unwrap();
assert_eq!(waitpid(child, None), Ok(WaitStatus::Stopped(child, Signal::SIGTERM)));
// inject signal
ptrace::syscall(child, Signal::SIGTERM).unwrap();
assert_eq!(waitpid(child, None), Ok(WaitStatus::Signaled(child, Signal::SIGTERM, false)));
},
}
}<|fim▁end|> | Parent { child } => { |
<|file_name|>ProductApplicationServiceTest.java<|end_file_name|><|fim▁begin|>// Copyright 2012,2013 Vaughn Vernon
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.saasovation.agilepm.application.product;
import java.util.UUID;
import com.saasovation.agilepm.application.ProductApplicationCommonTest;
import com.saasovation.agilepm.domain.model.discussion.DiscussionAvailability;
import com.saasovation.agilepm.domain.model.product.Product;
import com.saasovation.agilepm.domain.model.product.ProductId;
import com.saasovation.agilepm.domain.model.team.ProductOwner;
public class ProductApplicationServiceTest extends ProductApplicationCommonTest {
public ProductApplicationServiceTest() {
super();
}
public void testDiscussionProcess() throws Exception {
Product product = this.persistedProductForTest();
this.productApplicationService.requestProductDiscussion(
new RequestProductDiscussionCommand(
product.tenantId().id(),
product.productId().id()));
this.productApplicationService.startDiscussionInitiation(
new StartDiscussionInitiationCommand(
product.tenantId().id(),
product.productId().id()));
Product productWithStartedDiscussionInitiation =
this.productRepository
.productOfId(
product.tenantId(),
product.productId());
assertNotNull(productWithStartedDiscussionInitiation.discussionInitiationId());
String discussionId = UUID.randomUUID().toString().toUpperCase();
InitiateDiscussionCommand command =
new InitiateDiscussionCommand(
product.tenantId().id(),
product.productId().id(),
discussionId);
this.productApplicationService.initiateDiscussion(command);
Product productWithInitiatedDiscussion =
this.productRepository
.productOfId(
product.tenantId(),
product.productId());
assertEquals(discussionId, productWithInitiatedDiscussion.discussion().descriptor().id());
}
public void testNewProduct() throws Exception {
ProductOwner productOwner = this.persistedProductOwnerForTest();
String newProductId =
this.productApplicationService.newProduct(
new NewProductCommand(
"T-12345",
productOwner.productOwnerId().id(),
"My Product",
"The description of My Product."));
Product newProduct =
this.productRepository
.productOfId(
productOwner.tenantId(),
new ProductId(newProductId));
assertNotNull(newProduct);
assertEquals("My Product", newProduct.name());
assertEquals("The description of My Product.", newProduct.description());
}
public void testNewProductWithDiscussion() throws Exception {
ProductOwner productOwner = this.persistedProductOwnerForTest();
String newProductId =
this.productApplicationService.newProductWithDiscussion(
new NewProductCommand(
"T-12345",
productOwner.productOwnerId().id(),
"My Product",
"The description of My Product."));
Product newProduct =
this.productRepository
.productOfId(
productOwner.tenantId(),
new ProductId(newProductId));
assertNotNull(newProduct);
assertEquals("My Product", newProduct.name());
assertEquals("The description of My Product.", newProduct.description());
assertEquals(DiscussionAvailability.REQUESTED, newProduct.discussion().availability());
}
public void testRequestProductDiscussion() throws Exception {
Product product = this.persistedProductForTest();
this.productApplicationService.requestProductDiscussion(
new RequestProductDiscussionCommand(
product.tenantId().id(),
product.productId().id()));
Product productWithRequestedDiscussion =
this.productRepository
.productOfId(
product.tenantId(),
product.productId());
assertEquals(DiscussionAvailability.REQUESTED, productWithRequestedDiscussion.discussion().availability());
}
public void testRetryProductDiscussionRequest() throws Exception {
Product product = this.persistedProductForTest();
this.productApplicationService.requestProductDiscussion(
new RequestProductDiscussionCommand(
product.tenantId().id(),
product.productId().id()));
Product productWithRequestedDiscussion =
this.productRepository
.productOfId(
product.tenantId(),
product.productId());
assertEquals(DiscussionAvailability.REQUESTED, productWithRequestedDiscussion.discussion().availability());
this.productApplicationService.startDiscussionInitiation(
new StartDiscussionInitiationCommand(
product.tenantId().id(),
product.productId().id()));
Product productWithDiscussionInitiation =
this.productRepository
.productOfId(
product.tenantId(),
product.productId());
assertNotNull(productWithDiscussionInitiation.discussionInitiationId());
this.productApplicationService.retryProductDiscussionRequest(
new RetryProductDiscussionRequestCommand(
product.tenantId().id(),
productWithDiscussionInitiation.discussionInitiationId()));
Product productWithRetriedRequestedDiscussion =
this.productRepository
.productOfId(
product.tenantId(),
product.productId());
assertEquals(DiscussionAvailability.REQUESTED, productWithRetriedRequestedDiscussion.discussion().availability());
}
public void testStartDiscussionInitiation() throws Exception {
Product product = this.persistedProductForTest();
this.productApplicationService.requestProductDiscussion(
new RequestProductDiscussionCommand(
product.tenantId().id(),
product.productId().id()));
Product productWithRequestedDiscussion =
this.productRepository
.productOfId(
product.tenantId(),
product.productId());
assertEquals(DiscussionAvailability.REQUESTED, productWithRequestedDiscussion.discussion().availability());
assertNull(productWithRequestedDiscussion.discussionInitiationId());
this.productApplicationService.startDiscussionInitiation(
new StartDiscussionInitiationCommand(
product.tenantId().id(),
product.productId().id()));
Product productWithDiscussionInitiation =
this.productRepository
.productOfId(
product.tenantId(),<|fim▁hole|>
public void testTimeOutProductDiscussionRequest() throws Exception {
// TODO: student assignment
}
}<|fim▁end|> | product.productId());
assertNotNull(productWithDiscussionInitiation.discussionInitiationId());
} |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>export default function(visitable, deletable, creatable, clickable, attribute, collection, filter) {
return creatable({
visit: visitable('/:dc/acls'),
acls: collection(
'[data-test-tabular-row]',
deletable({
name: attribute('data-test-acl', '[data-test-acl]'),
acl: clickable('a'),
actions: clickable('label'),
use: clickable('[data-test-use]'),<|fim▁hole|> confirmUse: clickable('[data-test-confirm-use]'),
})
),
filter: filter,
});
}<|fim▁end|> | |
<|file_name|>circular_buffer_test.py<|end_file_name|><|fim▁begin|>import unittest
from circular_buffer import (
CircularBuffer,
BufferFullException,
BufferEmptyException
)
class CircularBufferTest(unittest.TestCase):
def test_read_empty_buffer(self):
buf = CircularBuffer(1)
with self.assertRaises(BufferEmptyException):
buf.read()
def test_write_and_read_back_one_item(self):
buf = CircularBuffer(1)
buf.write('1')
self.assertEqual('1', buf.read())
with self.assertRaises(BufferEmptyException):
buf.read()
def test_write_and_read_back_multiple_items(self):
buf = CircularBuffer(2)
buf.write('1')
buf.write('2')
self.assertEqual('1', buf.read())
self.assertEqual('2', buf.read())
with self.assertRaises(BufferEmptyException):
buf.read()
def test_clearing_buffer(self):
buf = CircularBuffer(3)
for c in '123':
buf.write(c)
buf.clear()
with self.assertRaises(BufferEmptyException):
buf.read()
buf.write('1')
buf.write('2')
self.assertEqual('1', buf.read())
buf.write('3')
self.assertEqual('2', buf.read())
def test_alternate_write_and_read(self):
buf = CircularBuffer(2)
buf.write('1')
self.assertEqual('1', buf.read())
buf.write('2')
self.assertEqual('2', buf.read())
def test_read_back_oldest_item(self):
buf = CircularBuffer(3)
buf.write('1')
buf.write('2')
buf.read()
buf.write('3')
buf.read()
self.assertEqual('3', buf.read())
def test_write_full_buffer(self):
buf = CircularBuffer(2)
buf.write('1')
buf.write('2')
with self.assertRaises(BufferFullException):
buf.write('A')
def test_overwrite_full_buffer(self):
buf = CircularBuffer(2)
buf.write('1')
buf.write('2')
buf.overwrite('A')
self.assertEqual('2', buf.read())
self.assertEqual('A', buf.read())
with self.assertRaises(BufferEmptyException):
buf.read()
def test_overwrite_non_full_buffer(self):
buf = CircularBuffer(2)
buf.overwrite('1')
buf.overwrite('2')
self.assertEqual('1', buf.read())
self.assertEqual('2', buf.read())
with self.assertRaises(BufferEmptyException):
buf.read()
def test_alternate_read_and_overwrite(self):
buf = CircularBuffer(5)
for c in '123':
buf.write(c)
buf.read()
buf.read()<|fim▁hole|> buf.read()
for c in '5678':
buf.write(c)
buf.overwrite('A')
buf.overwrite('B')
self.assertEqual('6', buf.read())
self.assertEqual('7', buf.read())
self.assertEqual('8', buf.read())
self.assertEqual('A', buf.read())
self.assertEqual('B', buf.read())
with self.assertRaises(BufferEmptyException):
buf.read()
if __name__ == '__main__':
unittest.main()<|fim▁end|> | buf.write('4') |
<|file_name|>scatter_ops_test.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyAdd(ref, indices, updates):
# Since numpy advanced assignment does not support repeated indices,
# we run a simple loop to perform scatter_add.
for i, indx in np.ndenumerate(indices):
ref[indx] += updates[i]
def _NumpyAddScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] += update
def _NumpySub(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] -= updates[i]
def _NumpySubScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] -= update
def _NumpyMul(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] *= updates[i]
def _NumpyMulScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] *= update
def _NumpyDiv(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] /= updates[i]
def _NumpyDivScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] /= update
def _NumpyMin(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], updates[i])
def _NumpyMinScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], update)
def _NumpyMax(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], updates[i])
def _NumpyMaxScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], update)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = updates[i]
def _NumpyUpdateScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = update
_TF_OPS_TO_NUMPY = {
state_ops.scatter_update: _NumpyUpdate,
state_ops.scatter_add: _NumpyAdd,
state_ops.scatter_sub: _NumpySub,
state_ops.scatter_mul: _NumpyMul,
state_ops.scatter_div: _NumpyDiv,
state_ops.scatter_min: _NumpyMin,
state_ops.scatter_max: _NumpyMax,
}
_TF_OPS_TO_NUMPY_SCALAR = {
state_ops.scatter_update: _NumpyUpdateScalar,
state_ops.scatter_add: _NumpyAddScalar,
state_ops.scatter_sub: _NumpySubScalar,
state_ops.scatter_mul: _NumpyMulScalar,
state_ops.scatter_div: _NumpyDivScalar,
state_ops.scatter_min: _NumpyMinScalar,
state_ops.scatter_max: _NumpyMaxScalar,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False):
np.random.seed(8)
with self.cached_session(use_gpu=True):
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
# Generate random indices with no duplicates for easy numpy comparison
size = np.prod(indices_shape, dtype=itype)
first_dim = 3 * size
indices = np.arange(first_dim)
np.random.shuffle(indices)
indices = indices[:size]
if size > 1 and repeat_indices:
# Add some random repeats.
indices = indices[:size // 2]
for _ in range(size - size // 2):
# Randomly append some repeats.
indices = np.append(indices,
indices[np.random.randint(size // 2)])
np.random.shuffle(indices)
indices = indices.reshape(indices_shape)
if updates_are_scalar:
updates = _AsType(np.random.randn(), vtype)
else:
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
# Clips small values to avoid division by zero.
def clip_small_values(x):
threshold = 1e-4
sign = np.sign(x)
if isinstance(x, np.int32):
threshold = 1
sign = np.random.choice([-1, 1])
return threshold * sign if np.abs(x) < threshold else x
updates = np.vectorize(clip_small_values)(updates)
old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
if updates_are_scalar:
np_scatter = _TF_OPS_TO_NUMPY_SCALAR[tf_scatter]
else:
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = variables.VariableV1(old)
ref.initializer.run()
tf_scatter(ref, indices, updates).eval()
self.assertAllClose(ref.eval(), new)
def _VariableRankTests(self,
tf_scatter,
repeat_indices=False,
updates_are_scalar=False):
vtypes = [np.float32, np.float64]
if tf_scatter != state_ops.scatter_div:
vtypes.append(np.int32)
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(tf_scatter, vtype, itype, repeat_indices,
updates_are_scalar)
def testVariableRankUpdate(self):
self._VariableRankTests(state_ops.scatter_update, False)
def testVariableRankAdd(self):
self._VariableRankTests(state_ops.scatter_add, False)
def testVariableRankSub(self):
self._VariableRankTests(state_ops.scatter_sub, False)<|fim▁hole|> def testVariableRankMul(self):
self._VariableRankTests(state_ops.scatter_mul, False)
def testVariableRankDiv(self):
self._VariableRankTests(state_ops.scatter_div, False)
def testVariableRankMin(self):
self._VariableRankTests(state_ops.scatter_min, False)
def testVariableRankMax(self):
self._VariableRankTests(state_ops.scatter_max, False)
def testRepeatIndicesAdd(self):
self._VariableRankTests(state_ops.scatter_add, True)
def testRepeatIndicesSub(self):
self._VariableRankTests(state_ops.scatter_sub, True)
def testRepeatIndicesMul(self):
self._VariableRankTests(state_ops.scatter_mul, True)
def testRepeatIndicesDiv(self):
self._VariableRankTests(state_ops.scatter_div, True)
def testRepeatIndicesMin(self):
self._VariableRankTests(state_ops.scatter_min, True)
def testRepeatIndicesMax(self):
self._VariableRankTests(state_ops.scatter_max, True)
def testVariableRankUpdateScalar(self):
self._VariableRankTests(state_ops.scatter_update, False, True)
def testVariableRankAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, False, True)
def testVariableRankSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, False, True)
def testVariableRankMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, False, True)
def testVariableRankDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, False, True)
def testVariableRankMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, False, True)
def testVariableRankMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, False, True)
def testRepeatIndicesAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, True, True)
def testRepeatIndicesSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, True, True)
def testRepeatIndicesMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, True, True)
def testRepeatIndicesDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, True, True)
def testRepeatIndicesMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, True, True)
def testRepeatIndicesMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, True, True)
def testBooleanScatterUpdate(self):
if not test.is_gpu_available():
with self.session(use_gpu=False) as session:
var = variables.Variable([True, False])
update0 = state_ops.scatter_update(var, 1, True)
update1 = state_ops.scatter_update(
var, constant_op.constant(
0, dtype=dtypes.int64), False)
var.initializer.run()
session.run([update0, update1])
self.assertAllEqual([False, True], self.evaluate(var))
def testScatterOutOfRangeCpu(self):
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
if not test.is_gpu_available():
with self.session(use_gpu=False):
ref = variables.VariableV1(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = -1 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = 6 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if test.is_gpu_available():
return
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with self.cached_session(force_gpu=True):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
        # Indices out of range should not fail.
indices = np.array([-1, 0, 5])
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
op(ref, indices, updates).eval()
if __name__ == '__main__':
test.main()<|fim▁end|> | |
<|file_name|>groups.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from model.group import Group
# Stałe dane testowe<|fim▁hole|>testData = [
Group(name='name1', header='header1', footer='footer1'),
Group(name='name2', header='header2', footer='footer2')
]<|fim▁end|> | |
<|file_name|>checks.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""A flow to run checks for a host."""
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib.checks import checks
from grr.proto import flows_pb2
class CheckFlowArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.CheckFlowArgs
class CheckRunner(flow.GRRFlow):
"""This flow runs checks on a host.
CheckRunner:
- Identifies what checks should be run for a host.
- Identifies the artifacts that need to be collected to perform those checks.
- Orchestrates collection of the host data.
- Routes host data to the relevant checks.
- Returns check data ready for reporting.<|fim▁hole|> """
friendly_name = "Run Checks"
category = "/Checks/"
behaviours = flow.GRRFlow.behaviours + "BASIC"
@flow.StateHandler(next_state=["MapArtifactData"])
def Start(self):
"""."""
client = aff4.FACTORY.Open(self.client_id, token=self.token)
self.state.Register("knowledge_base",
client.Get(client.Schema.KNOWLEDGE_BASE))
self.state.Register("labels", client.GetLabels())
self.state.Register("artifacts_wanted", set())
self.state.Register("artifacts_fetched", set())
self.state.Register("checks_run", [])
self.state.Register("checks_with_findings", [])
self.state.Register("results_store", None)
self.state.Register("host_data", {})
self.CallState(next_state="MapArtifactData")
@flow.StateHandler(next_state=["AddResponses", "RunChecks"])
def MapArtifactData(self, responses):
"""Get processed data, mapped to artifacts."""
self.state.artifacts_wanted = checks.CheckRegistry.SelectArtifacts(
os=self.state.knowledge_base.os)
# Fetch Artifacts and map results to the artifacts that generated them.
# This is an inefficient collection, but necessary because results need to
# be mapped to the originating artifact. An alternative would be to have
# rdfvalues labeled with originating artifact ids.
for artifact_id in self.state.artifacts_wanted:
self.CallFlow("ArtifactCollectorFlow", artifact_list=[artifact_id],
request_data={"artifact_id": artifact_id},
next_state="AddResponses")
self.CallState(next_state="RunChecks")
@flow.StateHandler()
def AddResponses(self, responses):
artifact_id = responses.request_data["artifact_id"]
# TODO(user): Check whether artifact collection succeeded.
self.state.host_data[artifact_id] = list(responses)
@flow.StateHandler(next_state=["Done"])
def RunChecks(self, responses):
if not responses.success:
raise RuntimeError("Checks did not run successfully.")
# Hand host data across to checks. Do this after all data has been collected
# in case some checks require multiple artifacts/results.
for finding in checks.CheckHost(self.state.host_data,
os=self.state.knowledge_base.os):
self.state.checks_run.append(finding.check_id)
if finding.anomaly:
self.state.checks_with_findings.append(finding.check_id)
self.SendReply(finding)<|fim▁end|> | |
<|file_name|>common_types.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[allow(non_camel_case_types)];
pub use servo_util::geometry::Au;
pub type CSSFloat = f64;
pub static DEFAULT_LINE_HEIGHT: CSSFloat = 1.14;
pub mod specified {
use std::ascii::StrAsciiExt;
use cssparser::ast;
use cssparser::ast::*;
use super::{Au, CSSFloat};
pub use CSSColor = cssparser::Color;
#[deriving(Clone)]
pub enum Length {
Au_(Au), // application units
Em(CSSFloat),
Ex(CSSFloat),
// XXX uncomment when supported:
// Ch(CSSFloat),
// Rem(CSSFloat),
// Vw(CSSFloat),
// Vh(CSSFloat),
// Vmin(CSSFloat),
// Vmax(CSSFloat),
}
static AU_PER_PX: CSSFloat = 60.;
static AU_PER_IN: CSSFloat = AU_PER_PX * 96.;
static AU_PER_CM: CSSFloat = AU_PER_IN / 2.54;
static AU_PER_MM: CSSFloat = AU_PER_IN / 25.4;
static AU_PER_PT: CSSFloat = AU_PER_IN / 72.;
static AU_PER_PC: CSSFloat = AU_PER_PT * 12.;
impl Length {
#[inline]
fn parse_internal(input: &ComponentValue, negative_ok: bool) -> Option<Length> {
match input {
&Dimension(ref value, ref unit) if negative_ok || value.value >= 0.
=> Length::parse_dimension(value.value, unit.as_slice()),
&Number(ref value) if value.value == 0. => Some(Au_(Au(0))),
_ => None
}
}
#[allow(dead_code)]
pub fn parse(input: &ComponentValue) -> Option<Length> {
Length::parse_internal(input, /* negative_ok = */ true)
}
pub fn parse_non_negative(input: &ComponentValue) -> Option<Length> {
Length::parse_internal(input, /* negative_ok = */ false)
}
pub fn parse_dimension(value: CSSFloat, unit: &str) -> Option<Length> {
// FIXME: Workaround for https://github.com/mozilla/rust/issues/10683
let unit_lower = unit.to_ascii_lower();
match unit_lower.as_slice() {
"px" => Some(Length::from_px(value)),
"in" => Some(Au_(Au((value * AU_PER_IN) as i32))),
"cm" => Some(Au_(Au((value * AU_PER_CM) as i32))),
"mm" => Some(Au_(Au((value * AU_PER_MM) as i32))),
"pt" => Some(Au_(Au((value * AU_PER_PT) as i32))),
"pc" => Some(Au_(Au((value * AU_PER_PC) as i32))),
"em" => Some(Em(value)),
"ex" => Some(Ex(value)),
_ => None
}
}
#[inline]
pub fn from_px(px_value: CSSFloat) -> Length {
Au_(Au((px_value * AU_PER_PX) as i32))
}
}
#[deriving(Clone)]
pub enum LengthOrPercentage {
LP_Length(Length),
LP_Percentage(CSSFloat), // [0 .. 100%] maps to [0.0 .. 1.0]
}
impl LengthOrPercentage {
fn parse_internal(input: &ComponentValue, negative_ok: bool)
-> Option<LengthOrPercentage> {
match input {
&Dimension(ref value, ref unit) if negative_ok || value.value >= 0.
=> Length::parse_dimension(value.value, unit.as_slice()).map(LP_Length),
&ast::Percentage(ref value) if negative_ok || value.value >= 0.
=> Some(LP_Percentage(value.value / 100.)),
&Number(ref value) if value.value == 0. => Some(LP_Length(Au_(Au(0)))),
_ => None
}
}
#[allow(dead_code)]
#[inline]
pub fn parse(input: &ComponentValue) -> Option<LengthOrPercentage> {
LengthOrPercentage::parse_internal(input, /* negative_ok = */ true)
}
#[inline]
pub fn parse_non_negative(input: &ComponentValue) -> Option<LengthOrPercentage> {
LengthOrPercentage::parse_internal(input, /* negative_ok = */ false)
}
}
#[deriving(Clone)]
pub enum LengthOrPercentageOrAuto {
LPA_Length(Length),
LPA_Percentage(CSSFloat), // [0 .. 100%] maps to [0.0 .. 1.0]
LPA_Auto,
}
impl LengthOrPercentageOrAuto {
fn parse_internal(input: &ComponentValue, negative_ok: bool)
-> Option<LengthOrPercentageOrAuto> {
match input {
&Dimension(ref value, ref unit) if negative_ok || value.value >= 0.
=> Length::parse_dimension(value.value, unit.as_slice()).map(LPA_Length),
&ast::Percentage(ref value) if negative_ok || value.value >= 0.
=> Some(LPA_Percentage(value.value / 100.)),
&Number(ref value) if value.value == 0. => Some(LPA_Length(Au_(Au(0)))),
&Ident(ref value) if value.eq_ignore_ascii_case("auto") => Some(LPA_Auto),
_ => None
}
}
#[inline]
pub fn parse(input: &ComponentValue) -> Option<LengthOrPercentageOrAuto> {
LengthOrPercentageOrAuto::parse_internal(input, /* negative_ok = */ true)
}
#[inline]
pub fn parse_non_negative(input: &ComponentValue) -> Option<LengthOrPercentageOrAuto> {
LengthOrPercentageOrAuto::parse_internal(input, /* negative_ok = */ false)
}<|fim▁hole|> LPN_Length(Length),
LPN_Percentage(CSSFloat), // [0 .. 100%] maps to [0.0 .. 1.0]
LPN_None,
}
impl LengthOrPercentageOrNone {
fn parse_internal(input: &ComponentValue, negative_ok: bool)
-> Option<LengthOrPercentageOrNone> {
match input {
&Dimension(ref value, ref unit) if negative_ok || value.value >= 0.
=> Length::parse_dimension(value.value, unit.as_slice()).map(LPN_Length),
&ast::Percentage(ref value) if negative_ok || value.value >= 0.
=> Some(LPN_Percentage(value.value / 100.)),
&Number(ref value) if value.value == 0. => Some(LPN_Length(Au_(Au(0)))),
&Ident(ref value) if value.eq_ignore_ascii_case("none") => Some(LPN_None),
_ => None
}
}
#[allow(dead_code)]
#[inline]
pub fn parse(input: &ComponentValue) -> Option<LengthOrPercentageOrNone> {
LengthOrPercentageOrNone::parse_internal(input, /* negative_ok = */ true)
}
#[inline]
pub fn parse_non_negative(input: &ComponentValue) -> Option<LengthOrPercentageOrNone> {
LengthOrPercentageOrNone::parse_internal(input, /* negative_ok = */ false)
}
}
}
pub mod computed {
pub use CSSColor = cssparser::Color;
pub use compute_CSSColor = super::super::longhands::computed_as_specified;
use super::*;
use super::super::longhands;
pub use servo_util::geometry::Au;
pub struct Context {
color: longhands::color::computed_value::T,
inherited_font_weight: longhands::font_weight::computed_value::T,
inherited_font_size: longhands::font_size::computed_value::T,
inherited_minimum_line_height: longhands::_servo_minimum_line_height::T,
inherited_height: longhands::height::T,
font_size: longhands::font_size::computed_value::T,
display: longhands::display::computed_value::T,
positioned: bool,
floated: bool,
border_top_present: bool,
border_right_present: bool,
border_bottom_present: bool,
border_left_present: bool,
is_root_element: bool,
// TODO, as needed: root font size, viewport size, etc.
}
#[inline]
pub fn compute_Au(value: specified::Length, context: &Context) -> Au {
compute_Au_with_font_size(value, context.font_size)
}
/// A special version of `compute_Au` used for `font-size`.
#[inline]
pub fn compute_Au_with_font_size(value: specified::Length, reference_font_size: Au) -> Au {
match value {
specified::Au_(value) => value,
specified::Em(value) => reference_font_size.scale_by(value),
specified::Ex(value) => {
let x_height = 0.5; // TODO: find that from the font
reference_font_size.scale_by(value * x_height)
},
}
}
#[deriving(Eq, Clone)]
pub enum LengthOrPercentage {
LP_Length(Au),
LP_Percentage(CSSFloat),
}
pub fn compute_LengthOrPercentage(value: specified::LengthOrPercentage, context: &Context)
-> LengthOrPercentage {
match value {
specified::LP_Length(value) => LP_Length(compute_Au(value, context)),
specified::LP_Percentage(value) => LP_Percentage(value),
}
}
#[deriving(Eq, Clone)]
pub enum LengthOrPercentageOrAuto {
LPA_Length(Au),
LPA_Percentage(CSSFloat),
LPA_Auto,
}
pub fn compute_LengthOrPercentageOrAuto(value: specified::LengthOrPercentageOrAuto,
context: &Context) -> LengthOrPercentageOrAuto {
match value {
specified::LPA_Length(value) => LPA_Length(compute_Au(value, context)),
specified::LPA_Percentage(value) => LPA_Percentage(value),
specified::LPA_Auto => LPA_Auto,
}
}
#[deriving(Eq, Clone)]
pub enum LengthOrPercentageOrNone {
LPN_Length(Au),
LPN_Percentage(CSSFloat),
LPN_None,
}
pub fn compute_LengthOrPercentageOrNone(value: specified::LengthOrPercentageOrNone,
context: &Context) -> LengthOrPercentageOrNone {
match value {
specified::LPN_Length(value) => LPN_Length(compute_Au(value, context)),
specified::LPN_Percentage(value) => LPN_Percentage(value),
specified::LPN_None => LPN_None,
}
}
}<|fim▁end|> | }
#[deriving(Clone)]
pub enum LengthOrPercentageOrNone { |
<|file_name|>builtin-superkinds-capabilities-xc.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:trait_superkinds_in_metadata.rs
// Tests "capabilities" granted by traits with super-builtin-kinds,
// even when using them cross-crate.
extern crate trait_superkinds_in_metadata;
use std::sync::mpsc::{channel, Sender, Receiver};
use trait_superkinds_in_metadata::{RequiresRequiresShareAndSend, RequiresShare};
#[derive(PartialEq, Debug)]
struct X<T>(T);
impl <T: Sync> RequiresShare for X<T> { }
impl <T: Sync+Send> RequiresRequiresShareAndSend for X<T> { }
fn foo<T: RequiresRequiresShareAndSend + 'static>(val: T, chan: Sender<T>) {
chan.send(val).unwrap();
}<|fim▁hole|> assert_eq!(rx.recv().unwrap(), X(31337));
}<|fim▁end|> |
pub fn main() {
let (tx, rx): (Sender<X<isize>>, Receiver<X<isize>>) = channel();
foo(X(31337), tx); |
<|file_name|>params.py<|end_file_name|><|fim▁begin|>from __future__ import print_function, unicode_literals
import sys
from resources.lib.kodiutils import params as decode
class Params:<|fim▁hole|> url = None
params = Params()<|fim▁end|> | handle = int(sys.argv[1]) if len(sys.argv) > 1 else -1
orig_args = sys.argv[2] if len(sys.argv) > 2 else ''
args = decode(sys.argv[2]) if len(sys.argv) > 2 else {}
resume = sys.argv[3][7:] != 'false' if len(sys.argv) > 3 else False |
<|file_name|>upnp.py<|end_file_name|><|fim▁begin|>"""Provides a UPNP discovery method that mimics Hue hubs."""
import threading
import socket
import logging
import select
from aiohttp import web
from homeassistant import core
from homeassistant.components.http import HomeAssistantView
_LOGGER = logging.getLogger(__name__)
class DescriptionXmlView(HomeAssistantView):
"""Handles requests for the description.xml file."""
url = '/description.xml'
name = 'description:xml'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def get(self, request):
"""Handle a GET request."""
xml_template = """<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>http://{0}:{1}/</URLBase>
<device>
<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>
<friendlyName>HASS Bridge ({0})</friendlyName>
<manufacturer>Royal Philips Electronics</manufacturer>
<manufacturerURL>http://www.philips.com</manufacturerURL>
<modelDescription>Philips hue Personal Wireless Lighting</modelDescription>
<modelName>Philips hue bridge 2015</modelName>
<modelNumber>BSB002</modelNumber>
<modelURL>http://www.meethue.com</modelURL>
<serialNumber>1234</serialNumber>
<UDN>uuid:2f402f80-da50-11e1-9b23-001788255acc</UDN>
</device>
</root>
"""
resp_text = xml_template.format(
self.config.advertise_ip, self.config.advertise_port)
return web.Response(text=resp_text, content_type='text/xml')
class UPNPResponderThread(threading.Thread):
"""Handle responding to UPNP/SSDP discovery requests."""
_interrupted = False
def __init__(self, host_ip_addr, listen_port, upnp_bind_multicast,
advertise_ip, advertise_port):
"""Initialize the class."""
threading.Thread.__init__(self)
self.host_ip_addr = host_ip_addr
self.listen_port = listen_port
self.upnp_bind_multicast = upnp_bind_multicast
# Note that the double newline at the end of
# this string is required per the SSDP spec
resp_template = """HTTP/1.1 200 OK
CACHE-CONTROL: max-age=60
EXT:
LOCATION: http://{0}:{1}/description.xml
SERVER: FreeRTOS/6.0.5, UPnP/1.0, IpBridge/0.1
hue-bridgeid: 1234
ST: urn:schemas-upnp-org:device:basic:1
USN: uuid:Socket-1_0-221438K0100073::urn:schemas-upnp-org:device:basic:1
"""
self.upnp_response = resp_template.format(
advertise_ip, advertise_port).replace("\n", "\r\n") \
.encode('utf-8')
def run(self):
"""Run the server."""
# Listen for UDP port 1900 packets sent to SSDP multicast address
ssdp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ssdp_socket.setblocking(False)
# Required for receiving multicast
ssdp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ssdp_socket.setsockopt(
socket.SOL_IP,
socket.IP_MULTICAST_IF,
socket.inet_aton(self.host_ip_addr))
ssdp_socket.setsockopt(
socket.SOL_IP,
socket.IP_ADD_MEMBERSHIP,
socket.inet_aton("239.255.255.250") +
socket.inet_aton(self.host_ip_addr))
if self.upnp_bind_multicast:
ssdp_socket.bind(("", 1900))
else:
ssdp_socket.bind((self.host_ip_addr, 1900))
while True:
if self._interrupted:
clean_socket_close(ssdp_socket)
return
try:
read, _, _ = select.select(
[ssdp_socket], [],
[ssdp_socket], 2)
<|fim▁hole|> # most likely the timeout, so check for interupt
continue
except socket.error as ex:
if self._interrupted:
clean_socket_close(ssdp_socket)
return
_LOGGER.error("UPNP Responder socket exception occured: %s",
ex.__str__)
# without the following continue, a second exception occurs
# because the data object has not been initialized
continue
if "M-SEARCH" in data.decode('utf-8'):
# SSDP M-SEARCH method received, respond to it with our info
resp_socket = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM)
resp_socket.sendto(self.upnp_response, addr)
resp_socket.close()
def stop(self):
"""Stop the server."""
# Request for server
self._interrupted = True
self.join()
def clean_socket_close(sock):
"""Close a socket connection and logs its closure."""
_LOGGER.info("UPNP responder shutting down.")
sock.close()<|fim▁end|> | if ssdp_socket in read:
data, addr = ssdp_socket.recvfrom(1024)
else: |
<|file_name|>EuropeContinentBundle_fi.js<|end_file_name|><|fim▁begin|>/**<|fim▁hole|><|fim▁end|> | * Copyright (c) 2014, Oracle and/or its affiliates.
* All rights reserved.
*/
"use strict";var l={"EU":["EU","Eurooppa"]};(this?this:window)['DvtBaseMapManager']['_UNPROCESSED_MAPS'][2].push(["europe","continent",l]); |
<|file_name|>apps.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals<|fim▁hole|>
class ProfileConfig(AppConfig):
name = "profiles"
verbose_name = 'User Profiles'
def ready(self):
from . import signals # noqa<|fim▁end|> | from django.apps import AppConfig
|
<|file_name|>test_jira_hook.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an<|fim▁hole|># under the License.
#
import unittest
from mock import Mock
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.jira_hook import JiraHook
from airflow import models
from airflow.utils import db
jira_client_mock = Mock(
name="jira_client"
)
class TestJiraHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='jira_default', conn_type='jira',
host='https://localhost/jira/', port=443,
extra='{"verify": "False", "project": "AIRFLOW"}'))
@patch("airflow.contrib.hooks.jira_hook.JIRA", autospec=True,
return_value=jira_client_mock)
def test_jira_client_connection(self, jira_mock):
jira_hook = JiraHook()
self.assertTrue(jira_mock.called)
self.assertIsInstance(jira_hook.client, Mock)
self.assertEqual(jira_hook.client.name, jira_mock.return_value.name)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations |
<|file_name|>handlers.py<|end_file_name|><|fim▁begin|>from django.conf import settings
from django.core.handlers.base import get_path_info
from django.core.handlers.wsgi import WSGIHandler
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six.moves.urllib.request import url2pathname
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATIC_URL setting, and serves those files.
"""
def __init__(self, application, base_dir=None):
self.application = application
if base_dir:
self.base_dir = base_dir
else:
self.base_dir = self.get_base_dir()
self.base_url = urlparse(self.get_base_url())
super(StaticFilesHandler, self).__init__()
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""<|fim▁hole|> """
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404 as e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)<|fim▁end|> | relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def serve(self, request): |
<|file_name|>navCategories.js<|end_file_name|><|fim▁begin|>import { tshirtImageVersions, imageVersionProps } from './fragments'
export default `
${imageVersionProps}
${tshirtImageVersions}<|fim▁hole|> id
name
slug
level
tileImage { ...tshirtImageVersions }
}
}
`<|fim▁end|> | {
categoryNav { |
<|file_name|>guest.js<|end_file_name|><|fim▁begin|>var express = require('express');
var router = express.Router();
var sqlite3 = require('sqlite3').verbose()
router.get('/', function (req, res) {
var db = new sqlite3.Database('./database.db')
db.serialize(function () {
var query = 'SELECT * FROM guest';
db.all(query, function (err, rows) {
if (err) {
console.log(err)
res.sendStatus(500)
} else {
console.log("Query rows = " + rows.length)
var rowsJson = []
for (var i = 0; i < rows.length; i = i + 1) {
var response = { lastname: '', firstname: '', presentWR: false, presentEvening: false }
response.lastname = rows[i].lastname;
response.firstname = rows[i].firstname;
response.presentWR = (rows[i].presentWR != 0);
response.presentEvening = (rows[i].presentEvening != 0);
rowsJson[rowsJson.length] = response;
}
res.status(200)
res.send(rowsJson)
}
db.close()
});
})
});
router.post('/:lastname/:firstname', function (req, res, next) {
if (typeof req.params.firstname !== 'undefined' && typeof req.params.lastname !== 'undefined') {
var firstname = req.params.firstname;
var lastname = req.params.lastname;
var presentWR = 0;
var presentEvening = 0;
console.log(typeof req.body.presentWR);
console.log(typeof req.body.presentEvening);
if (typeof req.body.presentWR !== 'undefined' && req.body.presentWR == 'true') {
presentWR = 1;
}
if (typeof req.body.presentEvening !== 'undefined' && req.body.presentEvening == 'true') {
presentEvening = 1;
}
var db = new sqlite3.Database('./database.db')
db.serialize(function () {
db.run('INSERT OR REPLACE INTO guest(firstname,lastname,presentWR,presentEvening) VALUES (\'' + firstname + '\',\'' + lastname + '\',' + presentWR + ',' + presentEvening + ')', function (err) {
if (err) {
console.log(err)
res.sendStatus(500)
} else {
res.sendStatus(200);
}
db.close()
})
})
}
else
res.sendStatus(400);
});
router.get('/:lastname/:firstname', function (req, res) {
if (typeof req.params.firstname !== 'undefined' && typeof req.params.lastname !== 'undefined') {
var firstname = req.params.firstname;
var lastname = req.params.lastname;
var response = {lastname:'', firstname:'', presentWR : false, presentEvening : false}
var db = new sqlite3.Database('./database.db')
db.serialize(function () {
var query = 'SELECT * FROM guest WHERE firstname = \'' + firstname + '\' AND lastname = \'' + lastname + '\'';
db.all(query, function (err, rows) {
if (err) {
console.log(err)
res.sendStatus(500)
} else {
console.log("Query rows = " + rows.length)
if (rows.length > 0) {
response.lastname = rows[0].lastname;
response.firstname = rows[0].firstname;
response.presentWR = (rows[0].presentWR != 0);
response.presentEvening = (rows[0].presentEvening != 0);
res.status(200)
res.send(response)
} else {
res.sendStatus(404)
}
}
<|fim▁hole|> else
res.sendStatus(400);
});
router.delete('/:lastname/:firstname', function (req, res) {
if (typeof req.params.firstname !== 'undefined' && typeof req.params.lastname !== 'undefined') {
var firstname = req.params.firstname;
var lastname = req.params.lastname;
var db = new sqlite3.Database('./database.db')
db.serialize(function () {
var query = 'DELETE FROM guest WHERE firstname = \'' + firstname + '\' AND lastname = \'' + lastname + '\'';
db.run(query, function (err) {
if (err) {
console.log(err)
res.sendStatus(500)
} else {
console.log("Changes = " + this.changes)
if (this.changes > 0) {
res.sendStatus(200)
} else {
res.sendStatus(404)
}
}
db.close()
});
})
}
else
res.sendStatus(400);
});
router.put('/:lastname/:firstname', function (req, res) {
if (typeof req.params.firstname !== 'undefined' && typeof req.params.lastname !== 'undefined' && (typeof req.body.presentWR !== 'undefined' || typeof req.body.presentEvening !== 'undefined')) {
var firstname = req.params.firstname;
var lastname = req.params.lastname;
var presentWR = (req.body.presentWR == "true") ? 1 : 0;
var presentEvening = (req.body.presentEvening == "true") ? 1 : 0;
var db = new sqlite3.Database('./database.db')
db.serialize(function () {
var query = 'UPDATE guest SET presentWR=' + presentWR + ',presentEvening=' + presentEvening + ' WHERE firstname = \'' + firstname + '\' AND lastname = \'' + lastname + '\'';
db.run(query, function (err) {
if (err) {
console.log(err)
res.sendStatus(500)
} else {
console.log("Changes = " + this.changes)
if (this.changes > 0) {
res.sendStatus(200)
} else {
res.sendStatus(404)
}
}
db.close()
});
})
}
else
res.sendStatus(400);
});
module.exports = router;<|fim▁end|> | db.close()
});
})
} |
<|file_name|>tasks.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
use std::fmt;
use crate::intrinsics::Intrinsics;
use crate::python::{Function, TypeId};
use crate::selectors::{DependencyKey, Get, Select};
use indexmap::IndexSet;
use log::Level;
use rule_graph::{DisplayForGraph, DisplayForGraphArgs, Query};
#[derive(Eq, Hash, PartialEq, Clone, Debug)]
pub enum Rule {
// Intrinsic rules are implemented in rust.
Intrinsic(Intrinsic),
// Task rules are implemented in python.
Task(Task),
}
impl DisplayForGraph for Rule {
fn fmt_for_graph(&self, display_args: DisplayForGraphArgs) -> String {
match self {
Rule::Task(ref task) => {
let task_name = task.func.full_name();
let product = format!("{}", task.product);
let clause_portion = Self::formatted_select_clause(&task.clause, display_args);
let get_clauses = task
.gets
.iter()
.map(::std::string::ToString::to_string)
.collect::<Vec<_>>();
let get_portion = if get_clauses.is_empty() {
"".to_string()
} else if get_clauses.len() > 1 {
format!(
",{}gets=[{}{}{}]",
display_args.line_separator(),
display_args.optional_line_separator(),
get_clauses.join(&format!(",{}", display_args.line_separator())),
display_args.optional_line_separator(),
)
} else {
format!(", gets=[{}]", get_clauses.join(", "))
};
let rule_type = if task.cacheable {
"rule".to_string()
} else {
"goal_rule".to_string()
};
format!(
"@{}({}({}) -> {}{})",
rule_type, task_name, clause_portion, product, get_portion,
)
}
Rule::Intrinsic(ref intrinsic) => format!(
"@rule(<intrinsic>({}) -> {})",
Self::formatted_select_clause(&intrinsic.inputs, display_args),
intrinsic.product,
),
}
}
}
impl rule_graph::Rule for Rule {
type TypeId = TypeId;
type DependencyKey = DependencyKey;
fn product(&self) -> TypeId {
match self {
Rule::Task(t) => t.product,
Rule::Intrinsic(i) => i.product,
}
}
fn dependency_keys(&self) -> Vec<DependencyKey> {
match self {
&Rule::Task(Task {
ref clause,
ref gets,
..
}) => clause
.iter()
.map(|t| DependencyKey::JustSelect(Select::new(*t)))
.chain(gets.iter().map(|g| DependencyKey::JustGet(*g)))
.collect(),
&Rule::Intrinsic(Intrinsic { ref inputs, .. }) => inputs
.iter()
.map(|t| DependencyKey::JustSelect(Select::new(*t)))
.collect(),
}
}
fn require_reachable(&self) -> bool {
match self {
&Rule::Task(_) => true,
&Rule::Intrinsic(_) => false,
}
}
fn color(&self) -> Option<rule_graph::Palette> {
match self {
Rule::Task(_) => None,
Rule::Intrinsic(_) => Some(rule_graph::Palette::Gray),
}
}
}
impl Rule {
fn formatted_select_clause(clause: &[TypeId], display_args: DisplayForGraphArgs) -> String {
let select_clauses = clause
.iter()
.map(|type_id| type_id.to_string())
.collect::<Vec<_>>();
if select_clauses.len() > 1 {
format!(
"{}{}{}",
display_args.optional_line_separator(),
select_clauses.join(&format!(",{}", display_args.line_separator())),
display_args.optional_line_separator(),
)
} else {
select_clauses.join(", ")
}
}
}
impl fmt::Display for Rule {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
f,
"{}",
self.fmt_for_graph(DisplayForGraphArgs { multiline: false })
)
}
}
<|fim▁hole|> pub product: TypeId,
pub side_effecting: bool,
pub engine_aware_return_type: bool,
pub clause: Vec<TypeId>,
pub gets: Vec<Get>,
// TODO: This is a preliminary implementation of #12934: we should overhaul naming to
// align Query and @union/Protocol as described there.
pub unions: Vec<Query<Rule>>,
pub func: Function,
pub cacheable: bool,
pub display_info: DisplayInfo,
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct DisplayInfo {
pub name: String,
pub desc: Option<String>,
pub level: Level,
}
#[derive(Eq, Hash, PartialEq, Clone, Debug)]
pub struct Intrinsic {
pub product: TypeId,
pub inputs: Vec<TypeId>,
}
///
/// Registry of native (rust) Intrinsic tasks and user (python) Tasks.
///
#[derive(Clone, Debug)]
pub struct Tasks {
rules: IndexSet<Rule>,
// Used during the construction of a rule.
preparing: Option<Task>,
queries: IndexSet<Query<Rule>>,
}
///
/// A collection of Rules (TODO: rename to Rules).
///
/// Defines a stateful lifecycle for defining tasks via the C api. Call in order:
/// 1. task_begin() - once per task
/// 2. add_*() - zero or more times per task to add input clauses
/// 3. task_end() - once per task
///
/// (This protocol was originally defined in a Builder, but that complicated the C lifecycle.)
///
impl Tasks {
pub fn new() -> Tasks {
Tasks {
rules: IndexSet::default(),
preparing: None,
queries: IndexSet::default(),
}
}
pub fn rules(&self) -> &IndexSet<Rule> {
&self.rules
}
pub fn queries(&self) -> &IndexSet<Query<Rule>> {
&self.queries
}
pub fn intrinsics_set(&mut self, intrinsics: &Intrinsics) {
for intrinsic in intrinsics.keys() {
self.rules.insert(Rule::Intrinsic(intrinsic.clone()));
}
}
///
/// The following methods define the Task registration lifecycle.
///
pub fn task_begin(
&mut self,
func: Function,
return_type: TypeId,
side_effecting: bool,
engine_aware_return_type: bool,
cacheable: bool,
name: String,
desc: Option<String>,
level: Level,
) {
assert!(
self.preparing.is_none(),
"Must `end()` the previous task creation before beginning a new one!"
);
self.preparing = Some(Task {
cacheable,
product: return_type,
side_effecting,
engine_aware_return_type,
clause: Vec::new(),
gets: Vec::new(),
unions: Vec::new(),
func,
display_info: DisplayInfo { name, desc, level },
});
}
pub fn add_get(&mut self, output: TypeId, input: TypeId) {
self
.preparing
.as_mut()
.expect("Must `begin()` a task creation before adding gets!")
.gets
.push(Get { output, input });
}
pub fn add_union(&mut self, product: TypeId, params: Vec<TypeId>) {
let query = Query::new(product, params);
self.queries.insert(query.clone());
self
.preparing
.as_mut()
.expect("Must `begin()` a task creation before adding unions!")
.unions
.push(query);
}
pub fn add_select(&mut self, selector: TypeId) {
self
.preparing
.as_mut()
.expect("Must `begin()` a task creation before adding clauses!")
.clause
.push(selector);
}
pub fn task_end(&mut self) {
// Move the task from `preparing` to the Rules map
let task = self
.preparing
.take()
.expect("Must `begin()` a task creation before ending it!");
self.rules.insert(Rule::Task(task));
}
pub fn query_add(&mut self, product: TypeId, params: Vec<TypeId>) {
self.queries.insert(Query::new(product, params));
}
}<|fim▁end|> | #[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct Task { |
<|file_name|>home.component.ts<|end_file_name|><|fim▁begin|>/**
* Created by kevin on 12/21/16.
*/
import {Component,OnInit} from '@angular/core';
var fa = require("fontawesome");
@Component({
selector: 'home',
templateUrl: './home.component.html',
styleUrls: ['./home.component.css']
})
export class HomeComponent {<|fim▁hole|> console.log(fa.fortAwesome + " Hello World!");
}
}<|fim▁end|> |
ngOnInit() {
console.log(fa("fort-awesome") + " Hello World!"); |
<|file_name|>pytorch_runner.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch
import torch.utils.data
import ray
from ray.experimental.sgd.pytorch import pytorch_utils
from ray.experimental.sgd import utils
logger = logging.getLogger(__name__)
class PyTorchRunner(object):
"""Manages a PyTorch model for training."""
def __init__(self,
model_creator,
data_creator,
optimizer_creator,
config=None,
batch_size=16):
"""Initializes the runner.
Args:
model_creator (dict -> torch.nn.Module): see pytorch_trainer.py.
data_creator (dict -> Dataset, Dataset): see pytorch_trainer.py.
optimizer_creator (torch.nn.Module, dict -> loss, optimizer):
see pytorch_trainer.py.
config (dict): see pytorch_trainer.py.
batch_size (int): see pytorch_trainer.py.
"""
self.model_creator = model_creator
self.data_creator = data_creator
self.optimizer_creator = optimizer_creator
self.config = {} if config is None else config
self.batch_size = batch_size
self.verbose = True
self.epoch = 0
self._timers = {
k: utils.TimerStat(window_size=1)
for k in [
"setup_proc", "setup_model", "get_state", "set_state",
"validation", "training"
]
}
def setup(self):
"""Initializes the model."""
logger.debug("Creating model")
self.model = self.model_creator(self.config)
if torch.cuda.is_available():
self.model = self.model.cuda()
logger.debug("Creating optimizer")
self.criterion, self.optimizer = self.optimizer_creator(
self.model, self.config)
if torch.cuda.is_available():
self.criterion = self.criterion.cuda()
logger.debug("Creating dataset")
self.training_set, self.validation_set = self.data_creator(self.config)
self.train_loader = torch.utils.data.DataLoader(
self.training_set,
batch_size=self.batch_size,
shuffle=True,
num_workers=2,
pin_memory=False)
self.validation_loader = torch.utils.data.DataLoader(
self.validation_set,
batch_size=self.batch_size,
shuffle=True,
num_workers=2,
pin_memory=False)
def get_node_ip(self):
"""Returns the IP address of the current node."""
return ray.services.get_node_ip_address()
def find_free_port(self):
"""Finds a free port on the current node."""
return utils.find_free_port()
def step(self):
"""Runs a training epoch and updates the model parameters."""
logger.debug("Begin Training Epoch {}".format(self.epoch + 1))
with self._timers["training"]:
train_stats = pytorch_utils.train(self.train_loader, self.model,
self.criterion, self.optimizer)
train_stats["epoch"] = self.epoch
self.epoch += 1
train_stats.update(self.stats())
return train_stats
def validate(self):
"""Evaluates the model on the validation data set."""
with self._timers["validation"]:
validation_stats = pytorch_utils.validate(
self.validation_loader, self.model, self.criterion)
validation_stats.update(self.stats())
return validation_stats
def stats(self):
"""Returns a dictionary of statistics collected."""
stats = {"epoch": self.epoch}
for k, t in self._timers.items():
stats[k + "_time_mean"] = t.mean
stats[k + "_time_total"] = t.sum
t.reset()
return stats
def get_state(self):
"""Returns the state of the runner."""
return {
"epoch": self.epoch,
"model": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),<|fim▁hole|>
def set_state(self, state):
"""Sets the state of the model."""
# TODO: restore timer stats
self.model.load_state_dict(state["model"])
self.optimizer.load_state_dict(state["optimizer"])
self.epoch = state["stats"]["epoch"]
def shutdown(self):
"""Attempts to shut down the worker."""
del self.validation_loader
del self.validation_set
del self.train_loader
del self.training_set
del self.criterion
del self.optimizer
del self.model
if torch.cuda.is_available():
torch.cuda.empty_cache()<|fim▁end|> | "stats": self.stats()
} |
<|file_name|>constants.py<|end_file_name|><|fim▁begin|>SEFARIA_API_NODE = "https://www.sefaria.org/api/texts/"
CACHE_MONITOR_LOOP_DELAY_IN_SECONDS = 86400<|fim▁hole|>CACHE_LIFETIME_SECONDS = 604800
category_colors = {
"Commentary": "#4871bf",
"Tanakh": "#004e5f",
"Midrash": "#5d956f",
"Mishnah": "#5a99b7",
"Talmud": "#ccb479",
"Halakhah": "#802f3e",
"Kabbalah": "#594176",
"Philosophy": "#7f85a9",
"Liturgy": "#ab4e66",
"Tanaitic": "#00827f",
"Parshanut": "#9ab8cb",
"Chasidut": "#97b386",
"Musar": "#7c406f",
"Responsa": "#cb6158",
"Apocrypha": "#c7a7b4",
"Other": "#073570",
"Quoting Commentary": "#cb6158",
"Sheets": "#7c406f",
"Community": "#7c406f",
"Targum": "#7f85a9",
"Modern Works": "#7c406f",
"Modern Commentary": "#7c406f",
}
platform_settings = {
"twitter": {
"font_size": 29,
"additional_line_spacing_he": 5,
"additional_line_spacing_en": -10,
"image_width": 506,
"image_height": 253,
"margin": 20,
"category_color_line_width": 7,
"sefaria_branding": False,
"branding_height": 0
},
"facebook": {
"font_size": 70,
"additional_line_spacing_he": 12,
"additional_line_spacing_en": -20,
"image_width": 1200,
"image_height": 630,
"margin": 40,
"category_color_line_width": 15,
"sefaria_branding": False,
"branding_height": 0
},
"instagram": {
"font_size": 70,
"additional_line_spacing_he": 12,
"additional_line_spacing_en": 0,
"image_width": 1040,
"image_height": 1040,
"margin": 40,
"category_color_line_width": 13,
"sefaria_branding": True,
"branding_height": 100
}
}<|fim▁end|> | |
<|file_name|>create.py<|end_file_name|><|fim▁begin|>from oauth2client.service_account import ServiceAccountCredentials
from googleapiclient.discovery import build
from google.oauth2 import service_account
from common.methods import set_progress
from infrastructure.models import CustomField, Environment
from pathlib import Path
import json, tempfile
import os
import zipfile
import time
import io
from django.conf import settings
from googleapiclient.http import MediaIoBaseUpload
def generate_custom_fields():
CustomField.objects.get_or_create(
name='function_name', defaults={'label': 'function name', 'type': 'STR', 'show_as_attribute': True,
'description': 'Name given to the Google Cloud function'}
)
CustomField.objects.get_or_create(
name='available_memory_mb', defaults={'label': 'Memory', 'type': 'INT', 'show_as_attribute': True,
'description': 'Memory allocated to the Google Cloud function'}
)
CustomField.objects.get_or_create(
name='entry_point', defaults={'label': 'EntryPoint', 'type': 'STR', 'show_as_attribute': True,
'description': 'Name of a function exported by the module specified in '
'directory with source code'}
)
CustomField.objects.get_or_create(
name='runtime', defaults={'label': 'Runtime', 'type': 'STR', 'show_as_attribute': True}
)
CustomField.objects.get_or_create(
name='service_account_email', defaults={'label': 'serviceAccountEmail',
'type': 'STR',
'show_as_attribute': False,
'description':
'Service account that the function will assume as its identity.'}
)
CustomField.objects.get_or_create(
name='https_trigger', defaults={'label': 'HttpsTrigger',
'type': 'STR',
'show_as_attribute': True,
'description':
'Url to trigger the google function'}
)
CustomField.objects.get_or_create(
name='source_archive_url', defaults={'label': 'sourceArchiveUrl',
'type': 'STR',
'show_as_attribute': True,
'description':
'Url to where the source code of the function is located.'}
)
CustomField.objects.get_or_create(
name='google_rh_id', defaults={'label': 'Resource Handler',
'type': 'STR',
'show_as_attribute': False})
FUNCTIONS_VALID_REGIONS = ['us-central1', 'us-east1',
'asia-east2', 'asia-northeast1', 'europe-west1', 'europe-west2']
def generate_options_for_env_id(server=None, **kwargs):
gcp_envs = Environment.objects.filter(
resource_handler__resource_technology__name="Google Cloud Platform")
options = []
for env in gcp_envs:
options.append((env.id, env.name))<|fim▁hole|> raise RuntimeError("No valid Google Cloud Platform resource handlers in CloudBolt")
return options
def generate_options_for_runtime(**kwargs):
return [("nodejs8", "Node JS 8"),
("nodejs10", "Node JS 10"),
("python37", "Python 3.7"),
("go111", "Node JS 8"), ]
def generate_options_for_bucket_to_store_sourcecode(control_value=None, **kwargs):
buckets = []
if control_value:
environment = Environment.objects.get(id=control_value)
project_id=environment.gcp_project
rh = environment.resource_handler.cast()
project=rh.gcp_projects.get(id=project_id).name
storage_client = create_build_client(rh,project_id,'storage')
list_bucket=storage_client.buckets().list(project=project).execute()
buckets = [bucket.get('name') for bucket in list_bucket.get('items')]
return buckets
def generate_options_for_enter_sourcecode_or_bucket_url(**kwargs):
return ['SourceCode', 'BucketUrl']
def generate_options_for_available_memory_mb(**kwargs):
return [
(128, '128 MB'),
(256, '256 MB'),
(512, '512 MB'),
(1024, '1 GB'),
(2048, '2 GB'),
]
def generate_options_for_gcp_region(control_value=None,**kwargs):
if control_value is None:
return []
environment = Environment.objects.get(id=control_value)
project_id=environment.gcp_project
rh = environment.resource_handler.cast()
project=rh.gcp_projects.get(id=environment.gcp_project).name
client = create_build_client(rh,project_id,'cloudfunctions')
locations=client.projects().locations().list(name=f'projects/{project}').execute()
return [region.get('locationId') for region in locations['locations']]
def create_build_client(rh,project_id,servicename):
'''method to create cloud build client for given service'''
account_info = json.loads(rh.gcp_projects.get(id=project_id).service_account_info)
credentials=service_account.Credentials.from_service_account_info(account_info)
client=build(servicename, "v1", credentials=credentials, cache_discovery=False)
return client
def validate_file_name(runtime,filename):
"""
Every runtime has
-specific file that is expected by google cloud functions
"""
runtimes = {
'python37': 'main.py',
'nodejs8': 'index.js',
'nodejs10': 'index.js',
'go111': 'function.go'
}
return (runtimes.get(runtime)==filename)
def create_file_with_sourcecode(sourcecode):
# Creates a temporary file containing the sourcecode passed.
path=sourcecode
filename=Path(sourcecode).name
if path.startswith(settings.MEDIA_URL):
set_progress("Converting relative URL to filesystem path")
path = path.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)
path = os.path.join(settings.MEDIA_ROOT, path)
archive=io.BytesIO()
with zipfile.ZipFile(archive, 'w') as zip_archive:
with open(path, 'r') as file:
zip_file = zipfile.ZipInfo(filename)
zip_archive.writestr(zip_file, file.read())
archive.seek(0)
media=MediaIoBaseUpload(archive, mimetype='application/zip')
return media
def upload_file_to_s3(storage_client, bucket_name, file,func_name):
'''method to upload file in bucket'''
body={'name': func_name}
object=storage_client.objects()
obj_insert=object.insert(bucket=bucket_name,body=body,media_body=file).execute()
return bucket_name+'/'+func_name
def run(resource, logger=None, **kwargs):
environment = Environment.objects.get(id='{{ env_id }}')
function_name = '{{ function_name }}'
source_code = """{{ source_code }}"""
entry_point = '{{ entry_point }}'
available_memory_mb = '{{ available_memory_mb }}'
runtime = '{{ runtime }}'
bucket = '{{ bucket_to_store_sourcecode }}'
cloud_storage_location = '{{ cloud_storage_location }}'
enter_sourcecode_or_bucket_url = "{{enter_sourcecode_or_bucket_url}}"
region = "{{gcp_region}}"
rh = environment.resource_handler.cast()
project = environment.gcp_project
account_info = json.loads(rh.gcp_projects.get(id=project).service_account_info)
project_name=account_info['project_id']
service_name = 'cloudfunctions'
client = create_build_client(rh,project,service_name)
set_progress("Connection to google cloud established")
# validate a file with an extension corresponding to the runtime selected
storage_client = create_build_client(rh,project,'storage')
if not cloud_storage_location:
filename=Path(source_code).name
if validate_file_name(runtime,filename):
sourcecode_location = create_file_with_sourcecode(source_code)
else:
return "FAILURE","Please provide valid file.",""
file_location = upload_file_to_s3(storage_client, bucket, sourcecode_location,function_name)
else:
file_location = cloud_storage_location
# Need a way to be sure upload has completed
time.sleep(5)
body = {
"name": f"projects/{project_name}/locations/{region}/functions/{function_name}",
"httpsTrigger": {
"url": f"https://{region}-{project_name}.cloudfunctions.net/{function_name}"
},
"status": "ACTIVE",
"entryPoint": f"{entry_point}",
"timeout": "60s",
"availableMemoryMb": int(available_memory_mb),
"serviceAccountEmail": account_info.get('client_email'),
"runtime": f"{runtime}",
"sourceArchiveUrl": f"gs://{file_location}",
}
set_progress("Writing file to google cloud function")
result = client.projects().locations().functions().create(
location=f"projects/{project_name}/locations/{region}", body=body).execute()
if result.get('name'):
generate_custom_fields()
resource.name = function_name
resource.google_rh_id = rh.id
resource.function_name = f"projects/{project_name}/locations/{region}/functions/{function_name}"
resource.available_memory_mb = available_memory_mb
resource.entry_point = entry_point
resource.runtime = runtime
resource.service_account_email = rh.serviceaccount
resource.https_trigger = result.get('metadata').get('request').get('httpsTrigger').get('url')
resource.source_archive_url = result.get('metadata').get('request').get('sourceArchiveUrl')
resource.save()
return "SUCCESS", "", ""
return "FAILURE", "", ""<|fim▁end|> | if not options: |
<|file_name|>BaseTaskPool.java<|end_file_name|><|fim▁begin|>package com.superman.letusgo.base;
import java.util.HashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import android.content.Context;
import android.widget.Toast;
import com.superman.letusgo.util.AppClient;
import com.superman.letusgo.util.HttpUtil;
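/**
 * Background task runner: wraps a cached thread pool and executes BaseTask
 * instances, optionally performing an HTTP GET/POST through AppClient before
 * invoking the task's onComplete/onError/onStop callbacks.
 */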
public class BaseTaskPool {
<|fim▁hole|> static private ExecutorService taskPool;
// for HttpUtil.getNetType
private Context context;
public BaseTaskPool (BaseUi ui) {
this.context = ui.getContext();
taskPool = Executors.newCachedThreadPool();
}
// http post task with params
public void addTask (int taskId, String taskUrl, HashMap<String, String> taskArgs, BaseTask baseTask, int delayTime) {
baseTask.setId(taskId);
try {
taskPool.execute(new TaskThread(context, taskUrl, taskArgs, baseTask, delayTime));
} catch (Exception e) {
taskPool.shutdown();
}
}
// http post task without params
public void addTask (int taskId, String taskUrl, BaseTask baseTask, int delayTime) {
baseTask.setId(taskId);
try {
taskPool.execute(new TaskThread(context, taskUrl, null, baseTask, delayTime));
} catch (Exception e) {
taskPool.shutdown();
}
}
// custom task
public void addTask (int taskId, BaseTask baseTask, int delayTime) {
baseTask.setId(taskId);
try {
taskPool.execute(new TaskThread(context, null, null, baseTask, delayTime));
} catch (Exception e) {
taskPool.shutdown();
}
}
// task thread logic
private class TaskThread implements Runnable {
private Context context;
private String taskUrl;
private HashMap<String, String> taskArgs;
private BaseTask baseTask;
private int delayTime = 0;
public TaskThread(Context context, String taskUrl, HashMap<String, String> taskArgs, BaseTask baseTask, int delayTime) {
this.context = context;
this.taskUrl = taskUrl;
this.taskArgs = taskArgs;
this.baseTask = baseTask;
this.delayTime = delayTime;
}
@Override
public void run() {
try {
baseTask.onStart();
String httpResult = null;
// set delay time
if (this.delayTime > 0) {
Thread.sleep(this.delayTime);
}
try {
// remote task
if (this.taskUrl != null) {
// init app client
AppClient client = new AppClient(this.taskUrl);
if (HttpUtil.WAP_INT == HttpUtil.getNetType(context)) {
client.useWap();
}
// http get
if (taskArgs == null) {
httpResult = client.get();
// http post
} else {
httpResult = client.post(this.taskArgs);
}
}
// remote task
if (httpResult != null) {
baseTask.onComplete(httpResult);
// local task
} else {
baseTask.onComplete();
}
} catch (Exception e) {
baseTask.onError(e.getMessage());
}
} catch (Exception e) {
e.printStackTrace();
} finally {
try {
baseTask.onStop();
} catch (Exception e) {
e.printStackTrace();
}
}
}
}
}<|fim▁end|> |
// task thread pool
|
<|file_name|>test_cloud_dns.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import random
import time
import unittest
from mock import call
from mock import patch
from mock import MagicMock as Mock
import pyrax
from pyrax.manager import BaseManager
from pyrax.clouddns import assure_domain
from pyrax.clouddns import CloudDNSClient
from pyrax.clouddns import CloudDNSDomain
from pyrax.clouddns import CloudDNSManager
from pyrax.clouddns import CloudDNSRecord
from pyrax.clouddns import ResultsIterator
from pyrax.clouddns import DomainResultsIterator
from pyrax.clouddns import SubdomainResultsIterator
from pyrax.clouddns import RecordResultsIterator
import pyrax.exceptions as exc
import pyrax.utils as utils
from pyrax import fakes
example_uri = "http://example.com"
class CloudDNSTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(CloudDNSTest, self).__init__(*args, **kwargs)
def setUp(self):
super(CloudDNSTest, self).setUp()
self.client = fakes.FakeDNSClient()
self.client._manager = fakes.FakeDNSManager(self.client)
self.client._manager._set_delay(0.000001)
self.domain = fakes.FakeDNSDomain()
self.domain.manager = self.client._manager
def tearDown(self):
super(CloudDNSTest, self).tearDown()
self.client = None
self.domain = None
def test_assure_domain(self):
@assure_domain
def test(self, domain):
return domain
clt = self.client
dom = self.domain
d1 = test(clt, dom)
self.assertEqual(d1, dom)
self.assertTrue(isinstance(d1, CloudDNSDomain))
def test_assure_domain_id(self):
@assure_domain
def test(self, domain):
return domain
clt = self.client
dom = self.domain
clt._manager._get = Mock(return_value=dom)
d2 = test(clt, dom.id)
self.assertEqual(d2, dom)
self.assertTrue(isinstance(d2, CloudDNSDomain))
def test_assure_domain_name(self):
@assure_domain
def test(self, domain):
return domain
clt = self.client
dom = self.domain
clt._manager._get = Mock(side_effect=exc.NotFound(""))
clt._manager._list = Mock(return_value=[dom])
d3 = test(clt, dom.name)
self.assertEqual(d3, dom)
self.assertTrue(isinstance(d3, CloudDNSDomain))
def test_set_timeout(self):
clt = self.client
mgr = clt._manager
new_timeout = random.randint(0, 99)
clt.set_timeout(new_timeout)
self.assertEqual(mgr._timeout, new_timeout)
def test_set_delay(self):
clt = self.client
mgr = clt._manager
new_delay = random.randint(0, 99)
clt.set_delay(new_delay)
self.assertEqual(mgr._delay, new_delay)
def test_reset_paging_all(self):
clt = self.client
mgr = clt._manager
mgr._paging["domain"]["total_entries"] = 99
mgr._paging["record"]["next_uri"] = example_uri
mgr._reset_paging("all")
self.assertIsNone(mgr._paging["domain"]["total_entries"])
self.assertIsNone(mgr._paging["record"]["next_uri"])
def test_reset_paging_body(self):
clt = self.client
mgr = clt._manager
mgr._paging["domain"]["total_entries"] = 99
mgr._paging["domain"]["next_uri"] = "FAKE"
exp_entries = random.randint(100, 200)
uri_string_next = utils.random_unicode()
next_uri = "%s/domains/%s" % (example_uri, uri_string_next)
uri_string_prev = utils.random_unicode()
prev_uri = "%s/domains/%s" % (example_uri, uri_string_prev)
body = {"totalEntries": exp_entries,
"links": [
{"href": next_uri,
"rel": "next"},
{"href": prev_uri,
"rel": "previous"}]}
mgr._reset_paging("domain", body=body)
self.assertEqual(mgr._paging["domain"]["total_entries"], exp_entries)
self.assertEqual(mgr._paging["domain"]["next_uri"], "/domains/%s" %
uri_string_next)
self.assertEqual(mgr._paging["domain"]["prev_uri"], "/domains/%s" %
uri_string_prev)
def test_get_pagination_qs(self):
clt = self.client
mgr = clt._manager
test_limit = random.randint(1, 100)
test_offset = random.randint(1, 100)
qs = mgr._get_pagination_qs(test_limit, test_offset)
self.assertEqual(qs, "?limit=%s&offset=%s" % (test_limit, test_offset))
def test_manager_list(self):
clt = self.client
mgr = clt._manager
fake_name = utils.random_unicode()
ret_body = {"domains": [{"name": fake_name}]}
clt.method_get = Mock(return_value=({}, ret_body))
ret = clt.list()
self.assertEqual(len(ret), 1)
def test_manager_list_all(self):
clt = self.client
mgr = clt._manager
fake_name = utils.random_unicode()
ret_body = {"domains": [{"name": fake_name}]}
uri_string_next = utils.random_unicode()
next_uri = "%s/domains/%s" % (example_uri, uri_string_next)
mgr.count = 0
def mock_get(uri):
if mgr.count:
return ({}, ret_body)
mgr.count += 1
ret = {"totalEntries": 2,
"links": [
{"href": next_uri,
"rel": "next"}]}
ret.update(ret_body)
return ({}, ret)
clt.method_get = Mock(wraps=mock_get)
ret = mgr._list(example_uri, list_all=True)
self.assertEqual(len(ret), 2)
def test_list_previous_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["domain"]["prev_uri"] = example_uri
mgr._list = Mock()
clt.list_previous_page()
mgr._list.assert_called_once_with(example_uri)
def test_list_previous_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_previous_page)
def test_list_next_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["domain"]["next_uri"] = example_uri
mgr._list = Mock()
clt.list_next_page()
mgr._list.assert_called_once_with(example_uri)
def test_list_next_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_next_page)
def test_list_subdomains_previous_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["subdomain"]["prev_uri"] = example_uri
mgr._list_subdomains = Mock()
clt.list_subdomains_previous_page()
mgr._list_subdomains.assert_called_once_with(example_uri)
def test_list_subdomains_previous_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_subdomains_previous_page)
def test_list_subdomains_next_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["subdomain"]["next_uri"] = example_uri
mgr._list_subdomains = Mock()
clt.list_subdomains_next_page()
mgr._list_subdomains.assert_called_once_with(example_uri)
def test_list_subdomains_next_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_subdomains_next_page)
def test_list_records_previous_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["record"]["prev_uri"] = example_uri
mgr._list_records = Mock()
clt.list_records_previous_page()
mgr._list_records.assert_called_once_with(example_uri)
def test_list_records_previous_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_records_previous_page)
def test_list_records_next_page(self):
clt = self.client
mgr = clt._manager
mgr._paging["record"]["next_uri"] = example_uri
mgr._list_records = Mock()
clt.list_records_next_page()
mgr._list_records.assert_called_once_with(example_uri)
def test_list_records_next_page_fail(self):
clt = self.client
mgr = clt._manager
self.assertRaises(exc.NoMoreResults, clt.list_records_next_page)
def test_manager_get(self):
ret_body = {"recordsList": {
"records": [{
"accountId": "728829",
"created": "2012-09-21T21:32:27.000+0000",
"emailAddress": "[email protected]",
"id": "3448214",
"name": "example.com",
"updated": "2012-09-21T21:35:45.000+0000"
}]}}
mgr = self.client._manager
mgr.api.method_get = Mock(return_value=(None, ret_body))
dom = mgr._get("fake")
self.assertTrue(isinstance(dom, CloudDNSDomain))
def test_manager_create(self):
clt = self.client
mgr = clt._manager
ret_body = {"callbackUrl": example_uri,
"status": "RUNNING"}
mgr.api.method_post = Mock(return_value=(None, ret_body))
stat_body = {"status": "complete",
"response": {mgr.response_key: [{
"accountId": "728829",
"created": "2012-09-21T21:32:27.000+0000",
"emailAddress": "[email protected]",
"id": "3448214",
"name": "example.com",
"updated": "2012-09-21T21:35:45.000+0000"
}]}}
mgr.api.method_get = Mock(return_value=(None, stat_body))
dom = mgr._create("fake", {})
self.assertTrue(isinstance(dom, CloudDNSDomain))
def test_manager_create_error(self):
clt = self.client
mgr = clt._manager
ret_body = {"callbackUrl": example_uri,
"status": "RUNNING"}
mgr.api.method_post = Mock(return_value=(None, ret_body))
stat_body = {"status": "ERROR",
"error": {
"details": "fail",
"code": 666}}
mgr.api.method_get = Mock(return_value=(None, stat_body))
self.assertRaises(exc.DomainCreationFailed, mgr._create, "fake", {})
def test_manager_findall(self):
clt = self.client
mgr = clt._manager
mgr._list = Mock()
mgr.findall(name="fake")
mgr._list.assert_called_once_with("/domains?name=fake", list_all=True)
def test_manager_findall_default(self):
clt = self.client
mgr = clt._manager
sav = BaseManager.findall
BaseManager.findall = Mock()
mgr.findall(foo="bar")
BaseManager.findall.assert_called_once_with(foo="bar")
BaseManager.findall = sav
def test_manager_empty_get_body_error(self):
clt = self.client
mgr = clt._manager
mgr.api.method_get = Mock(return_value=(None, None))
self.assertRaises(exc.ServiceResponseFailure, mgr.list)
def test_create_body(self):
mgr = self.client._manager
fake_name = utils.random_unicode()
body = mgr._create_body(fake_name, "[email protected]")
self.assertEqual(body["domains"][0]["name"], fake_name)
def test_async_call_body(self):
clt = self.client
mgr = clt._manager
body = {"fake": "fake"}
uri = "http://example.com"
callback_uri = "https://fake.example.com/status/fake"
massaged_uri = "/status/fake?showDetails=true"
put_resp = {"callbackUrl": callback_uri,
"status": "RUNNING"}
get_resp = {"response": {"result": "fake"},
"status": "COMPLETE"}
method = "PUT"
clt.method_put = Mock(return_value=({}, put_resp))
clt.method_get = Mock(return_value=({}, get_resp))
ret = mgr._async_call(uri, body=body, method=method)
clt.method_put.assert_called_once_with(uri, body=body)
clt.method_get.assert_called_once_with(massaged_uri)
self.assertEqual(ret, ({}, get_resp["response"]))
def test_async_call_no_body(self):
clt = self.client
mgr = clt._manager
uri = "http://example.com"
callback_uri = "https://fake.example.com/status/fake"
massaged_uri = "/status/fake?showDetails=true"
put_resp = {"callbackUrl": callback_uri,
"status": "RUNNING"}
get_resp = {"response": {"result": "fake"},
"status": "COMPLETE"}
method = "DELETE"
clt.method_delete = Mock(return_value=({}, put_resp))
clt.method_get = Mock(return_value=({}, get_resp))
ret = mgr._async_call(uri, method=method)
clt.method_delete.assert_called_once_with(uri)
clt.method_get.assert_called_once_with(massaged_uri)
self.assertEqual(ret, ({}, get_resp["response"]))
def test_async_call_no_response(self):
clt = self.client
mgr = clt._manager
uri = "http://example.com"
callback_uri = "https://fake.example.com/status/fake"
massaged_uri = "/status/fake?showDetails=true"
put_resp = {"callbackUrl": callback_uri,
"status": "RUNNING"}
get_resp = {"status": "COMPLETE"}
method = "DELETE"
clt.method_delete = Mock(return_value=({}, put_resp))
clt.method_get = Mock(return_value=({}, get_resp))
ret = mgr._async_call(uri, method=method, has_response=False)
clt.method_delete.assert_called_once_with(uri)
clt.method_get.assert_called_once_with(massaged_uri)
self.assertEqual(ret, ({}, get_resp))
def test_async_call_timeout(self):
clt = self.client
mgr = clt._manager
uri = "http://example.com"
callback_uri = "https://fake.example.com/status/fake"
clt.set_timeout(0.000001)
clt.method_get = Mock(return_value=({}, {"callbackUrl": callback_uri,
"status": "RUNNING"}))
self.assertRaises(exc.DNSCallTimedOut, mgr._async_call, uri,
method="GET")
def test_async_call_error(self):
clt = self.client
mgr = clt._manager
uri = "http://example.com"
callback_uri = "https://fake.example.com/status/fake"
massaged_uri = "/status/fake?showDetails=true"
put_resp = {"callbackUrl": callback_uri,
"status": "RUNNING"}
get_resp = {"response": {"result": "fake"},
"status": "ERROR"}
method = "DELETE"
clt.method_delete = Mock(return_value=({}, put_resp))
clt.method_get = Mock(return_value=({}, get_resp))
err_class = exc.DomainRecordDeletionFailed
err = err_class("oops")
mgr._process_async_error = Mock(side_effect=err)
self.assertRaises(err_class,
mgr._async_call, uri, method=method, error_class=err_class)
clt.method_delete.assert_called_once_with(uri)
clt.method_get.assert_called_once_with(massaged_uri)
mgr._process_async_error.assert_called_once_with(get_resp, err_class)
def test_process_async_error(self):
clt = self.client
mgr = clt._manager
err = {"error": {"message": "fake", "details": "", "code": 400}}
err_class = exc.DomainRecordDeletionFailed
self.assertRaises(err_class, mgr._process_async_error, err, err_class)
def test_process_async_error_nested(self):
clt = self.client
mgr = clt._manager
err = {"error": {
"failedItems": {"faults": [
{"message": "fake1", "details": "", "code": 400},
{"message": "fake2", "details": "", "code": 400},
]}}}
err_class = exc.DomainRecordDeletionFailed
self.assertRaises(err_class, mgr._process_async_error, err, err_class)
def test_changes_since(self):
clt = self.client
dom = self.domain
clt.method_get = Mock(return_value=({}, {"changes": ["fake"]}))
dt = "2012-01-01"
ret = clt.changes_since(dom, dt)
uri = "/domains/%s/changes?since=2012-01-01T00:00:00+0000" % dom.id
clt.method_get.assert_called_once_with(uri)
self.assertEqual(ret, ["fake"])
def test_export_domain(self):
clt = self.client
dom = self.domain
export = utils.random_unicode()
clt._manager._async_call = Mock(return_value=({}, {"contents": export}))
ret = clt.export_domain(dom)
uri = "/domains/%s/export" % dom.id
clt._manager._async_call.assert_called_once_with(uri,
error_class=exc.NotFound, method="GET")
self.assertEqual(ret, export)
def test_import_domain(self):
clt = self.client
mgr = clt._manager
data = utils.random_unicode()
mgr._async_call = Mock(return_value=({}, "fake"))
req_body = {"domains": [{
"contentType": "BIND_9",
"contents": data,
}]}
ret = clt.import_domain(data)
mgr._async_call.assert_called_once_with("/domains/import",
method="POST", body=req_body,
error_class=exc.DomainCreationFailed)
def test_update_domain_empty(self):
self.assertRaises(exc.MissingDNSSettings, self.client.update_domain,
self.domain)
def test_update_domain(self):
clt = self.client
dom = self.domain
mgr = clt._manager
emailAddress = None
comment = utils.random_unicode()
ttl = 666
mgr._async_call = Mock(return_value=({}, "fake"))
uri = "/domains/%s" % utils.get_id(dom)
req_body = {"comment": comment,
"ttl": ttl,
}
ret = clt.update_domain(dom, emailAddress, ttl, comment)
mgr._async_call.assert_called_once_with(uri, method="PUT",
body=req_body, error_class=exc.DomainUpdateFailed,
has_response=False)
def test_delete(self):
clt = self.client
mgr = clt._manager
dom = self.domain
mgr._async_call = Mock(return_value=({}, {}))
uri = "/domains/%s" % utils.get_id(dom)
clt.delete(dom)
mgr._async_call.assert_called_once_with(uri, method="DELETE",
error_class=exc.DomainDeletionFailed, has_response=False)
def test_delete_subdomains(self):
clt = self.client
mgr = clt._manager
dom = self.domain
mgr._async_call = Mock(return_value=({}, {}))
uri = "/domains/%s?deleteSubdomains=true" % utils.get_id(dom)
clt.delete(dom, delete_subdomains=True)
mgr._async_call.assert_called_once_with(uri, method="DELETE",
error_class=exc.DomainDeletionFailed, has_response=False)
def test_list_subdomains(self):
clt = self.client
mgr = clt._manager
dom = self.domain
resp_body = {'Something': 'here'}
clt.method_get = Mock(return_value=({}, resp_body))
uri = "/domains?name=%s&limit=5" % dom.name
clt.list_subdomains(dom, limit=5)
clt.method_get.assert_called_once_with(uri)
def test_list_records(self):
clt = self.client
mgr = clt._manager
dom = self.domain
resp_body = {'Something': 'here'}
clt.method_get = Mock(return_value=({}, resp_body))
uri = "/domains/%s/records" % utils.get_id(dom)
clt.list_records(dom)
clt.method_get.assert_called_once_with(uri)
def test_search_records(self):
clt = self.client
mgr = clt._manager
dom = self.domain
typ = "A"
uri = "/domains/%s/records?type=%s" % (utils.get_id(dom), typ)
ret_body = {"records": [{"type": typ}]}
mgr.count = 0
def mock_get(uri):
if mgr.count:
return ({}, ret_body)
mgr.count += 1
ret = {"totalEntries": 2,
"links": [
{"href": uri,
"rel": "next"}]}
ret.update(ret_body)
return ({}, ret)
clt.method_get = Mock(wraps=mock_get)
clt.search_records(dom, typ)
calls = [call(uri), call(uri)]
clt.method_get.assert_has_calls(calls)
def test_search_records_params(self):
clt = self.client
mgr = clt._manager
dom = self.domain
typ = "A"
nm = utils.random_unicode()
data = "0.0.0.0"
resp_body = {"Something": "here"}
clt.method_get = Mock(return_value=({}, resp_body))
uri = "/domains/%s/records?type=%s&name=%s&data=%s" % (
utils.get_id(dom), typ, nm, data)
clt.search_records(dom, typ, name=nm, data=data)
clt.method_get.assert_called_once_with(uri)
def test_find_record(self):
clt = self.client
mgr = clt._manager
dom = self.domain
typ = "A"
nm = utils.random_unicode()
data = "0.0.0.0"
ret_body = {"records": [{
"accountId": "728829",
"created": "2012-09-21T21:32:27.000+0000",
"emailAddress": "[email protected]",
"id": "3448214",
"name": "example.com",
"updated": "2012-09-21T21:35:45.000+0000"
}]}
clt.method_get = Mock(return_value=({}, ret_body))
uri = "/domains/%s/records?type=%s&name=%s&data=%s" % (
utils.get_id(dom), typ, nm, data)
clt.find_record(dom, typ, name=nm, data=data)
clt.method_get.assert_called_once_with(uri)
def test_find_record_not_found(self):
clt = self.client
mgr = clt._manager
dom = self.domain
typ = "A"
nm = utils.random_unicode()
data = "0.0.0.0"
ret_body = {"records": []}
clt.method_get = Mock(return_value=({}, ret_body))
uri = "/domains/%s/records?type=%s&name=%s&data=%s" % (
utils.get_id(dom), typ, nm, data)
self.assertRaises(exc.DomainRecordNotFound, clt.find_record, dom, typ,
name=nm, data=data)
def test_find_record_not_unique(self):
clt = self.client
mgr = clt._manager
dom = self.domain
typ = "A"
nm = utils.random_unicode()
data = "0.0.0.0"
ret_body = {"records": [{
"accountId": "728829",
"created": "2012-09-21T21:32:27.000+0000",
"emailAddress": "[email protected]",
"id": "3448214",
"name": "example.com",
"updated": "2012-09-21T21:35:45.000+0000"
}, {"accountId": "728829",
"created": "2012-09-21T21:32:27.000+0000",
"emailAddress": "[email protected]",
"id": "3448214",
"name": "example.com",
"updated": "2012-09-21T21:35:45.000+0000"
}]}
clt.method_get = Mock(return_value=({}, ret_body))
uri = "/domains/%s/records?type=%s&name=%s&data=%s" % (
utils.get_id(dom), typ, nm, data)
self.assertRaises(exc.DomainRecordNotUnique, clt.find_record, dom, typ,
name=nm, data=data)
def test_add_records(self):
clt = self.client
mgr = clt._manager
dom = self.domain
rec = {"type": "A", "name": "example.com", "data": "0.0.0.0"}
mgr._async_call = Mock(return_value=({}, {}))
uri = "/domains/%s/records" % utils.get_id(dom)
clt.add_records(dom, rec)
mgr._async_call.assert_called_once_with(uri, method="POST",
body={"records": [rec]},
error_class=exc.DomainRecordAdditionFailed,
has_response=False)
def test_get_record(self):
clt = self.client
mgr = clt._manager
dom = self.domain
nm = utils.random_unicode()
rec_id = utils.random_unicode()
rec_dict = {"id": rec_id, "name": nm}
mgr.api.method_get = Mock(return_value=(None, rec_dict))
ret = clt.get_record(dom, rec_id)
mgr.api.method_get.assert_called_once_with("/%s/%s/records/%s" %
(mgr.uri_base, dom.id, rec_id))
def test_update_record(self):
clt = self.client
mgr = clt._manager
dom = self.domain
nm = utils.random_unicode()
rec_id = utils.random_unicode()
rec = fakes.FakeDNSRecord(mgr, {"id": rec_id, "name": nm})
ttl = 9999
data = "0.0.0.0"
mgr._async_call = Mock(return_value=({}, {}))
uri = "/domains/%s/records" % utils.get_id(dom)
req_body = {"id": rec_id, "name": nm, "data": data, "ttl": ttl}
clt.update_record(dom, rec, data=data, ttl=ttl)
mgr._async_call.assert_called_once_with(uri, method="PUT",
body={"records": [req_body]},
error_class=exc.DomainRecordUpdateFailed,
has_response=False)
def test_delete_record(self):
clt = self.client
mgr = clt._manager
dom = self.domain
rec = CloudDNSRecord(mgr, {"id": utils.random_unicode()})
mgr._async_call = Mock(return_value=({}, {}))
uri = "/domains/%s/records/%s" % (utils.get_id(dom), utils.get_id(rec))
clt.delete_record(dom, rec)
mgr._async_call.assert_called_once_with(uri, method="DELETE",
error_class=exc.DomainRecordDeletionFailed,
has_response=False)
def test_resolve_device_type(self):
clt = self.client
mgr = clt._manager
device = fakes.FakeDNSDevice()
typ = mgr._resolve_device_type(device)
self.assertEqual(typ, "loadbalancer")
device = fakes.FakeLoadBalancer()
typ = mgr._resolve_device_type(device)
self.assertEqual(typ, "loadbalancer")
def test_resolve_device_type_invalid(self):
clt = self.client
mgr = clt._manager
device = object()
self.assertRaises(exc.InvalidDeviceType, mgr._resolve_device_type,
device)
def test_get_ptr_details_lb(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
dvc_type = "loadbalancer"
sav = pyrax._get_service_endpoint
pyrax._get_service_endpoint = Mock(return_value=example_uri)
expected_href = "%s/loadbalancers/%s" % (example_uri, dvc.id)
href, svc_name = mgr._get_ptr_details(dvc, dvc_type)
self.assertEqual(svc_name, "cloudLoadBalancers")
self.assertEqual(href, expected_href)
pyrax._get_service_endpoint = sav
def test_list_ptr_records(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
href = "%s/%s" % (example_uri, dvc.id)
svc_name = "cloudServersOpenStack"
uri = "/rdns/%s?href=%s" % (svc_name, href)
mgr._get_ptr_details = Mock(return_value=(href, svc_name))
clt.method_get = Mock(return_value=({}, {"records": []}))
ret = clt.list_ptr_records(dvc)
clt.method_get.assert_called_once_with(uri)
self.assertEqual(ret, [])
def test_list_ptr_records_not_found(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
href = "%s/%s" % (example_uri, dvc.id)
svc_name = "cloudServersOpenStack"
uri = "/rdns/%s?href=%s" % (svc_name, href)
mgr._get_ptr_details = Mock(return_value=(href, svc_name))
clt.method_get = Mock(side_effect=exc.NotFound(""))
ret = clt.list_ptr_records(dvc)
clt.method_get.assert_called_once_with(uri)
self.assertEqual(ret, [])
def test_add_ptr_records(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
href = "%s/%s" % (example_uri, dvc.id)
svc_name = "cloudServersOpenStack"
rec = {"foo": "bar"}
body = {"recordsList": {"records": [rec]},
"link": {"content": "", "href": href, "rel": svc_name}}
uri = "/rdns"
mgr._get_ptr_details = Mock(return_value=(href, svc_name))
mgr._async_call = Mock(return_value=({}, {"records": []}))
clt.add_ptr_records(dvc, rec)
mgr._async_call.assert_called_once_with(uri, body=body,
error_class=exc.PTRRecordCreationFailed, method="POST")
def test_update_ptr_record(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
href = "%s/%s" % (example_uri, dvc.id)
svc_name = "cloudServersOpenStack"
ptr_record = fakes.FakeDNSPTRRecord({"id": utils.random_unicode()})
ttl = 9999
data = "0.0.0.0"
long_comment = "x" * 200
trim_comment = long_comment[:160]
nm = "example.com"
rec = {"name": nm, "id": ptr_record.id, "type": "PTR", "data": data,
"ttl": ttl, "comment": trim_comment}
uri = "/rdns"
body = {"recordsList": {"records": [rec]}, "link": {"content": "",
"href": href, "rel": svc_name}}
mgr._get_ptr_details = Mock(return_value=(href, svc_name))
mgr._async_call = Mock(return_value=({}, {"records": []}))
clt.update_ptr_record(dvc, ptr_record, domain_name=nm, data=data,
ttl=ttl, comment=long_comment)
mgr._async_call.assert_called_once_with(uri, body=body,
error_class=exc.PTRRecordUpdateFailed, method="PUT",
has_response=False)
def test_delete_ptr_records(self):
clt = self.client
mgr = clt._manager
dvc = fakes.FakeDNSDevice()
href = "%s/%s" % (example_uri, dvc.id)
svc_name = "cloudServersOpenStack"
ip_address = "0.0.0.0"
uri = "/rdns/%s?href=%s&ip=%s" % (svc_name, href, ip_address)
mgr._get_ptr_details = Mock(return_value=(href, svc_name))
mgr._async_call = Mock(return_value=({}, {"records": []}))
ret = clt.delete_ptr_records(dvc, ip_address=ip_address)
mgr._async_call.assert_called_once_with(uri,
error_class=exc.PTRRecordDeletionFailed,
method="DELETE", has_response=False)
def test_get_absolute_limits(self):
clt = self.client
rand_limit = utils.random_unicode()
resp = {"limits": {"absolute": rand_limit}}
clt.method_get = Mock(return_value=({}, resp))
ret = clt.get_absolute_limits()
self.assertEqual(ret, rand_limit)
def test_get_rate_limits(self):<|fim▁hole|> resp = {"limits": {"rate": limits}}
resp_limits = [{"uri": "fake1", "limits": 1},
{"uri": "fake2", "limits": 2}]
clt.method_get = Mock(return_value=({}, resp))
ret = clt.get_rate_limits()
self.assertEqual(ret, resp_limits)
def test_results_iterator(self):
clt = self.client
mgr = clt._manager
self.assertRaises(NotImplementedError, ResultsIterator, mgr)
def test_iter(self):
clt = self.client
mgr = clt._manager
res_iter = DomainResultsIterator(mgr)
ret = res_iter.__iter__()
self.assertTrue(ret is res_iter)
def test_iter_next(self):
clt = self.client
mgr = clt._manager
res_iter = DomainResultsIterator(mgr)
clt.method_get = Mock(return_value=({}, {"domains": []}))
self.assertRaises(StopIteration, res_iter.next)
def test_iter_items_first_fetch(self):
clt = self.client
mgr = clt._manager
fake_name = utils.random_unicode()
ret_body = {"domains": [{"name": fake_name}]}
clt.method_get = Mock(return_value=({}, ret_body))
res_iter = DomainResultsIterator(mgr)
ret = res_iter.next()
self.assertTrue(isinstance(ret, CloudDNSDomain))
clt.method_get.assert_called_once_with("/domains")
def test_iter_items_next_fetch(self):
clt = self.client
mgr = clt._manager
fake_name = utils.random_unicode()
ret_body = {"domains": [{"name": fake_name}]}
clt.method_get = Mock(return_value=({}, ret_body))
res_iter = DomainResultsIterator(mgr)
res_iter.next_uri = example_uri
ret = res_iter.next()
self.assertTrue(isinstance(ret, CloudDNSDomain))
def test_iter_items_next_stop(self):
clt = self.client
mgr = clt._manager
res_iter = DomainResultsIterator(mgr)
res_iter.next_uri = None
self.assertRaises(StopIteration, res_iter.next)
def test_subdomain_iter(self):
clt = self.client
mgr = clt._manager
res_iter = SubdomainResultsIterator(mgr)
self.assertEqual(res_iter.paging_service, "subdomain")
def test_record_iter(self):
clt = self.client
mgr = clt._manager
res_iter = RecordResultsIterator(mgr)
self.assertEqual(res_iter.paging_service, "record")
# patch BaseClients method_get to make it always return an empty
# body. client method_get uses super to get at BaseClient's
# method_get.
@patch.object(pyrax.client.BaseClient, "method_get",
new=lambda x, y: (None, None))
def test_client_empty_get_body_error(self):
clt = self.client
self.assertRaises(exc.ServiceResponseFailure, clt.get_absolute_limits)
if __name__ == "__main__":
unittest.main()<|fim▁end|> | clt = self.client
limits = [{"uri": "fake1", "limit": 1},
{"uri": "fake2", "limit": 2}] |
<|file_name|>label.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Tuukka Turto
#
# This file is part of satin-python.
#
# pyherc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyherc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of<|fim▁hole|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with satin-python. If not, see <http://www.gnu.org/licenses/>.
"""
Module for testing labels
"""
from hamcrest.core.base_matcher import BaseMatcher
from hamcrest.core.helpers.wrap_matcher import wrap_matcher
from .enumerators import all_widgets
class LabelMatcher(BaseMatcher):
"""
Check if Widget has label with given text
"""
def __init__(self, text):
"""
Default constructor
"""
super(LabelMatcher, self).__init__()
if hasattr(text, 'matches'):
self.text = text
else:
self.text = wrap_matcher(text)
def _matches(self, item):
"""
Check if matcher matches item
:param item: object to match against
:returns: True if matching, otherwise False
:rtype: Boolean
"""
widgets = all_widgets(item)
for widget in widgets:
if hasattr(widget, 'text') and self.text.matches(widget.text()):
return True
return False
def describe_to(self, description):
"""
Describe this matcher
"""
description.append('Control with label {0}'.format(self.text))
def describe_mismatch(self, item, mismatch_description):
"""
Describe this mismatch
"""
mismatch_description.append(
'QLabel with text {0} was not found'.format(self.text))
def has_label(text):
"""
Check if Widget has label with given text
"""
return LabelMatcher(text)<|fim▁end|> | |
<|file_name|>assignability-trait.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests that type assignability is used to search for instances when<|fim▁hole|>
trait iterable<A> {
fn iterate(&self, blk: &fn(x: &A) -> bool) -> bool;
}
impl<'self,A> iterable<A> for &'self [A] {
fn iterate(&self, f: &fn(x: &A) -> bool) -> bool {
self.iter().advance(f)
}
}
impl<A> iterable<A> for ~[A] {
fn iterate(&self, f: &fn(x: &A) -> bool) -> bool {
self.iter().advance(f)
}
}
fn length<A, T: iterable<A>>(x: T) -> uint {
let mut len = 0;
do x.iterate() |_y| { len += 1; true };
return len;
}
pub fn main() {
let x = ~[0,1,2,3];
// Call a method
do x.iterate() |y| { assert!(x[*y] == *y); true };
// Call a parameterized function
assert_eq!(length(x.clone()), x.len());
// Call a parameterized function, with type arguments that require
// a borrow
assert_eq!(length::<int, &[int]>(x), x.len());
// Now try it with a type that *needs* to be borrowed
let z = [0,1,2,3];
// Call a method
do z.iterate() |y| { assert!(z[*y] == *y); true };
// Call a parameterized function
assert_eq!(length::<int, &[int]>(z), z.len());
}<|fim▁end|> | // making method calls, but only if there aren't any matches without
// it. |
<|file_name|>Admin.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Feb 09, 2018
@author: Tyranic-Moron
"""
from twisted.plugin import IPlugin
from pymoronbot.moduleinterface import IModule
from pymoronbot.modules.commandinterface import BotCommand, admin
from zope.interface import implementer
import re
from collections import OrderedDict
from pymoronbot.response import IRCResponse, ResponseType
@implementer(IPlugin, IModule)
class Admin(BotCommand):
def triggers(self):
return ['admin']
@admin("Only my admins may add new admins!")
def _add(self, message):
"""add <nick/full hostmask> - adds the specified user to the bot admins list.
You can list multiple users to add them all at once.
Nick alone will be converted to a glob hostmask, eg: *!user@host"""
if len(message.ParameterList) < 2:
return IRCResponse(ResponseType.Say,
u"You didn't give me a user to add!",
message.ReplyTo)
for admin in message.ParameterList[1:]:
if message.ReplyTo in self.bot.channels:
if admin in self.bot.channels[message.ReplyTo].Users:
user = self.bot.channels[message.ReplyTo].Users[admin]
admin = u'*!{}@{}'.format(user.User, user.Hostmask)
admins = self.bot.config.getWithDefault('admins', [])
admins.append(admin)
self.bot.config['admins'] = admins
self.bot.config.writeConfig()
return IRCResponse(ResponseType.Say,
u"Added specified users as bot admins!",
message.ReplyTo)
@admin("Only my admins may remove admins!")
def _del(self, message):
"""del <full hostmask> - removes the specified user from the bot admins list.
You can list multiple users to remove them all at once."""
if len(message.ParameterList) < 2:
return IRCResponse(ResponseType.Say,
u"You didn't give me a user to remove!",
message.ReplyTo)
deleted = []
skipped = []
admins = self.bot.config.getWithDefault('admins', [])
for admin in message.ParameterList[1:]:
if message.ReplyTo in self.bot.channels:
if admin in self.bot.channels[message.ReplyTo].Users:
user = self.bot.channels[message.ReplyTo].Users[admin]
admin = u'*!{}@{}'.format(user.User, user.Hostmask)
if admin not in admins:
skipped.append(admin)
continue
admins.remove(admin)
deleted.append(admin)
self.bot.config['admins'] = admins
self.bot.config.writeConfig()
return IRCResponse(ResponseType.Say,
u"Removed '{}' as admin(s), {} skipped"
.format(u', '.join(deleted), len(skipped)),
message.ReplyTo)
def _list(self, message):
"""list - lists all admins"""
owners = self.bot.config.getWithDefault('owners', [])
admins = self.bot.config.getWithDefault('admins', [])
return IRCResponse(ResponseType.Say,
u"Owners: {} | Admins: {}".format(u', '.join(owners),
u', '.join(admins)),
message.ReplyTo)
subCommands = OrderedDict([
(u'add', _add),
(u'del', _del),
(u'list', _list)])
def help(self, query):
"""
@type query: list[str]
@rtype str
"""
if len(query) > 1:
subCommand = query[1].lower()
if subCommand in self.subCommands:
return u'{1}admin {0}'.format(re.sub(r"\s+", u" ", self.subCommands[subCommand].__doc__),
self.bot.commandChar)
else:
return self._unrecognizedSubcommand(subCommand)<|fim▁hole|> def _helpText(self):
return u"{1}admin ({0}) - manages users with bot admin permissions. " \
u"Use '{1}help admin <subcommand> for subcommand help.".format(u'/'.join(self.subCommands.keys()),
self.bot.commandChar)
def _unrecognizedSubcommand(self, subCommand):
return u"unrecognized subcommand '{}', " \
u"available subcommands for admin are: {}".format(subCommand, u', '.join(self.subCommands.keys()))
def execute(self, message):
if len(message.ParameterList) > 0:
subCommand = message.ParameterList[0].lower()
if subCommand not in self.subCommands:
return IRCResponse(ResponseType.Say,
self._unrecognizedSubcommand(subCommand),
message.ReplyTo)
return self.subCommands[subCommand](self, message)
else:
return IRCResponse(ResponseType.Say,
self._helpText(),
message.ReplyTo)
adminCommand = Admin()<|fim▁end|> | else:
return self._helpText()
|
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os<|fim▁hole|>
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myinventory.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)<|fim▁end|> | import sys |
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "news_project.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other<|fim▁hole|> try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)<|fim▁end|> | # exceptions on Python 2. |
<|file_name|>DrawingUtils.ts<|end_file_name|><|fim▁begin|>/// <reference path="vector2d.ts" />
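// Note: fillCircle draws on a globally scoped CanvasRenderingContext2D named "context",
// which is assumed to be declared elsewhere in the project.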
function fillCircle(x: number, y: number, radius: number, color: string) {
context.beginPath();
context.arc(x, y, radius, 0, Math.PI * 2);
context.fillStyle = color;
context.fill();
}
function fillCircleWithFace(context: CanvasRenderingContext2D, x: number, y: number, radius: number, color: string, face: number) {
fillCircle(x, y, radius, color);
var unit = Vector2D.unitFromAngle(face);
<|fim▁hole|> context.beginPath();
context.moveTo(x - unit.y * radius, y + unit.x * radius);
context.lineTo(x + unit.x * radius * 1.5, y + unit.y * radius * 1.5);
context.lineTo(x + unit.y * radius, y - unit.x * radius);
context.closePath();
context.fill();
}<|fim▁end|> | |
<|file_name|>permissions.py<|end_file_name|><|fim▁begin|>from rest_framework.permissions import BasePermission
<|fim▁hole|>
class IsOwnerOrReadOnly(BasePermission):
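    """Object-level permission that only allows the owner of an object to access it.
    Note: despite the name, read-only (safe-method) requests are not exempted here."""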
def has_object_permission(self, request, view, obj):
return obj.user == request.user<|fim▁end|> | |
<|file_name|>0003_auto_20160810_1219.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-10 04:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0002_auto_20160810_0134'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='content_1',
),
migrations.AddField(<|fim▁hole|> ),
migrations.AlterField(
model_name='article',
name='title',
field=models.CharField(max_length=100, verbose_name='标题'),
),
]<|fim▁end|> | model_name='article',
name='content',
field=models.CharField(default=0, max_length=10000, verbose_name='内容'),
preserve_default=False, |
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
<|fim▁hole|> execute_from_command_line(sys.argv)<|fim▁end|> | from django.core.management import execute_from_command_line
|
<|file_name|>confirm_test.js<|end_file_name|><|fim▁begin|>module("funcunit - jQuery API",{
setup: function() {
S.open("//funcunit/test/confirm.html")
}
})
test("confirm overridden", function(){
S('#confirm').click().wait(1000, function(){
equal(S('#confirm').text(), "I was confirmed", "confirmed overriden to return true");
});
});<|fim▁hole|>
test("alert overridden", function(){
S('#alert').click().wait(1000, function(){
equal(S('#alert').text(), "I was alert", "alert overriden to return true");
});
});<|fim▁end|> | |
<|file_name|>proxy.py<|end_file_name|><|fim▁begin|># HTTP proxy for serving static resources and forwarding requests to Synthese.
# @file proxy.py
# @author Sylvain Pasche
#
# This file belongs to the SYNTHESE project (public transportation specialized software)
# Copyright (C) 2002 Hugues Romain - RCSmobility <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import httplib
import logging
import os
import threading
import time
import urllib
import urlparse
from wsgiref import simple_server
import static
from paste.proxy import parse_headers
from paste.proxy import Proxy
import werkzeug.wsgi
from synthesepy import utils
log = logging.getLogger(__name__)
# The Synthese daemon expects an x-forwarded-host header when getting a proxied
# request. Paste proxy doesn't provide it, so we monkey patch the __call__
# method here. The modifications from Paste are shown in comments.
def paste_proxy_patched_call(self, environ, start_response):
if (self.allowed_request_methods and
environ['REQUEST_METHOD'].lower() not in self.allowed_request_methods):
return httpexceptions.HTTPBadRequest("Disallowed")(environ, start_response)
if self.scheme == 'http':
ConnClass = httplib.HTTPConnection
elif self.scheme == 'https':
ConnClass = httplib.HTTPSConnection
else:
raise ValueError(
"Unknown scheme for %r: %r" % (self.address, self.scheme))
conn = ConnClass(self.host)
headers = {}
for key, value in environ.items():
if key.startswith('HTTP_'):
key = key[5:].lower().replace('_', '-')
if key == 'host' or key in self.suppress_http_headers:
continue
headers[key] = value
headers['host'] = self.host
if 'REMOTE_ADDR' in environ:
headers['x-forwarded-for'] = environ['REMOTE_ADDR']
# synthese modification
if 'HTTP_HOST' in environ:
headers['x-forwarded-host'] = environ['HTTP_HOST']
# end of synthese modification
if environ.get('CONTENT_TYPE'):
headers['content-type'] = environ['CONTENT_TYPE']
if environ.get('CONTENT_LENGTH'):
if environ['CONTENT_LENGTH'] == '-1':
# This is a special case, where the content length is basically undetermined
body = environ['wsgi.input'].read(-1)
headers['content-length'] = str(len(body))
else:
headers['content-length'] = environ['CONTENT_LENGTH']
length = int(environ['CONTENT_LENGTH'])
body = environ['wsgi.input'].read(length)
else:
body = ''
path_info = urllib.quote(environ['PATH_INFO'])
if self.path:
request_path = path_info
if request_path and request_path[0] == '/':
request_path = request_path[1:]
path = urlparse.urljoin(self.path, request_path)
else:
path = path_info
if environ.get('QUERY_STRING'):
path += '?' + environ['QUERY_STRING']
conn.request(environ['REQUEST_METHOD'],
path,
body, headers)
res = conn.getresponse()
headers_out = parse_headers(res.msg)
status = '%s %s' % (res.status, res.reason)
start_response(status, headers_out)
# @@: Default?
length = res.getheader('content-length')
if length is not None:
body = res.read(int(length))
else:
body = res.read()
conn.close()
return [body]
Proxy.__call__ = paste_proxy_patched_call
class WSGIProxy(object):
"""
Runs a HTTP server to serve static files. Requests for the Synthese daemon
are proxied to its configured port.
"""
SYNTHESE_SUFFIXES = ['/synthese', '/synthese3', '/admin']
ADMIN_URL = '/admin/synthese?fonction=admin&mt=17732923532771328&tt=17732923532771329&pt=17732923532771330'
def __init__(self, env, project):
self.env = env
self.proxy_app = Proxy('http://localhost:%s/' % env.c.port)
# import here to avoid circular dependencies.
from synthesepy import web
self.web_app = web.get_application(project=project)
self.static_apps = []
for base, path in env.c.static_paths:
self.static_apps.append((base, static.Cling(path)))
def _redirect(self, environ, start_response, url):
if not url.startswith('http://'):
url = 'http://' + environ['HTTP_HOST'] + url
start_response('302 Found', [
('Location', url),
('Content-type', 'text/plain')])
return '302 Found'
def add_utf8_header(self, start_response):
def start_response_wrapper(status, headers):
headers_dict = dict(headers)
if headers_dict['Content-Type'] == 'text/html':
headers_dict['Content-Type'] = 'text/html; charset=UTF-8'
return start_response(status, headers_dict.items())
return start_response_wrapper
def _handle_static_files(self, environ, start_response):
path_info = environ.get('PATH_INFO', '')
for base, app in self.static_apps:
if not path_info[1:].startswith(base):
continue
path_info = '/' + path_info[1 + len(base):]
environ['PATH_INFO'] = path_info
path = app.root + path_info
if (os.path.isfile(path) or
(path_info.endswith('/') and os.path.isfile(
os.path.join(path, 'index.html')))):
return app(environ, start_response)
log.debug('Path %r not found in any static directories, forwarding '
'to Synthese for Smart URL', path_info)
smart_url_path = path_info[1:]
# Warning: this removes duplicate GET parameters.
qs = dict(urlparse.parse_qsl(environ['QUERY_STRING']))
qs.update(dict(
SERVICE='page',
si=self.env.c.site_id,
smart_url='/' + smart_url_path,
))
environ['PATH_INFO'] = self.SYNTHESE_SUFFIXES[0]
environ['QUERY_STRING'] = urllib.urlencode(qs)
return self.proxy_app(environ, self.add_utf8_header(start_response))
def __call__(self, environ, start_response):
path_info = environ['PATH_INFO']
# Web app
WEB_APP_PREFIX = '/w/'
if path_info.startswith(WEB_APP_PREFIX):
werkzeug.wsgi.pop_path_info(environ)
return self.web_app(environ, start_response)
# Admin redirect helpers.
if path_info in ('/admin', '/admin/'):
return self._redirect(environ, start_response, self.ADMIN_URL)
if path_info.endswith(
tuple(self.SYNTHESE_SUFFIXES + self.env.c.synthese_suffixes)):
return self.proxy_app(environ, self.add_utf8_header(start_response))
return self._handle_static_files(environ, start_response)
# Paste httpd is threaded, which should provide better performance.
USE_PASTE_HTTPD = True
wsgi_httpd = None
def start(env, project):
global wsgi_httpd
if USE_PASTE_HTTPD:
import paste.httpserver
paste_log = logging.getLogger('paste.httpserver.ThreadPool')
paste_log.setLevel(logging.WARNING)
wsgi_httpd = paste.httpserver.serve(
WSGIProxy(env, project), '0.0.0.0', env.c.wsgi_proxy_port, start_loop=False)
else:
wsgi_httpd = simple_server.make_server(
'', env.c.wsgi_proxy_port, WSGIProxy(env))
log.info('WSGI proxy serving on http://localhost:%s' %
env.c.wsgi_proxy_port)
threading.Thread(target=wsgi_httpd.serve_forever).start()
def stop():
global wsgi_httpd
# Shutting down method differs:
# simple_server.simple_server throws an exception when calling
# server_close() and paste.httpd hangs if shutdown() is called.
if USE_PASTE_HTTPD:
wsgi_httpd.server_close()
else:
wsgi_httpd.shutdown()<|fim▁hole|> utils.kill_listening_processes(env.c.wsgi_proxy_port)
if utils.can_connect(env.c.wsgi_proxy_port):
raise Exception(
'Error, something is already listening on port %s',
env.c.wsgi_proxy_port)
proxy = start(env, project)
log.info('Proxy running, press ctrl-c to stop')
try:
while True:
time.sleep(10)
except KeyboardInterrupt:
print '^C'
log.info('Stopping proxy')
stop()<|fim▁end|> |
def serve_forever(env, project): |
<|file_name|>karma.conf.js<|end_file_name|><|fim▁begin|><|fim▁hole|>
files: [
'www/lib/**/*.js',
'www/**/*.js'
],
preprocessors: {
'www/app.js': 'coverage',
'www/**/*.js': 'sourcemap'
},
autoWatch: true,
frameworks: [
'jasmine'
],
browsers: ['Chrome', 'Safari'],
plugins: [
'karma-chrome-launcher',
'karma-coverage',
'karma-jasmine',
'karma-safari-launcher',
'karma-spec-reporter',
'karma-sourcemap-loader'
],
logLevel: 'warn',
loggers: [
{type: 'console'}
],
reporters: ['spec', 'coverage'],
coverageReporter: {
dir: 'coverage/',
reporters: [
{type: 'html', subdir: 'html'},
{type: 'text', subdir: '.', file: 'coverage.txt'},
{type: 'json', subdir: '.', file: 'coverage-karma.json'},
{type: 'text-summary'}
]
}
});
};<|fim▁end|> | module.exports = function (config) {
config.set({
basePath: './', |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>mod case;
mod functions;
mod parse;
mod splitter;
pub(crate) use self::{
parse::parse,
splitter::{StatementError, StatementSplitter},
};
use shell::flow_control::Statement;
/// Parses a given statement string and returns the corresponding mapped
/// `Statement`
pub(crate) fn parse_and_validate<'a>(statement: Result<String, StatementError>) -> Statement {
match statement {
Ok(statement) => parse(statement.as_str()),
Err(err) => {
eprintln!("ion: {}", err);
Statement::Error(-1)
}
}
}
/// Splits a string into two, based on a given pattern. We know that the first string will always
/// exist, but if the pattern is not found, or no string follows the pattern, then the second
/// string will not exist. Useful for splitting the function expression by the "--" pattern.<|fim▁hole|> let args = &arg[..pos].trim();
let comment = &arg[pos + pattern.len()..].trim();
if comment.is_empty() {
(args, None)
} else {
(args, Some(comment))
}
}
None => (arg, None),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn statement_pattern_splitting() {
let (args, description) = split_pattern("a:int b:bool -- a comment", "--");
assert_eq!(args, "a:int b:bool");
assert_eq!(description, Some("a comment"));
let (args, description) = split_pattern("a --", "--");
assert_eq!(args, "a");
assert_eq!(description, None);
let (args, description) = split_pattern("a", "--");
assert_eq!(args, "a");
assert_eq!(description, None);
}
}<|fim▁end|> | pub(crate) fn split_pattern<'a>(arg: &'a str, pattern: &str) -> (&'a str, Option<&'a str>) {
match arg.find(pattern) {
Some(pos) => { |
<|file_name|>mallet_lda_tags.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import logging
import traceback
import mallet_lda
class MalletTagTopics(mallet_lda.MalletLDA):
"""
Topic modeling with separation based on tags
"""
def _basic_params(self):
self.name = 'mallet_lda_tags'
self.categorical = False
self.template_name = 'mallet_lda'
self.dry_run = False
self.topics = 50
self.dfr = len(self.extra_args) > 0
if self.dfr:
self.dfr_dir = self.extra_args[0]
def post_setup(self):<|fim▁hole|> if 'tags' in self.named_args:
self.tags = self.named_args['tags']
for filename in self.metadata.keys():
my_tags = [x for (x, y) in self.tags.iteritems()
if int(self.metadata[filename]['itemID'
]) in y]
if len(my_tags) > 0:
self.metadata[filename]['label'] = my_tags[0]
else:
del self.metadata[filename]
self.files.remove(filename)
if __name__ == '__main__':
try:
processor = MalletTagTopics(track_progress=False)
processor.process()
except:
logging.error(traceback.format_exc())<|fim▁end|> | if self.named_args is not None: |
<|file_name|>vardecl_local_anchor.rs<|end_file_name|><|fim▁begin|>// Checks that the indexer finds and emits nodes for local variables.<|fim▁hole|> let x: u32;
}<|fim▁end|> | //- VarNode.node/kind variable
fn foo() {
//- @x defines/binding VarNode |
<|file_name|>storn.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Author: Patrick Hung (patrickh @caltech)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
__doc__ = _doc = """
This is part of Storn's "Differential Evolution" test suite, as defined
in [2], with 'Corana' function definitions drawn from [3,4], 'Griewangk'
function definitions drawn from [5], and 'Zimmermann' function definitions
drawn from [6].
References::
[1] Storn, R. and Price, K. "Differential Evolution - A Simple and<|fim▁hole|> [2] Storn, R. and Price, K. "Differential Evolution - A Simple and
Efficient Heuristic for Global Optimization over Continuous Spaces"
TR-95-012, ICSI, 1995. http://www.icsi.berkeley.edu/~storn/TR-95-012.pdf
[3] Ingber, L. "Simulated Annealing: Practice Versus Theory" J. of
Mathematical and Computer Modeling 18(11), 29-57, 1993.
[4] Corana, A. and Marchesi, M. and Martini, C. and Ridella, S.
"Minimizing Multimodal Functions of Continuous Variables with the
'Simulated Annealing Algorithm'" ACM Transactions on Mathematical
Software, March, 272-280, 1987.
[5] Griewangk, A.O. "Generalized Descent for Global Optimization"
Journal of Optimization Theory and Applications 34: 11-39, 1981.
[6] Zimmermann, W. "Operations Research" Oldenbourg Munchen, Wien, 1990.
"""
from abstract_model import AbstractFunction
from numpy import asarray
from math import pow, cos, sqrt
from numpy import sign, floor
class Corana(AbstractFunction):
__doc__ = \
"""a Corana's parabola function generator
Corana's parabola function [1,2,3,4] defines a paraboloid whose
axes are parallel to the coordinate axes. This funciton has a
large number of wells that increase in depth with proximity to
the origin. The global minimum is a plateau around the origin.
The generated function f(x) is a modified version of equation (22)
of [2], where len(x) <= 4.
""" + _doc
def __init__(self, ndim=4): # is n-dimensional n=[1,4] (n=4 in ref)
AbstractFunction.__init__(self, ndim=ndim)
return
def function(self,coeffs):
"""evaluates a 4-D Corana's parabola function for a list of coeffs
f(x) = \sum_(i=0)^(3) f_0(x)
Where for \abs(x_i - z_i) < 0.05:
f_0(x) = 0.15*(z_i - 0.05*\sign(z_i))^(2) * d_i
and otherwise:
f_0(x) = d_i * x_(i)^(2),
with z_i = \floor(\abs(x_i/0.2)+0.49999)*\sign(x_i)*0.2
and d_i = 1,1000,10,100.
For len(x) == 1, x = x_0,0,0,0;
for len(x) == 2, x = x_0,0,x_1,0;
for len(x) == 3, x = x_0,0,x_1,x_2;
for len(x) >= 4, x = x_0,x_1,x_2,x_3.
Inspect with mystic_model_plotter using::
mystic.models.corana -b "-1:1:.01, -1:1:.01" -d -x 1
The minimum is f(x)=0 for \abs(x_i) < 0.05 for all i."""
d = [1., 1000., 10., 100.]
_d = [0, 3, 1, 2] # ordering for lower dimensions
#x = asarray(coeffs) #XXX: converting to numpy.array slows by 10x
x = [0.]*4 # ensure that there are 4 coefficients
if len(coeffs) < 4:
_x = x[:]
_x[:len(coeffs)]=coeffs
for i in range(4):
x[_d.index(i)] = _x[i]
else:
x = coeffs
r = 0
for j in range(4):
zj = floor( abs(x[j]/0.2) + 0.49999 ) * sign(x[j]) * 0.2
if abs(x[j]-zj) < 0.05:
r += 0.15 * pow(zj - 0.05*sign(zj), 2) * d[j]
else:
r += d[j] * x[j] * x[j]
return r
minimizers = None #FIXME: degenerate minimum... (-0.05, 0.05)
# minimum is f(x)=0 for \abs(x_i) < 0.05 for all i."""
pass
class Griewangk(AbstractFunction):
__doc__ = \
"""a Griewangk's function generator
Griewangk's function [1,2,5] is a multi-dimensional cosine
function that provides several periodic local minima, with
the global minimum at the origin. The local minima are
fractionally more shallow than the global minimum, such that
when viewed at a very coarse scale the function appears as
a multi-dimensional parabola similar to De Jong's sphere.
The generated function f(x) is a modified version of equation (23)
of [2], where len(x) >= 0.
""" + _doc
def __init__(self, ndim=10): # is n-dimensional (n=10 in ref)
AbstractFunction.__init__(self, ndim=ndim)
return
def function(self,coeffs):
"""evaluates an N-dimensional Griewangk's function for a list of coeffs
f(x) = f_0(x) - f_1(x) + 1
Where:
f_0(x) = \sum_(i=0)^(N-1) x_(i)^(2) / 4000.
and:
f_1(x) = \prod_(i=0)^(N-1) \cos( x_i / (i+1)^(1/2) )
Inspect with mystic_model_plotter using::
mystic.models.griewangk -b "-10:10:.1, -10:10:.1" -d -x 5
The minimum is f(x)=0.0 for x_i=0.0"""
#x = asarray(x) #XXX: converting to numpy.array slows by 10x
term1 = sum([c*c for c in coeffs])/4000.
term2 = 1
for i in range(len(coeffs)):
term2 = term2 * cos( coeffs[i] / sqrt(i+1.0) )
return term1 - term2 + 1
minimizers = [0.] #XXX: there are many periodic local minima
pass
class Zimmermann(AbstractFunction):
__doc__ = \
"""a Zimmermann function generator
A Zimmermann function [1,2,6] poses difficulty for minimizers
as the minimum is located at the corner of the constrained region.
A penalty is applied to all values outside the constrained region,
creating a local minimum.
The generated function f(x) is a modified version of equation (24-26)
of [2], and requires len(x) == 2.
""" + _doc
def __init__(self, ndim=2):
AbstractFunction.__init__(self, ndim=ndim)
return
def function(self,coeffs):
"""evaluates a Zimmermann function for a list of coeffs
f(x) = max(f_0(x), p_i(x)), with i = 0,1,2,3
Where:
f_0(x) = 9 - x_0 - x_1
and for x_0 < 0:
p_0(x) = -100 * x_0
and for x_1 < 0:
p_1(x) = -100 * x_1
and for c_2(x) > 16 and c_3(x) > 14:
p_i(x) = 100 * c_i(x), with i = 2,3
c_2(x) = (x_0 - 3)^2 + (x_1 - 2)^2
c_3(x) = x_0 * x_1
Otherwise, p_i(x)=0 for i=0,1,2,3 and c_i(x)=0 for i=2,3.
Inspect with mystic_model_plotter using::
mystic.models.zimmermann -b "-5:10:.1, -5:10:.1" -d -x 1
The minimum is f(x)=0.0 at x=(7.0,2.0)"""
x0, x1 = coeffs #must provide 2 values (x0,y0)
f8 = 9 - x0 - x1
#XXX: apply penalty p(k) = 100 + 100*k; k = |f(x) - c(x)|
c0,c1,c2,c3 = 0,0,0,0
if x0 < 0: c0 = -100 * x0
if x1 < 0: c1 = -100 * x1
xx = (x0-3.)*(x0-3) + (x1-2.)*(x1-2)
if xx > 16: c2 = 100 * (xx-16)
if x0 * x1 > 14: c3 = 100 * (x0*x1-14.)
return max(f8,c0,c1,c2,c3)
minimizers = [(7., 2.), (2.35477650, 5.94832200)]
#minima = [0.0, 0.69690150]
pass
# cleanup
del _doc
# prepared instances
corana = Corana().function
griewangk = Griewangk().function
zimmermann = Zimmermann().function
# End of file<|fim▁end|> | Efficient Heuristic for Global Optimization over Continuous Spaces"
Journal of Global Optimization 11: 341-359, 1997.
|
<|file_name|>partition_handler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
from gevent import monkey
monkey.patch_all()
import logging
import gevent
from gevent.coros import BoundedSemaphore
from kafka import KafkaClient, KeyedProducer, SimpleConsumer, common
from uveserver import UVEServer
import os
import json
import copy
import traceback
import uuid
import struct
import socket
import discoveryclient.client as client
from sandesh_common.vns.constants import ALARM_PARTITION_SERVICE_NAME
from pysandesh.util import UTCTimestampUsec
import select
import redis
from collections import namedtuple
PartInfo = namedtuple("PartInfo",["ip_address","instance_id","acq_time","port"])
def sse_pack(d):
"""Pack data in SSE format"""
buffer = ''
for k in ['event','data']:
if k in d.keys():
buffer += '%s: %s\n' % (k, d[k])
return buffer + '\n'
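# Example (illustrative): sse_pack({'event': 'update', 'data': '{"partition": 1}'})
# returns 'event: update\ndata: {"partition": 1}\n\n', i.e. one Server-Sent-Events
# message terminated by a blank line.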
class UveStreamPart(gevent.Greenlet):
def __init__(self, partno, logger, q, pi, rpass):
gevent.Greenlet.__init__(self)
self._logger = logger
self._q = q
self._pi = pi
self._partno = partno
self._rpass = rpass
def syncpart(self, redish):
inst = self._pi.instance_id
part = self._partno
keys = list(redish.smembers("AGPARTKEYS:%s:%d" % (inst, part)))
ppe = redish.pipeline()
for key in keys:
ppe.hgetall("AGPARTVALUES:%s:%d:%s" % (inst, part, key))
pperes = ppe.execute()
idx=0
for res in pperes:
for tk,tv in res.iteritems():
msg = {'event': 'sync', 'data':\
json.dumps({'partition':self._partno,
'key':keys[idx], 'type':tk, 'value':tv})}
self._q.put(sse_pack(msg))
idx += 1
def _run(self):
lredis = None
pb = None
while True:
try:
lredis = redis.StrictRedis(
host=self._pi.ip_address,
port=self._pi.port,
password=self._rpass,
db=2)
pb = lredis.pubsub()
inst = self._pi.instance_id
part = self._partno
pb.subscribe('AGPARTPUB:%s:%d' % (inst, part))
self.syncpart(lredis)
for message in pb.listen():
if message["type"] != "message":
continue
dataline = message["data"]
try:
elems = json.loads(dataline)
except:
self._logger.error("AggUVE Parsing failed: %s" % str(message))
continue
else:
self._logger.error("AggUVE loading: %s" % str(elems))
ppe = lredis.pipeline()
for elem in elems:
# This UVE was deleted
if elem["type"] is None:
ppe.exists("AGPARTVALUES:%s:%d:%s" % \
(inst, part, elem["key"]))
else:
ppe.hget("AGPARTVALUES:%s:%d:%s" % \
(inst, part, elem["key"]), elem["type"])
pperes = ppe.execute()
idx = 0
for elem in elems:
if elem["type"] is None:
msg = {'event': 'update', 'data':\
json.dumps({'partition':part,
'key':elem["key"], 'type':None})}
else:
vjson = pperes[idx]
if vjson is None:
vdata = None
else:
vdata = json.loads(vjson)
msg = {'event': 'update', 'data':\
json.dumps({'partition':part,
'key':elem["key"], 'type':elem["type"],
'value':vdata})}
self._q.put(sse_pack(msg))
idx += 1
except gevent.GreenletExit:
break
except Exception as ex:
template = "Exception {0} in uve stream proc. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s" % \
(messag, traceback.format_exc()))
lredis = None
if pb is not None:
pb.close()
pb = None
gevent.sleep(2)
return None
class UveStreamer(gevent.Greenlet):
def __init__(self, logger, q, rfile, agp_cb, partitions, rpass):
gevent.Greenlet.__init__(self)
self._logger = logger
self._q = q
self._rfile = rfile
self._agp_cb = agp_cb
self._agp = {}
self._parts = {}
self._partitions = partitions
self._rpass = rpass
def _run(self):
inputs = [ self._rfile ]
outputs = [ ]
msg = {'event': 'init', 'data':\
json.dumps({'partitions':self._partitions})}
self._q.put(sse_pack(msg))
while True:
readable, writable, exceptional = select.select(inputs, outputs, inputs, 1)
if (readable or writable or exceptional):
break
newagp = self._agp_cb()
set_new, set_old = set(newagp.keys()), set(self._agp.keys())
intersect = set_new.intersection(set_old)
# deleted parts
for elem in set_old - intersect:
self.partition_stop(elem)
# new parts
for elem in set_new - intersect:
self.partition_start(elem, newagp[elem])
# changed parts
for elem in intersect:
if self._agp[elem] != newagp[elem]:
self.partition_stop(elem)
self.partition_start(elem, newagp[elem])
self._agp = newagp
for part, pi in self._agp.iteritems():
self.partition_stop(part)
def partition_start(self, partno, pi):
self._logger.error("Starting agguve part %d using %s" %( partno, pi))
msg = {'event': 'clear', 'data':\
json.dumps({'partition':partno, 'acq_time':pi.acq_time})}
self._q.put(sse_pack(msg))
self._parts[partno] = UveStreamPart(partno, self._logger,
self._q, pi, self._rpass)
self._parts[partno].start()
def partition_stop(self, partno):
self._logger.error("Stopping agguve part %d" % partno)
self._parts[partno].kill()
self._parts[partno].get()
del self._parts[partno]
class PartitionHandler(gevent.Greenlet):
def __init__(self, brokers, group, topic, logger, limit):
gevent.Greenlet.__init__(self)
self._brokers = brokers
self._group = group
self._topic = topic
self._logger = logger
self._limit = limit
self._uvedb = {}
self._partoffset = 0
self._kfk = None
def msg_handler(self, mlist):
self._logger.info("%s Reading %s" % (self._topic, str(mlist)))
return True
def _run(self):
pcount = 0
while True:
try:
self._logger.error("New KafkaClient %s" % self._topic)
self._kfk = KafkaClient(self._brokers , "kc-" + self._topic)
try:
consumer = SimpleConsumer(self._kfk, self._group, self._topic, buffer_size = 4096*4, max_buffer_size=4096*32)
#except:
except Exception as ex:
template = "Consumer Failure {0} occured. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.info("%s" % messag)
raise RuntimeError(messag)
self._logger.error("Starting %s" % self._topic)
# Find the offset of the last message that has been queued
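# Note (assumption about kafka-python semantics): SimpleConsumer.seek(offset,
# whence) mirrors file seek -- whence=0 is absolute, 1 is relative to the
# current position, 2 is relative to the tail -- so seek(-1, 2) below lands
# just before the newest queued message.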
consumer.seek(-1,2)
try:
mi = consumer.get_message(timeout=0.1)
consumer.commit()
except common.OffsetOutOfRangeError:
mi = None
#import pdb; pdb.set_trace()
self._logger.info("Last Queued for %s is %s" % \
(self._topic,str(mi)))
# start reading from last previously processed message
if mi != None:
consumer.seek(0,1)
else:
consumer.seek(0,0)
if self._limit:
raise gevent.GreenletExit
while True:
try:
mlist = consumer.get_messages(10,timeout=0.5)
if not self.msg_handler(mlist):
raise gevent.GreenletExit
consumer.commit()
pcount += len(mlist)
except TypeError as ex:
self._logger.error("Type Error: %s trace %s" % \
(str(ex.args), traceback.format_exc()))
gevent.sleep(0.1)
except common.FailedPayloadsError as ex:
self._logger.error("Payload Error: %s" % str(ex.args))
gevent.sleep(0.1)
except gevent.GreenletExit:
break
except AssertionError as ex:
self._partoffset = ex
break
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.error("%s : traceback %s" % \
(messag, traceback.format_exc()))
self.stop_partition()
gevent.sleep(2)
self._logger.error("Stopping %s pcount %d" % (self._topic, pcount))
partdb = self.stop_partition()
return self._partoffset, partdb
class UveStreamProc(PartitionHandler):
# Arguments:
#
# brokers : broker list for kafka bootstrap
# partition : partition number
# uve_topic : Topic to consume
# logger : logging object to use
# callback : Callback function for reporting the set of the UVEs
# that may have changed for a given notification
# rsc : Callback function to check on collector status
# and get sync contents for new collectors
# aginst : instance_id of alarmgen
# rport : redis server port
# disc : discovery client to publish to
def __init__(self, brokers, partition, uve_topic, logger, callback,
host_ip, rsc, aginst, rport, disc = None):
super(UveStreamProc, self).__init__(brokers, "workers",
uve_topic, logger, False)
self._uvedb = {}
self._uvein = {}
self._uveout = {}
self._callback = callback
self._partno = partition
self._host_ip = host_ip
self._ip_code, = struct.unpack('>I', socket.inet_pton(
socket.AF_INET, host_ip))
self.disc_rset = set()
self._resource_cb = rsc
self._aginst = aginst
self._disc = disc
self._acq_time = UTCTimestampUsec()
self._rport = rport
def acq_time(self):
return self._acq_time
def resource_check(self, msgs):
'''
This function compares the known collectors with the
list from discovery, and syncs UVE keys accordingly
'''
newset , coll_delete, chg_res = self._resource_cb(self._partno, self.disc_rset, msgs)
for coll in coll_delete:
self._logger.error("Part %d lost collector %s" % (self._partno, coll))
self.stop_partition(coll)
if len(chg_res):
self.start_partition(chg_res)
self.disc_rset = newset
if self._disc:<|fim▁hole|> 'partition' : str(self._partno),
'ip-address': self._host_ip,
'acq-time': str(self._acq_time),
'port':str(self._rport)}
self._disc.publish(ALARM_PARTITION_SERVICE_NAME, data)
def stop_partition(self, kcoll=None):
clist = []
if not kcoll:
clist = self._uvedb.keys()
# If all collectors are being cleared, clear resources too
self.disc_rset = set()
if self._disc:
# TODO: Unpublish instead of setting acq-time to 0
data = { 'instance-id' : self._aginst,
'partition' : str(self._partno),
'ip-address': self._host_ip,
'acq-time': "0",
'port':str(self._rport)}
self._disc.publish(ALARM_PARTITION_SERVICE_NAME, data)
else:
clist = [kcoll]
self._logger.error("Stopping part %d collectors %s" % \
(self._partno,clist))
partdb = {}
chg = {}
for coll in clist:
partdb[coll] = {}
for gen in self._uvedb[coll].keys():
partdb[coll][gen] = {}
for tab in self._uvedb[coll][gen].keys():
for rkey in self._uvedb[coll][gen][tab].keys():
uk = tab + ":" + rkey
chg[uk] = None
partdb[coll][gen][uk] = \
set(self._uvedb[coll][gen][tab][rkey].keys())
del self._uvedb[coll]
self._logger.error("Stopping part %d UVEs %s" % \
(self._partno,str(chg.keys())))
self._callback(self._partno, chg)
return partdb
def start_partition(self, cbdb):
''' This function loads the initial UVE database.
for the partition
'''
self._logger.error("Starting part %d collectors %s" % \
(self._partno, str(cbdb.keys())))
uves = {}
for kcoll,coll in cbdb.iteritems():
self._uvedb[kcoll] = {}
for kgen,gen in coll.iteritems():
self._uvedb[kcoll][kgen] = {}
for kk in gen.keys():
tabl = kk.split(":",1)
tab = tabl[0]
rkey = tabl[1]
if not tab in self._uvedb[kcoll][kgen]:
self._uvedb[kcoll][kgen][tab] = {}
self._uvedb[kcoll][kgen][tab][rkey] = {}
uves[kk] = {}
for typ, contents in gen[kk].iteritems():
self._uvedb[kcoll][kgen][tab][rkey][typ] = {}
self._uvedb[kcoll][kgen][tab][rkey][typ]["c"] = 0
self._uvedb[kcoll][kgen][tab][rkey][typ]["u"] = \
uuid.uuid1(self._ip_code)
uves[kk][typ] = contents
self._logger.error("Starting part %d UVEs %s" % \
(self._partno, str(uves.keys())))
self._callback(self._partno, uves)
def contents(self):
return self._uvedb
def stats(self):
''' Return the UVEKey-Count stats collected over
the last time period for this partition, and
the incoming UVE Notifs as well.
Also, the stats should be cleared to prepare
for the next period of collection.
'''
ret_out = copy.deepcopy(self._uveout)
ret_in = copy.deepcopy(self._uvein)
self._uveout = {}
self._uvein = {}
return ret_in, ret_out
def msg_handler(self, mlist):
self.resource_check(mlist)
for mm in mlist:
if mm is None:
continue
self._logger.debug("%s Reading offset %d" % \
(self._topic, mm.offset))
if not self.msg_handler_single(mm):
self._logger.info("%s could not handle %s" % \
(self._topic, str(mm)))
return False
return True
def msg_handler_single(self, om):
self._partoffset = om.offset
chg = {}
try:
uv = json.loads(om.message.value)
coll = uv["coll"]
gen = uv["gen"]
if not self._uvedb.has_key(coll):
# This partition is not synced yet.
# Ignore this message
self._logger.debug("%s Ignoring UVE %s" % (self._topic, str(om)))
return True
if not self._uvedb[coll].has_key(gen):
self._uvedb[coll][gen] = {}
if (uv["message"] == "UVEUpdate"):
tabl = uv["key"].split(":",1)
tab = tabl[0]
rkey = tabl[1]
if tab not in self._uvedb[coll][gen]:
self._uvedb[coll][gen][tab] = {}
if not rkey in self._uvedb[coll][gen][tab]:
self._uvedb[coll][gen][tab][rkey] = {}
removed = False
# uv["type"] and uv["value"] can be decoded as follows:
# uv["type"] can be one of the following:
# - None # All Types under this UVE are deleted
# uv["value"] will not be present
# (this option is only for agg UVE updates)
# - "<Struct>" # uv["value"] refers to this struct
# uv["value"] can be one of the following:
# - None # This Type has been deleted.
# - {} # The Type has a value, which is
# not available in this message.
# (this option is only for raw UVE updates)
# - {<Value>} # The Value of the Type
# (this option is only for agg UVE updates)
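# Illustrative payloads matching the scheme above (key and struct names are
# made up for the example):
#   raw update:   {"coll": "c1", "gen": "g1", "message": "UVEUpdate",
#                  "key": "Table:name", "type": "SomeStruct", "value": {}}
#   type removed: as above but with "value": null
# Any other "message" value is treated as a generator delete (see the else
# branch below).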
if uv["type"] is None:
# TODO: Handling of delete UVE case
return False
if uv["value"] is None:
if uv["type"] in self._uvedb[coll][gen][tab][rkey]:
del self._uvedb[coll][gen][tab][rkey][uv["type"]]
if not len(self._uvedb[coll][gen][tab][rkey]):
del self._uvedb[coll][gen][tab][rkey]
removed = True
if not removed:
if uv["type"] in self._uvedb[coll][gen][tab][rkey]:
self._uvedb[coll][gen][tab][rkey][uv["type"]]["c"] +=1
else:
self._uvedb[coll][gen][tab][rkey][uv["type"]] = {}
self._uvedb[coll][gen][tab][rkey][uv["type"]]["c"] = 1
self._uvedb[coll][gen][tab][rkey][uv["type"]]["u"] = \
uuid.uuid1(self._ip_code)
chg[uv["key"]] = { uv["type"] : uv["value"] }
# Record stats on UVE Keys being processed
if not self._uveout.has_key(tab):
self._uveout[tab] = {}
if self._uveout[tab].has_key(uv["key"]):
self._uveout[tab][uv["key"]] += 1
else:
self._uveout[tab][uv["key"]] = 1
# Record stats on the input UVE Notifications
if not self._uvein.has_key(tab):
self._uvein[tab] = {}
if not self._uvein[tab].has_key(coll):
self._uvein[tab][coll] = {}
if not self._uvein[tab][coll].has_key(gen):
self._uvein[tab][coll][gen] = {}
if not self._uvein[tab][coll][gen].has_key(uv["type"]):
self._uvein[tab][coll][gen][uv["type"]] = 1
else:
self._uvein[tab][coll][gen][uv["type"]] += 1
else:
# Record stats on UVE Keys being processed
for tab in self._uvedb[coll][gen].keys():
for rkey in self._uvedb[coll][gen][tab].keys():
uk = tab + ":" + rkey
if not self._uveout.has_key(tab):
self._uveout[tab] = {}
if self._uveout[tab].has_key(uk):
self._uveout[tab][uk] += 1
else:
self._uveout[tab][uk] = 1
# when a generator is deleted, we need to
# notify for *ALL* its UVEs
chg[uk] = None
del self._uvedb[coll][gen]
except Exception as ex:
template = "An exception of type {0} in uve proc . Arguments:\n{1!r}"
messag = template.format(type(ex).__name__, ex.args)
self._logger.info("%s" % messag)
return False
else:
self._callback(self._partno, chg)
return True
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
workers = {}
brokers = "localhost:9092,localhost:9093,localhost:9094"
group = "workers"
kafka = KafkaClient(brokers,str(os.getpid()))
cons = SimpleConsumer(kafka, group, "ctrl")
cons.provide_partition_info()
print "Starting control"
end_ready = False
while end_ready == False:
try:
while True:
part, mmm = cons.get_message(timeout=None)
mm = mmm.message
print "Consumed ctrl " + str(mm)
if mm.value == "start":
if workers.has_key(int(mm.key)):
print "Dup partition %s" % mm.key
raise ValueError
else:
ph = UveStreamProc(brokers, int(mm.key), "uve-" + mm.key, "alarm-x" + mm.key, logging)
ph.start()
workers[int(mm.key)] = ph
elif mm.value == "stop":
#import pdb; pdb.set_trace()
if workers.has_key(int(mm.key)):
ph = workers[int(mm.key)]
gevent.kill(ph)
res,db = ph.get()
print "Returned " + str(res)
print "State :"
for k,v in db.iteritems():
print "%s -> %s" % (k,str(v))
del workers[int(mm.key)]
else:
end_ready = True
cons.commit()
gevent.sleep(2)
break
except TypeError:
gevent.sleep(0.1)
except common.FailedPayloadsError as ex:
print "Payload Error: " + str(ex.args)
gevent.sleep(0.1)
lw=[]
for key, value in workers.iteritems():
gevent.kill(value)
lw.append(value)
gevent.joinall(lw)
print "Ending Consumers"<|fim▁end|> | data = { 'instance-id' : self._aginst, |
<|file_name|>find_target.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys<|fim▁hole|>if len(sys.argv) != 3:
print("Find the value keyword in all pairs")
print(("Usage: ", sys.argv[0], "[input] [keyword]"))
exit(1)
find_target_items(sys.argv[1], sys.argv[2])<|fim▁end|> | from common import find_target_items
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>class CheckBase(object):
"""
Base class for checks.
<|fim▁hole|> # pylint: disable=W0105
"""Git hooks to which this class applies. A list of strings."""
def execute(self, hook):
"""
Executes the check.
:param hook: The name of the hook being run.
:type hook: :class:`str`
:returns: ``True`` if the check passed, ``False`` if not.
:rtype: :class:`bool`
"""
pass<|fim▁end|> | """
hooks = [] |
<|file_name|>index.js<|end_file_name|><|fim▁begin|><|fim▁hole|>export default from './unview.container'<|fim▁end|> | |
<|file_name|>losses.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in loss functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.ops.losses import util as tf_losses_util
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.losses.Loss')
class Loss(object):
"""Loss base class.
To be implemented by subclasses:
* `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.
Example subclass implementation:
```python
class MeanSquaredError(Loss):
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.square(y_pred - y_true), axis=-1)
```
When used with `tf.distribute.Strategy`, outside of built-in training loops
such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction
types, and reduce losses explicitly in your training loop. Using 'AUTO' or
'SUM_OVER_BATCH_SIZE' will raise an error.
Please see
https://www.tensorflow.org/tutorials/distribute/custom_training for more
details on this.
You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:
```python
with strategy.scope():
loss_obj = tf.keras.losses.CategoricalCrossentropy(
reduction=tf.keras.losses.Reduction.NONE)
....
loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
(1. / global_batch_size))
```
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: Optional name for the op.
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
losses_utils.ReductionV2.validate(reduction)
self.reduction = reduction
self.name = name
# SUM_OVER_BATCH is only allowed in losses managed by `fit` or
# CannedEstimators.
self._allow_sum_over_batch_size = False
def __call__(self, y_true, y_pred, sample_weight=None):
"""Invokes the `Loss` instance.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
sparse loss functions such as sparse categorical crossentropy where
shape = `[batch_size, d0, .. dN-1]`
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
sample_weight: Optional `sample_weight` acts as a
coefficient for the loss. If a scalar is provided, then the loss is
simply scaled by the given value. If `sample_weight` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is
rescaled by the corresponding element in the `sample_weight` vector. If
the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
broadcasted to this shape), then each loss element of `y_pred` is scaled
by the corresponding value of `sample_weight`. (Note on`dN-1`: all loss
functions reduce by 1 dimension, usually axis=-1.)
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
because all loss functions reduce by 1 dimension, usually axis=-1.)
Raises:
ValueError: If the shape of `sample_weight` is invalid.
"""
# If we are wrapping a lambda function strip '<>' from the name as it is not
# accepted in scope name.
scope_name = 'lambda' if self.name == '<lambda>' else self.name
graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
y_true, y_pred, sample_weight)
with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:
losses = self.call(y_true, y_pred)
return losses_utils.compute_weighted_loss(
losses, sample_weight, reduction=self._get_reduction())
@classmethod
def from_config(cls, config):
"""Instantiates a `Loss` from its config (output of `get_config()`).
Args:
config: Output of `get_config()`.
Returns:
A `Loss` instance.
"""
return cls(**config)
def get_config(self):
return {'reduction': self.reduction, 'name': self.name}
@abc.abstractmethod
@doc_controls.for_subclass_implementers
def call(self, y_true, y_pred):
"""Invokes the `Loss` instance.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
sparse loss functions such as sparse categorical crossentropy where
shape = `[batch_size, d0, .. dN-1]`
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
Returns:
Loss values with the shape `[batch_size, d0, .. dN-1]`.
"""
raise NotImplementedError('Must be implemented in subclasses.')
def _get_reduction(self):
"""Handles `AUTO` reduction cases and returns the reduction value."""
if (not self._allow_sum_over_batch_size and
distribution_strategy_context.has_strategy() and
(self.reduction == losses_utils.ReductionV2.AUTO or
self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)):
raise ValueError(
'Please use `tf.keras.losses.Reduction.SUM` or '
'`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '
'used with `tf.distribute.Strategy` outside of the built-in training '
'loops. You can implement '
'`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '
'size like:\n```\nwith strategy.scope():\n'
' loss_obj = tf.keras.losses.CategoricalCrossentropy('
'reduction=tf.keras.losses.Reduction.NONE)\n....\n'
' loss = tf.reduce_sum(loss_obj(labels, predictions)) * '
'(1. / global_batch_size)\n```\nPlease see '
'https://www.tensorflow.org/tutorials/distribute/custom_training'
' for more details.')
if self.reduction == losses_utils.ReductionV2.AUTO:
return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
return self.reduction
class LossFunctionWrapper(Loss):
"""Wraps a loss function in the `Loss` class.
Args:
fn: The loss function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: (Optional) name for the loss.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
def __init__(self,
fn,
reduction=losses_utils.ReductionV2.AUTO,
name=None,
**kwargs):
super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
self.fn = fn
self._fn_kwargs = kwargs
def call(self, y_true, y_pred):
"""Invokes the `LossFunctionWrapper` instance.
Args:
y_true: Ground truth values.
y_pred: The predicted values.
Returns:
Loss values per sample.
"""
if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true):
y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions(
y_pred, y_true)
return self.fn(y_true, y_pred, **self._fn_kwargs)
def get_config(self):
config = {}
for k, v in six.iteritems(self._fn_kwargs):
config[k] = K.eval(v) if tf_utils.is_tensor_or_variable(v) else v
base_config = super(LossFunctionWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.losses.MeanSquaredError')
class MeanSquaredError(LossFunctionWrapper):
"""Computes the mean of squares of errors between labels and predictions.
`loss = square(y_true - y_pred)`
Usage:
>>> mse = tf.keras.losses.MeanSquaredError()
>>> loss = mse([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]])
>>> loss.numpy()
0.5
>>> loss = mse([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]],
... sample_weight=[0.7, 0.3])
>>> loss.numpy()
0.25
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanSquaredError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_squared_error'):
super(MeanSquaredError, self).__init__(
mean_squared_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanAbsoluteError')
class MeanAbsoluteError(LossFunctionWrapper):
"""Computes the mean of absolute difference between labels and predictions.
`loss = abs(y_true - y_pred)`
Usage:
>>> mae = tf.keras.losses.MeanAbsoluteError()
>>> loss = mae([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]])
>>> loss.numpy()
0.5
>>> loss = mae([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]],
... sample_weight=[0.7, 0.3])
>>> loss.numpy()
0.25
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanAbsoluteError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_absolute_error'):
super(MeanAbsoluteError, self).__init__(
mean_absolute_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(LossFunctionWrapper):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
`loss = 100 * abs(y_true - y_pred) / y_true`
Usage:
>>> mape = tf.keras.losses.MeanAbsolutePercentageError()
>>> loss = mape([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]])
>>> loss.numpy()
500000000.0
>>> loss = mape([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]],
... sample_weight=[0.7, 0.3])
>>> loss.numpy()
250000000.0
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanAbsolutePercentageError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_absolute_percentage_error'):
super(MeanAbsolutePercentageError, self).__init__(
mean_absolute_percentage_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(LossFunctionWrapper):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
`loss = square(log(y_true + 1.) - log(y_pred + 1.))`
Usage:
>>> msle = tf.keras.losses.MeanSquaredLogarithmicError()
>>> loss = msle([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]])
>>> loss.numpy()
0.24022643
>>> loss = msle([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]],
... sample_weight=[0.7, 0.3])
>>> loss.numpy()
0.12011322
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_squared_logarithmic_error'):
super(MeanSquaredLogarithmicError, self).__init__(
mean_squared_logarithmic_error, name=name, reduction=reduction)
@keras_export('keras.losses.BinaryCrossentropy')
class BinaryCrossentropy(LossFunctionWrapper):
"""Computes the cross-entropy loss between true labels and predicted labels.
Use this cross-entropy loss when there are only two label classes (assumed to
be 0 and 1). For each example, there should be a single floating-point value
per prediction.
In the snippet below, each of the four examples has only a single
floating-pointing value, and both `y_pred` and `y_true` have the shape
`[batch_size]`.
Usage:
>>> bce = tf.keras.losses.BinaryCrossentropy()
>>> loss = bce([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
0.81492424
>>> loss = bce([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> loss.numpy()
0.45814526
Usage with the `tf.keras` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.BinaryCrossentropy())
```
Args:
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we assume<|fim▁hole|> label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0, we
compute the loss between the predicted labels and a smoothed version of
the true labels, where the smoothing squeezes the labels towards 0.5.
Larger values of `label_smoothing` correspond to heavier smoothing.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: (Optional) Name for the op.
"""
def __init__(self,
from_logits=False,
label_smoothing=0,
reduction=losses_utils.ReductionV2.AUTO,
name='binary_crossentropy'):
super(BinaryCrossentropy, self).__init__(
binary_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing)
self.from_logits = from_logits
@keras_export('keras.losses.CategoricalCrossentropy')
class CategoricalCrossentropy(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label classes.
We expect labels to be provided in a `one_hot` representation. If you want to
provide labels as integers, please use `SparseCategoricalCrossentropy` loss.
There should be `# classes` floating point values per feature.
In the snippet below, there is `# classes` floating pointing values per
example. The shape of both `y_pred` and `y_true` are
`[batch_size, num_classes]`.
Usage:
>>> cce = tf.keras.losses.CategoricalCrossentropy()
>>> loss = cce([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> loss.numpy()
1.1769392
>>> loss = cce([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=tf.constant([0.3, 0.7]))
>>> loss.numpy()
0.8135988
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.CategoricalCrossentropy())
```
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
**Note: Using from_logits=True is more numerically stable.**
label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
meaning the confidence on label values are relaxed. e.g.
`label_smoothing=0.2` means that we will use a value of `0.1` for label
`0` and `0.9` for label `1`.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
from_logits=False,
label_smoothing=0,
reduction=losses_utils.ReductionV2.AUTO,
name='categorical_crossentropy'):
super(CategoricalCrossentropy, self).__init__(
categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing)
@keras_export('keras.losses.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label classes.
We expect labels to be provided as integers. If you want to provide labels
using `one-hot` representation, please use `CategoricalCrossentropy` loss.
There should be `# classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
In the snippet below, there is a single floating point value per example for
`y_true` and `# classes` floating pointing values per example for `y_pred`.
The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
`[batch_size, num_classes]`.
Usage:
>>> scce = tf.keras.losses.SparseCategoricalCrossentropy()
>>> loss = scce([1, 2], [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> loss.numpy()
1.1769392
>>> loss = scce([1, 2], [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=tf.constant([0.3, 0.7]))
>>> loss.numpy()
0.8135988
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy())
```
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
Note: Using from_logits=True may be more numerically stable.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
from_logits=False,
reduction=losses_utils.ReductionV2.AUTO,
name='sparse_categorical_crossentropy'):
super(SparseCategoricalCrossentropy, self).__init__(
sparse_categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits)
@keras_export('keras.losses.Hinge')
class Hinge(LossFunctionWrapper):
"""Computes the hinge loss between `y_true` and `y_pred`.
`loss = maximum(1 - y_true * y_pred, 0)`
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Usage:
>>> h = tf.keras.losses.Hinge()
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
1.3
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], sample_weight=[1, 0])
>>> loss.numpy()
0.55
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.Hinge())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'):
super(Hinge, self).__init__(hinge, name=name, reduction=reduction)
@keras_export('keras.losses.SquaredHinge')
class SquaredHinge(LossFunctionWrapper):
"""Computes the squared hinge loss between `y_true` and `y_pred`.
`loss = square(maximum(1 - y_true * y_pred, 0))`
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Usage:
>>> h = tf.keras.losses.SquaredHinge()
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
1.86
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], sample_weight=[1, 0])
>>> loss.numpy()
0.73
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.SquaredHinge())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='squared_hinge'):
super(SquaredHinge, self).__init__(
squared_hinge, name=name, reduction=reduction)
@keras_export('keras.losses.CategoricalHinge')
class CategoricalHinge(LossFunctionWrapper):
"""Computes the categorical hinge loss between `y_true` and `y_pred`.
`loss = maximum(neg - pos + 1, 0)`
where `pos = sum(y_true * y_pred)` and `neg = max((1. - y_true) * y_pred)`
Usage:
>>> h = tf.keras.losses.CategoricalHinge()
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
1.4000001
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], sample_weight=[1, 0])
>>> loss.numpy()
0.6
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.CategoricalHinge())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='categorical_hinge'):
super(CategoricalHinge, self).__init__(
categorical_hinge, name=name, reduction=reduction)
@keras_export('keras.losses.Poisson')
class Poisson(LossFunctionWrapper):
"""Computes the Poisson loss between `y_true` and `y_pred`.
`loss = y_pred - y_true * log(y_pred)`
Usage:
>>> p = tf.keras.losses.Poisson()
>>> loss = p([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]])
>>> loss.numpy()
0.49999997
>>> loss = p([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]],
... sample_weight=[1., 0.])
>>> loss.numpy()
0.49999997
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.Poisson())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'):
super(Poisson, self).__init__(poisson, name=name, reduction=reduction)
@keras_export('keras.losses.LogCosh')
class LogCosh(LossFunctionWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
`logcosh = log((exp(x) + exp(-x))/2)`,
where x is the error `y_pred - y_true`.
Usage:
>>> l = tf.keras.losses.LogCosh()
>>> loss = l([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]])
>>> loss.numpy()
0.10844523
>>> loss = l([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]],
... sample_weight=[1., 0.])
>>> loss.numpy()
0.10844523
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.LogCosh())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='logcosh'):
super(LogCosh, self).__init__(logcosh, name=name, reduction=reduction)
@keras_export('keras.losses.KLDivergence')
class KLDivergence(LossFunctionWrapper):
"""Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Usage:
>>> kl = tf.keras.losses.KLDivergence()
>>> loss = kl([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
0.45814306
>>> loss = kl([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> loss.numpy()
0.4581446
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.KLDivergence())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='kullback_leibler_divergence'):
super(KLDivergence, self).__init__(
kullback_leibler_divergence, name=name, reduction=reduction)
@keras_export('keras.losses.Huber')
class Huber(LossFunctionWrapper):
"""Computes the Huber loss between `y_true` and `y_pred`.
For each value x in `error = y_true - y_pred`:
```
loss = 0.5 * x^2 if |x| <= d
loss = 0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
Usage:
>>> h = tf.keras.losses.Huber()
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
0.155
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> loss.numpy()
0.09
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.Huber())
```
Args:
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
delta=1.0,
reduction=losses_utils.ReductionV2.AUTO,
name='huber_loss'):
super(Huber, self).__init__(
huber_loss, name=name, reduction=reduction, delta=delta)
@keras_export('keras.metrics.mean_squared_error',
'keras.metrics.mse',
'keras.metrics.MSE',
'keras.losses.mean_squared_error',
'keras.losses.mse',
'keras.losses.MSE')
def mean_squared_error(y_true, y_pred):
"""Computes the mean squared error between labels and predictions.
After computing the squared distance between the inputs, the mean value over
the last dimension is returned.
`loss = mean(square(y_true - y_pred), axis=-1)`
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)
@keras_export('keras.metrics.mean_absolute_error',
'keras.metrics.mae',
'keras.metrics.MAE',
'keras.losses.mean_absolute_error',
'keras.losses.mae',
'keras.losses.MAE')
def mean_absolute_error(y_true, y_pred):
"""Computes the mean absolute error between labels and predictions.
`loss = abs(y_true - y_pred)`
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
@keras_export('keras.metrics.mean_absolute_percentage_error',
'keras.metrics.mape',
'keras.metrics.MAPE',
'keras.losses.mean_absolute_percentage_error',
'keras.losses.mape',
'keras.losses.MAPE')
def mean_absolute_percentage_error(y_true, y_pred):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
`loss = 100 * abs(y_true - y_pred) / y_true`
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
diff = math_ops.abs(
(y_true - y_pred) / K.maximum(math_ops.abs(y_true), K.epsilon()))
return 100. * K.mean(diff, axis=-1)
@keras_export('keras.metrics.mean_squared_logarithmic_error',
'keras.metrics.msle',
'keras.metrics.MSLE',
'keras.losses.mean_squared_logarithmic_error',
'keras.losses.msle',
'keras.losses.MSLE')
def mean_squared_logarithmic_error(y_true, y_pred):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
`loss = square(log(y_true + 1.) - log(y_pred + 1.))`
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
first_log = math_ops.log(K.maximum(y_pred, K.epsilon()) + 1.)
second_log = math_ops.log(K.maximum(y_true, K.epsilon()) + 1.)
return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1)
def _maybe_convert_labels(y_true):
"""Converts binary labels into -1/1."""
are_zeros = math_ops.equal(y_true, 0)
are_ones = math_ops.equal(y_true, 1)
is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))
def _convert_binary_labels():
# Convert the binary labels to -1 or 1.
return 2. * y_true - 1.
updated_y_true = smart_cond.smart_cond(is_binary,
_convert_binary_labels, lambda: y_true)
return updated_y_true
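# For illustration: binary labels such as [0., 1., 1.] become [-1., 1., 1.],
# while labels that are already -1/1 (or any non-binary values) pass through
# unchanged.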
@keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')
def squared_hinge(y_true, y_pred):
"""Computes the squared hinge loss between `y_true` and `y_pred`.
`loss = square(maximum(1 - y_true * y_pred, 0))`
Args:
y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
If binary (0 or 1) labels are provided we will convert them to -1 or 1.
shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return K.mean(
math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)
@keras_export('keras.metrics.hinge', 'keras.losses.hinge')
def hinge(y_true, y_pred):
"""Computes the hinge loss between `y_true` and `y_pred`.
`loss = maximum(1 - y_true * y_pred, 0)`
Args:
y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
If binary (0 or 1) labels are provided they will be converted to -1 or 1.
shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
@keras_export('keras.losses.categorical_hinge')
def categorical_hinge(y_true, y_pred):
"""Computes the categorical hinge loss between `y_true` and `y_pred`.
`loss = maximum(neg - pos + 1, 0)`
where `pos = sum(y_true * y_pred)` and `neg = max((1. - y_true) * y_pred)`
Args:
y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
If binary (0 or 1) labels are provided they will be converted to -1 or 1.
y_pred: The predicted values.
Returns:
Categorical hinge loss values.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
return math_ops.maximum(0., neg - pos + 1.)
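# Worked example (illustrative): y_true=[0, 1, 0], y_pred=[0.1, 0.7, 0.2]
# gives pos=0.7, neg=0.2 and a loss of max(0, 0.2 - 0.7 + 1) = 0.5.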
def huber_loss(y_true, y_pred, delta=1.0):
"""Computes Huber loss value.
For each value x in `error = y_true - y_pred`:
```
loss = 0.5 * x^2 if |x| <= d
loss = 0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
Returns:
Tensor with one scalar loss entry per sample.
"""
y_pred = math_ops.cast(y_pred, dtype=K.floatx())
y_true = math_ops.cast(y_true, dtype=K.floatx())
error = math_ops.subtract(y_pred, y_true)
abs_error = math_ops.abs(error)
quadratic = math_ops.minimum(abs_error, delta)
linear = math_ops.subtract(abs_error, quadratic)
return math_ops.add(
math_ops.multiply(
ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
math_ops.multiply(quadratic, quadratic)),
math_ops.multiply(delta, linear))
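# Worked example (illustrative, delta=1.0): an error of 0.5 falls in the
# quadratic branch, 0.5 * 0.5**2 = 0.125; an error of 2.0 falls in the linear
# branch, 0.5 * 1**2 + 1 * (2 - 1) = 1.5.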
@keras_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
"""Logarithm of the hyperbolic cosine of the prediction error.
`log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
like the mean squared error, but will not be so strongly affected by the
occasional wildly incorrect prediction.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
def _logcosh(x):
return x + nn.softplus(-2. * x) - math_ops.cast(math_ops.log(2.), x.dtype)
return K.mean(_logcosh(y_pred - y_true), axis=-1)
@keras_export('keras.metrics.categorical_crossentropy',
'keras.losses.categorical_crossentropy')
def categorical_crossentropy(y_true,
y_pred,
from_logits=False,
label_smoothing=0):
"""Computes the categorical crossentropy loss.
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
Returns:
Categorical crossentropy loss value.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())
def _smooth_labels():
num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype)
return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)
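# For illustration, with 3 classes and label_smoothing=0.2 a one-hot row
# [0, 1, 0] becomes roughly [0.067, 0.867, 0.067] (each entry is
# y * 0.8 + 0.2 / 3).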
y_true = smart_cond.smart_cond(label_smoothing,
_smooth_labels, lambda: y_true)
return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
@keras_export('keras.metrics.sparse_categorical_crossentropy',
'keras.losses.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
"""Computes the sparse categorical crossentropy loss.
Args:
y_true: Ground truth values.
y_pred: The predicted values.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
axis: (Optional) Defaults to -1. The dimension along which the entropy is
computed.
Returns:
Sparse categorical crossentropy loss value.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.sparse_categorical_crossentropy(
y_true, y_pred, from_logits=from_logits, axis=axis)
@keras_export('keras.metrics.binary_crossentropy',
'keras.losses.binary_crossentropy')
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
"""Computes the binary crossentropy loss.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
Returns:
Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())
def _smooth_labels():
return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
y_true = smart_cond.smart_cond(label_smoothing,
_smooth_labels, lambda: y_true)
return K.mean(
K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
@keras_export('keras.metrics.kullback_leibler_divergence',
'keras.metrics.kld',
'keras.metrics.KLD',
'keras.losses.kullback_leibler_divergence',
'keras.losses.kld',
'keras.losses.KLD')
def kullback_leibler_divergence(y_true, y_pred):
"""Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Usage:
```python
loss = tf.keras.losses.KLD([.4, .9, .2], [.5, .8, .12])
print('Loss: ', loss.numpy()) # Loss: 0.11891246
```
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
Returns:
A `Tensor` with loss.
Raises:
TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)
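# Worked example (illustrative): y_true=[0.5, 0.5], y_pred=[0.25, 0.75] gives
# 0.5*log(2) + 0.5*log(2/3) ~= 0.144.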
@keras_export('keras.metrics.poisson', 'keras.losses.poisson')
def poisson(y_true, y_pred):
"""Computes the Poisson loss between y_true and y_pred.
The Poisson loss is the mean of the elements of the `Tensor`
`y_pred - y_true * log(y_pred)`.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Poisson loss value. shape = `[batch_size, d0, .. dN-1]`.
Raises:
InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
@keras_export(
'keras.losses.cosine_similarity',
v1=[
'keras.metrics.cosine_proximity',
'keras.metrics.cosine',
'keras.losses.cosine_proximity',
'keras.losses.cosine',
'keras.losses.cosine_similarity',
])
def cosine_similarity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions.
Note that it is a number between -1 and 1. When it is a negative number
between -1 and 0, 0 indicates orthogonality and values closer to -1
indicate greater similarity. This makes it usable as a loss function in a
setting where you try to maximize the proximity between predictions and
targets.
`loss = -sum(y_true * y_pred)`
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
axis: Axis along which to determine similarity.
Returns:
Cosine similarity tensor.
"""
y_true = nn.l2_normalize(y_true, axis=axis)
y_pred = nn.l2_normalize(y_pred, axis=axis)
return -math_ops.reduce_sum(y_true * y_pred, axis=axis)
@keras_export('keras.losses.CosineSimilarity')
class CosineSimilarity(LossFunctionWrapper):
"""Computes the cosine similarity between `y_true` and `y_pred`.
`loss = -sum(y_true * y_pred)`
Usage:
>>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
>>> loss = cosine_loss([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
>>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
>>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
>>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
>>> # loss = -mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
>>> #      = -((0. + 0.) + (0.5 + 0.5)) / 2
>>> loss.numpy()
-0.49999997
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
```
Args:
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
axis=-1,
reduction=losses_utils.ReductionV2.AUTO,
name='cosine_similarity'):
super(CosineSimilarity, self).__init__(
cosine_similarity, reduction=reduction, name=name, axis=axis)
# Aliases.
bce = BCE = binary_crossentropy
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence
def is_categorical_crossentropy(loss):
result = ((isinstance(loss, CategoricalCrossentropy) or
(isinstance(loss, LossFunctionWrapper) and
loss.fn == categorical_crossentropy) or
(hasattr(loss, '__name__') and
loss.__name__ == 'categorical_crossentropy') or
(loss == 'categorical_crossentropy')))
return result
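# --- Editor's note: illustrative, not part of the original source. The check
# above returns True for any of the usual ways of specifying the loss, e.g.:
#   is_categorical_crossentropy(CategoricalCrossentropy())    # True
#   is_categorical_crossentropy(categorical_crossentropy)     # True
#   is_categorical_crossentropy('categorical_crossentropy')   # True
#   is_categorical_crossentropy(mean_squared_error)           # False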
@keras_export('keras.losses.serialize')
def serialize(loss):
return serialize_keras_object(loss)
@keras_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
return deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='loss function')
@keras_export('keras.losses.get')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
if isinstance(identifier, dict):
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
'loss function identifier:', identifier)
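# --- Editor's note: illustrative, not part of the original source. `get`
# accepts the three identifier forms handled above, e.g.:
#   get('mse')                        # a built-in name, resolved via deserialize
#   get({'class_name': 'CategoricalCrossentropy', 'config': {}})  # a config dict
#   get(my_custom_loss_fn)            # any callable is returned unchanged
# (`my_custom_loss_fn` is a placeholder for a user-defined function.)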
LABEL_DTYPES_FOR_LOSSES = {
losses_impl.sparse_softmax_cross_entropy: 'int32',
sparse_categorical_crossentropy: 'int32'
}<|fim▁end|> | that `y_pred` contains probabilities (i.e., values in [0, 1]).
Note: Using from_logits=True may be more numerically stable. |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# This setup file is used when running cloud training or cloud dataflow jobs.
from setuptools import setup, find_packages
setup(
name='trainer',
version='1.0.0',
packages=find_packages(),
description='Google Cloud Datalab helper sub-package',
author='Google',
author_email='[email protected]',
keywords=[
],
license="Apache Software License",
long_description="""
""",
install_requires=[
'tensorflow==1.15.2',
'protobuf==3.1.0',<|fim▁hole|> ],
package_data={
},
data_files=[],
)<|fim▁end|> | 'pillow==6.2.0', # ML Engine does not have PIL installed |
<|file_name|>settings.rs<|end_file_name|><|fim▁begin|>use ProtocolEngineBuilder;
use Protocol;
pub trait OptionSetter<T> {
fn set_option(self, T) -> T;
}
#[derive(Clone,Copy,Debug)]
pub struct Bytes(pub usize);
#[derive(Clone,Copy,Debug)]
pub struct Kilobytes(pub usize);
#[derive(Clone,Copy,Debug)]
pub struct Megabytes(pub usize);
pub trait ToBytes {
fn to_bytes(&self) -> Bytes;
}
impl ToBytes for Bytes {
fn to_bytes(&self) -> Bytes {
*self
}
}
impl ToBytes for Kilobytes {
fn to_bytes(&self) -> Bytes {
let Kilobytes(kb) = *self;<|fim▁hole|> }
}
impl ToBytes for Megabytes {
fn to_bytes(&self) -> Bytes {
let Megabytes(mb) = *self;
Bytes(mb * 1_000_000)
}
}
pub struct InitialBufferSize<T>(pub T) where T: ToBytes;
pub struct InitialBufferPoolSize(pub usize);
pub struct MaxBufferPoolSize(pub usize);
impl <P, T> OptionSetter<ProtocolEngineBuilder<P>> for InitialBufferSize<T> where P: Protocol, T: ToBytes {
fn set_option(self, mut builder: ProtocolEngineBuilder<P>) -> ProtocolEngineBuilder<P> {
let InitialBufferSize(size) = self;
let number_of_bytes: Bytes = size.to_bytes();
builder.starting_buffer_size = number_of_bytes;
builder
}
}
impl <P> OptionSetter<ProtocolEngineBuilder<P>> for InitialBufferPoolSize where P: Protocol {
fn set_option(self, mut builder: ProtocolEngineBuilder<P>) -> ProtocolEngineBuilder<P> {
let InitialBufferPoolSize(number_of_buffers) = self;
builder.buffer_pool_size = number_of_buffers;
builder
}
}
impl <P> OptionSetter<ProtocolEngineBuilder<P>> for MaxBufferPoolSize where P: Protocol {
fn set_option(self, mut builder: ProtocolEngineBuilder<P>) -> ProtocolEngineBuilder<P> {
let MaxBufferPoolSize(number_of_buffers) = self;
builder.max_buffer_pool_size = number_of_buffers;
builder
}
}<|fim▁end|> | Bytes(kb * 1_000) |
<|file_name|>cacheprovider.py<|end_file_name|><|fim▁begin|>"""
merged implementation of the cache provider
the name cache was not choosen to ensure pluggy automatically
ignores the external pytest-cache
"""
import py
import pytest
import json
from os.path import sep as _sep, altsep as _altsep
class Cache(object):
def __init__(self, config):
self.config = config
self._cachedir = config.rootdir.join(".cache")
self.trace = config.trace.root.get("cache")
if config.getvalue("cacheclear"):<|fim▁hole|> self._cachedir.remove()
self._cachedir.mkdir()
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files likes e. g. store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
if _sep in name or _altsep is not None and _altsep in name:
raise ValueError("name is not allowed to contain path separators")
return self._cachedir.ensure_dir("d", name)
def _getvaluepath(self, key):
return self._cachedir.join('v', *key.split('/'))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
if path.check():
try:
with path.open("r") as f:
return json.load(f)
except ValueError:
self.trace("cache-invalid at %s" % (path,))
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e. g. lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
path.dirpath().ensure_dir()
except (py.error.EEXIST, py.error.EACCES):
self.config.warn(
code='I9', message='could not create cache path %s' % (path,)
)
return
try:
f = path.open('w')
except py.error.ENOTDIR:
self.config.warn(
code='I9', message='cache could not write path %s' % (path,))
else:
with f:
self.trace("cache-write %s: %r" % (key, value,))
json.dump(value, f, indent=2, sort_keys=True)
class LFPlugin:
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = 'lf', 'failedfirst'
self.active = any(config.getvalue(key) for key in active_keys)
if self.active:
self.lastfailed = config.cache.get("cache/lastfailed", {})
else:
self.lastfailed = {}
def pytest_report_header(self):
if self.active:
if not self.lastfailed:
mode = "run all (no recorded failures)"
else:
mode = "rerun last %d failures%s" % (
len(self.lastfailed),
" first" if self.config.getvalue("failedfirst") else "")
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if report.failed and "xfail" not in report.keywords:
self.lastfailed[report.nodeid] = True
elif not report.failed:
if report.when == "call":
self.lastfailed.pop(report.nodeid, None)
def pytest_collectreport(self, report):
passed = report.outcome in ('passed', 'skipped')
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update(
(item.nodeid, True)
for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active and self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
if not previously_failed and previously_passed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
pass
elif self.config.getvalue("failedfirst"):
items[:] = previously_failed + previously_passed
else:
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
def pytest_sessionfinish(self, session):
config = self.config
if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
return
config.cache.set("cache/lastfailed", self.lastfailed)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
'--lf', '--last-failed', action='store_true', dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)")
group.addoption(
'--ff', '--failed-first', action='store_true', dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown")
group.addoption(
'--cache-show', action='store_true', dest="cacheshow",
help="show cache contents, don't perform collection or tests")
group.addoption(
'--cache-clear', action='store_true', dest="cacheclear",
help="remove all cache contents at start of test run.")
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
return request.config.cache
def pytest_report_header(config):
if config.option.verbose:
relpath = py.path.local().bestrelpath(config.cache._cachedir)
return "cachedir: %s" % relpath
def cacheshow(config, session):
from pprint import pprint
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.check():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir.join("v")
tw.sep("-", "cache values")
for valpath in vdir.visit(lambda x: x.isfile()):
key = valpath.relto(vdir).replace(valpath.sep, "/")
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, "
"will be ignored" % key)
else:
tw.line("%s contains:" % key)
stream = py.io.TextIO()
pprint(val, stream=stream)
for line in stream.getvalue().splitlines():
tw.line(" " + line)
ddir = basedir.join("d")
if ddir.isdir() and ddir.listdir():
tw.sep("-", "cache directories")
for p in basedir.join("d").visit():
#if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.isfile():
key = p.relto(basedir)
tw.line("%s is a file of length %d" % (
key, p.size()))
return 0<|fim▁end|> | self.trace("clearing cachedir")
if self._cachedir.check(): |
<|file_name|>thread.cc<|end_file_name|><|fim▁begin|>#include "macros.h"
#include "thread.h"
using namespace std;
ndb_thread::~ndb_thread()
{
}
<|fim▁hole|> thd_ = std::move(thread(&ndb_thread::run, this));
if (daemon_)
thd_.detach();
}
void
ndb_thread::join()
{
ALWAYS_ASSERT(!daemon_);
thd_.join();
}
// can be overloaded by subclasses
void
ndb_thread::run()
{
ALWAYS_ASSERT(body_);
body_();
}<|fim▁end|> | void
ndb_thread::start()
{ |
<|file_name|>VarToken.java<|end_file_name|><|fim▁begin|>package com.squarespace.template.expr;
import java.util.Arrays;
/**
* Token representing a variable name. Could hold a reference or
* a definition.
*/
public class VarToken extends Token {
public final Object[] name;
public VarToken(Object[] name) {
super(ExprTokenType.VARIABLE);
this.name = name;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof VarToken) {
return Arrays.equals(name, ((VarToken)obj).name);
}
return false;<|fim▁hole|> @Override
public String toString() {
return "VarToken[" + Arrays.toString(name) + "]";
}
}<|fim▁end|> | }
|
<|file_name|>mutate.rs<|end_file_name|><|fim▁begin|>use malachite_base::rational_sequences::RationalSequence;
use malachite_base_test_util::generators::large_type_gen_var_22;
#[test]
pub fn test_mutate() {
fn test(
non_repeating: &[u8],
repeating: &[u8],
index: usize,
new_value: u8,
out: u8,
non_repeating_out: &[u8],
repeating_out: &[u8],
) {
let mut xs = RationalSequence::from_slices(non_repeating, repeating);
assert_eq!(
xs.mutate(index, |x| {
*x = new_value;
out
}),
out
);
assert_eq!(
xs,
RationalSequence::from_slices(non_repeating_out, repeating_out)
);
}
test(&[1, 2, 3], &[], 0, 5, 6, &[5, 2, 3], &[]);
test(&[1, 2, 3], &[], 1, 5, 6, &[1, 5, 3], &[]);
test(&[1, 2, 3], &[], 2, 5, 6, &[1, 2, 5], &[]);
test(
&[1, 2, 3],
&[4, 5, 6],
3,
100,
6,
&[1, 2, 3, 100],
&[5, 6, 4],
);
test(
&[1, 2, 3],
&[4, 5, 6],
10,
100,
6,
&[1, 2, 3, 4, 5, 6, 4, 5, 6, 4, 100],
&[6, 4, 5],
);
}
#[test]
#[should_panic]
fn mutate_fail_1() {
RationalSequence::<u8>::from_vec(vec![]).mutate(0, |_| {});
}
#[test]
#[should_panic]
fn mutate_fail_2() {
RationalSequence::from_vec(vec![1, 2, 3]).mutate(3, |_| {})
}
#[test]
fn mutate_properties() {
large_type_gen_var_22::<u8>().test_properties(|(mut xs, index, y, z)| {
let xs_old = xs.clone();
let x_old = xs[index];
assert_eq!(
xs.mutate(index, |x| {
*x = y;
z
}),
z
);
assert_eq!(xs[index], y);<|fim▁hole|> });
}<|fim▁end|> | xs.mutate(index, |x| {
*x = x_old;
});
assert_eq!(xs, xs_old); |
<|file_name|>zeit.py<|end_file_name|><|fim▁begin|>#/logics/zeit.py
#!/usr/bin/env python
sh_now = sh.now()
debug = False
# Funktionen
def leap_year(year):
if (year % 400 == 0) or ((year % 4 == 0) and not (year % 100 == 0)):
return True
else:
return False
def days_of_month(month, year):
if month in [1, 3, 5, 7, 8, 10, 12]:
days = 31
elif month in [4, 6, 9, 11]:
days = 30
elif leap_year(year):
days = 29
else:
days = 28
return days
def days_of_year(year):
period_end = datetime.datetime(year,12,31)
days_of_year = (period_end - datetime.datetime(period_end.year, 1, 1)).days + 1
return(days_of_year)
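# Editor's note: illustrative checks of the helpers above, not part of the
# original logic:
#   leap_year(2000) -> True, leap_year(1900) -> False
#   days_of_month(2, 2020) -> 29, days_of_month(4, 2021) -> 30
#   days_of_year(2021) -> 365, days_of_year(2020) -> 366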
def day_of_year(year,month,day):
period_end = datetime.datetime(year,month,day)
day_of_year = (period_end - datetime.datetime(period_end.year, 1, 1)).days + 1<|fim▁hole|> print("RUNNING LOGIC OF TIME - REMOVE AFTER DEBUG")
print(sh_now.hour) #Stunde
print(sh_now.minute) #Minute
print(sh_now.second) #Sekunde
print(sh_now.day) #Tag
print(sh_now.month) #Monat
print(sh_now.isoweekday()) #Wochentag
print(sh.now().isocalendar()[1]) #Kalenderwoche
# Sekunde/Minute
sh.second.since.minute(sh_now.second)
sh.second.until.minute(60 - sh_now.second - 1)
# Minute/Stunde
sh.minute.since.hour(sh_now.minute)
sh.minute.until.hour(60 - sh_now.minute - 1)
# Stunde/Tag
sh.hour.since.midnight(sh_now.hour)
sh.hour.until.midnight(24 - sh_now.hour - 1)
# Tag/Woche
sh.day.since.week(sh_now.isoweekday())
sh.day.until.week(7 - sh_now.isoweekday())
# Stunde/Woche
sh.hour.since.week(sh.hour.since.midnight() + (24 * (sh.day.since.week() - 1)))
sh.hour.until.week(sh.hour.until.midnight() + (24 * sh.day.until.week()))
# Kalenderwoche/Jahr
sh.week.since.year(sh.now().isocalendar()[1])
# Monat/Jahr
sh.month.since.year(sh_now.month)
sh.month.until.year(12-sh_now.month)
# Sekunde/Stunde
sh.second.since.hour(sh.second.since.minute() + (60 * sh.minute.since.hour()))
sh.second.until.hour(sh.second.until.minute() + (60 * sh.minute.until.hour()))
# Sekunde/Tag
sh.second.since.midnight(sh.second.since.minute() + (3600 * sh.hour.since.midnight()))
sh.second.until.midnight(sh.second.until.minute() + (3600 * sh.hour.until.midnight()))
# Minute/Tag
sh.minute.since.midnight(sh.minute.since.hour() + (60 * sh.hour.since.midnight()))
sh.minute.until.midnight(sh.minute.until.hour() + (60 * sh.hour.until.midnight()))
# Minute/Woche
sh.minute.since.week(sh.minute.since.hour() + (60 * sh.hour.since.week()))
sh.minute.until.week(sh.minute.until.hour() + (60 * sh.hour.until.week()))
# Sekunde/Woche
sh.second.since.week(sh.second.since.minute() + (60 * sh.minute.since.week()))
sh.second.until.week(sh.second.until.minute() + (60 * sh.minute.until.week()))
# Tage/Monat
sh.day.since.month(sh_now.day - 1)
sh.day.until.month(days_of_month(sh_now.month,sh_now.year) - sh.day.since.month() - 1)
# Tage/Jahr
sh.day.since.year(day_of_year(sh_now.year,sh_now.month,sh_now.day) - 1)
sh.day.until.year(days_of_year(sh_now.year) - sh.day.since.year() - 1)
# Stunde/Monat
sh.hour.since.month((24 * (sh.day.since.month() - 1)) + sh.hour.since.midnight())
sh.hour.until.month((24 * days_of_month(sh_now.month,sh_now.year)) - sh.hour.since.month() - 1)
# Stunde/Jahr
sh.hour.since.year((24 * (sh.day.since.year() -1)) + sh.hour.since.midnight())
sh.hour.until.year((24 * days_of_year(sh_now.year)) - sh.hour.since.year() - 1)
# Minute/Monat
sh.minute.since.month((60 * sh.hour.since.month()) + sh.minute.since.hour())
sh.minute.until.month(sh.minute.since.month() - (60 * sh.hour.until.month()) - 1)
# Minute/Jahr
sh.minute.since.year((60 * sh.hour.since.year()) + sh.minute.since.hour())
sh.minute.until.year((60 * sh.hour.until.year()) + sh.minute.until.hour())
# Sekunde/Monat
sh.second.since.month((60 * sh.minute.since.month()) + sh.second.since.minute())
sh.second.until.month((60 * sh.minute.until.month()) + sh.second.until.minute())
# Sekunde/Jahr
sh.second.since.year((60 * sh.minute.since.year()) + sh.second.since.minute())
sh.second.until.year((60 * sh.minute.until.year()) + sh.second.until.minute())<|fim▁end|> | return(day_of_year)
if debug == True: |
<|file_name|>test_06_missing_splits.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Testing predictions with missing splits
"""
from bigmler.tests.world import (world, common_setup_module,
common_teardown_module, teardown_class)
import bigmler.tests.basic_tst_prediction_steps as test_pred
def setup_module():
"""Setup for the module
"""
common_setup_module()
def teardown_module():
"""Teardown for the module
"""
common_teardown_module()
class TestMissingSplits(object):
def teardown(self):
"""Calling generic teardown for every method
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
teardown_class()
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully building test predictions with missing-splits model:
Given I create BigML resources uploading train "<data>" file to test "<test>" with a missing-splits model and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | output |predictions_file |
| ../data/iris_missing.csv | ../data/test_iris_missing.csv | ./scenario_mspl_1/predictions.csv | ./check_files/predictions_iris_missing.csv |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris_missing.csv', 'data/test_iris_missing.csv', 'scenario_mspl_1/predictions.csv', 'check_files/predictions_iris_missing.csv']]
for example in examples:
print("\nTesting with:\n", example)
test_pred.i_create_all_resources_missing_splits(self, data=example[0], test=example[1], output=example[2])<|fim▁hole|> test_pred.i_check_predictions(self, example[3])
def test_scenario2(self):
"""
Scenario: Successfully building test predictions from scratch:
Given I create BigML resources uploading train "<data>" file to test "<test>" remotely with a missing-splits model and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the model has been created
And I check that the source has been created from the test file
And I check that the dataset has been created from the test file
And I check that the batch prediction has been created
And I check that the predictions are ready
Then the local prediction file is like "<predictions_file>"
Examples:
| data | test | output |predictions_file |
| ../data/iris_missing.csv | ../data/test_iris_missing.csv | ./scenario_mspl_2/predictions.csv | ./check_files/predictions_iris_missing.csv
"""
print(self.test_scenario2.__doc__)
examples = [
['data/iris_missing.csv', 'data/test_iris_missing.csv', 'scenario_mspl_2/predictions.csv', 'check_files/predictions_iris_missing.csv']]
for example in examples:
print("\nTesting with:\n", example)
test_pred.i_create_all_resources_remote_missing_splits(self, data=example[0], test=example[1], output=example[2])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_pred.i_check_create_test_source(self)
test_pred.i_check_create_test_dataset(self)
test_pred.i_check_create_batch_prediction(self)
test_pred.i_check_create_predictions(self)
test_pred.i_check_predictions(self, example[3])<|fim▁end|> | test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_pred.i_check_create_predictions(self) |
<|file_name|>0003_update_keys_to_textversion.py<|end_file_name|><|fim▁begin|>from south.db import db
from django.db import models
from cm.models import *
class Migration:
def forwards(self, orm):
"Write your forwards migration here"
for tv in orm.TextVersion.objects.all():
tv.key = orm.TextVersion.objects._gen_key()
tv.adminkey = orm.TextVersion.objects._gen_adminkey()
tv.save()
def backwards(self, orm):
"Write your backwards migration here"
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'})
},
'cm.activity': {
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cm.Comment']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': 'None', 'max_length': '15', 'null': 'True', 'blank': 'True'}),
'originator_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'originator_activity'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'text': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cm.Text']", 'null': 'True', 'blank': 'True'}),
'text_version': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['cm.TextVersion']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cm.attachment': {
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),<|fim▁hole|> 'data': ('django.db.models.fields.files.FileField', [], {'max_length': '1000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'text_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.TextVersion']"})
},
'cm.comment': {
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'content_html': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'end_offset': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'end_wrapper': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'default': "'markdown'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Comment']", 'null': 'True', 'blank': 'True'}),
'start_offset': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_wrapper': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tags': ('tagging.fields.TagField', [], {}),
'text_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.TextVersion']"}),
'title': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cm.configuration': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'raw_value': ('django.db.models.fields.TextField', [], {})
},
'cm.email': {
'bcc': ('django.db.models.fields.TextField', [], {}),
'body': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_email': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'subject': ('django.db.models.fields.TextField', [], {}),
'to': ('django.db.models.fields.TextField', [], {})
},
'cm.notification': {
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Text']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cm.role': {
'anon': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'global_scope': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']"})
},
'cm.text': {
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'last_text_version': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'related_text'", 'null': 'True', 'to': "orm['cm.TextVersion']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'private_feed_key': ('django.db.models.fields.CharField', [], {'null': 'True', 'default': 'None', 'max_length': '20', 'blank': 'True', 'unique': 'True', 'db_index': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'title': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cm.textversion': {
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'default': "'markdown'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'mod_posteriori': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tags': ('tagging.fields.TagField', [], {'max_length': '1000'}),
'text': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Text']"}),
'title': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cm.userprofile': {
'adminkey': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'allow_contact': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_email_error': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_temp': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'cm.userrole': {
'Meta': {'unique_together': "(('role', 'user', 'text'),)"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Role']", 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cm.Text']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cm']<|fim▁end|> | |
<|file_name|>createStoreSpec.js<|end_file_name|><|fim▁begin|>import expect from 'expect';
import createStore from './createStore';
describe('createStore()', () => {
let store;
beforeEach(() => {
store = createStore();
});
it('should write data and return its key when write() is called', () => {
const hash = store.write({ hello: 'world' });
expect(hash).toBe(store.keys()[0]);
});
it('should return data when read() is called with a valid key', () => {
const hash = store.write({ hello: 'world' });
expect(store.read(hash)).toEqual({ hello: 'world' });
});
it('should throw an error when read() is called with an invalid key', () => {<|fim▁hole|> expect(() => store.read('wrong')).toThrow(/Entry wrong not found/);
});
it('should return all keys when keys() is called', () => {
const hash1 = store.write({ hello: 'world' });
const hash2 = store.write({ hello2: 'world2' });
expect(store.keys()).toEqual([
hash1,
hash2,
]);
});
it('should return all store content when toJSON() is called', () => {
const hash1 = store.write({ hello: 'world' });
const hash2 = store.write({ hello2: 'world2' });
expect(store.toJSON()).toEqual({
[hash1]: {
hello: 'world',
},
[hash2]: {
hello2: 'world2',
},
});
});
it('should init the store if a snapshot is given', () => {
const localStore = createStore({
ae3: {
hello: 'world',
},
});
expect(localStore.read('ae3')).toEqual({
hello: 'world',
});
});
it('should write data with the given hash if provided', () => {
const hash = store.write({ hello: 'world' }, 'forcedHash');
expect(hash).toBe('forcedHash');
expect(store.keys()[0]).toBe('forcedHash');
expect(store.read('forcedHash')).toEqual({ hello: 'world' });
});
it('should notify any subscriber when something is written into the store', () => {
const subscriber1 = expect.createSpy();
store.subscribe(subscriber1);
const subscriber2 = expect.createSpy();
store.subscribe(subscriber2);
const hash = store.write({ hello: 'world' });
expect(subscriber1).toHaveBeenCalledWith(hash);
expect(subscriber2).toHaveBeenCalledWith(hash);
store.unsubscribe(subscriber1);
const hash2 = store.write({ hello: 'earth' });
expect(subscriber1.calls.length).toBe(1);
expect(subscriber2).toHaveBeenCalledWith(hash2);
expect(subscriber2.calls.length).toBe(2);
});
});<|fim▁end|> | store.write({ hello: 'world' }); |
<|file_name|>content.component.ts<|end_file_name|><|fim▁begin|>import {Component, Input, OnInit, ViewChild} from '@angular/core';
import {Messages} from "../../models/message";
import {Threadz} from "../../models/threadz";
import {HttpClient} from "@angular/common/http";
import {MatRipple} from "@angular/material";
import {FormBuilder, FormGroup} from "@angular/forms";
@Component({
selector: 'app-content',
templateUrl: './content.component.html',
styleUrls: ['./content.component.css']
})
export class ContentComponent implements OnInit {
@Input() selectedThread: Threadz;
@Input() messageList: Messages[];
@ViewChild(MatRipple) ripple: MatRipple;
form: FormGroup;
constructor(private http: HttpClient, private fb: FormBuilder) {
}
<|fim▁hole|> this.form = this.fb.group({
msg: '',
});
}
sendMessage(b) {
console.log(this.form.get('msg').value);
}
}<|fim▁end|> | ngOnInit() {
|
<|file_name|>admin.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import os.path
import sys
from genshi.builder import tag
from trac.admin import IAdminCommandProvider, IAdminPanelProvider
from trac.config import ListOption
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.util import as_bool, is_path_below
from trac.util.compat import any
from trac.util.text import breakable_path, normalize_whitespace, print_table, \
printout
from trac.util.translation import _, ngettext, tag_
from trac.versioncontrol import DbRepositoryProvider, RepositoryManager, \
is_default
from trac.web.chrome import Chrome, add_notice, add_warning
class VersionControlAdmin(Component):
"""trac-admin command provider for version control administration."""
implements(IAdminCommandProvider, IPermissionRequestor)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('changeset added', '<repos> <rev> [rev] [...]',
"""Notify trac about changesets added to a repository
This command should be called from a post-commit hook. It will
trigger a cache update and notify components about the addition.
""",
self._complete_repos, self._do_changeset_added)
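# Editor's note: an illustrative post-commit hook invocation, not part of
# the original Trac source; the environment path and repository name are
# placeholders:
#   trac-admin /path/to/projenv changeset added "(default)" "$REV"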
yield ('changeset modified', '<repos> <rev> [rev] [...]',
"""Notify trac about changesets modified in a repository
This command should be called from a post-revprop hook after
revision properties like the commit message, author or date
have been changed. It will trigger a cache update for the given
revisions and notify components about the change.
""",
self._complete_repos, self._do_changeset_modified)
yield ('repository list', '',
'List source repositories',
None, self._do_list)
yield ('repository resync', '<repos> [rev]',
"""Re-synchronize trac with repositories
When [rev] is specified, only that revision is synchronized.
Otherwise, the complete revision history is synchronized. Note
that this operation can take a long time to complete.
If synchronization gets interrupted, it can be resumed later
using the `sync` command.
To synchronize all repositories, specify "*" as the repository.
""",
self._complete_repos, self._do_resync)
yield ('repository sync', '<repos> [rev]',
"""Resume synchronization of repositories
Similar to `resync`, but doesn't clear the already synchronized
changesets. Useful for resuming an interrupted `resync`.
To synchronize all repositories, specify "*" as the repository.
""",
self._complete_repos, self._do_sync)
<|fim▁hole|> rm = RepositoryManager(self.env)
return [reponame or '(default)' for reponame
in rm.get_all_repositories()]
def _complete_repos(self, args):
if len(args) == 1:
return self.get_reponames()
def _do_changeset_added(self, reponame, *revs):
if is_default(reponame):
reponame = ''
rm = RepositoryManager(self.env)
rm.notify('changeset_added', reponame, revs)
def _do_changeset_modified(self, reponame, *revs):
if is_default(reponame):
reponame = ''
rm = RepositoryManager(self.env)
rm.notify('changeset_modified', reponame, revs)
def _do_list(self):
rm = RepositoryManager(self.env)
values = []
for (reponame, info) in sorted(rm.get_all_repositories().iteritems()):
alias = ''
if 'alias' in info:
alias = info['alias'] or '(default)'
values.append((reponame or '(default)', info.get('type', ''),
alias, info.get('dir', '')))
print_table(values, [_('Name'), _('Type'), _('Alias'), _('Directory')])
def _sync(self, reponame, rev, clean):
rm = RepositoryManager(self.env)
if reponame == '*':
if rev is not None:
raise TracError(_('Cannot synchronize a single revision '
'on multiple repositories'))
repositories = rm.get_real_repositories()
else:
if is_default(reponame):
reponame = ''
repos = rm.get_repository(reponame)
if repos is None:
raise TracError(_("Repository '%(repo)s' not found",
repo=reponame or '(default)'))
if rev is not None:
repos.sync_changeset(rev)
printout(_('%(rev)s resynced on %(reponame)s.', rev=rev,
reponame=repos.reponame or '(default)'))
return
repositories = [repos]
db = self.env.get_db_cnx()
for repos in sorted(repositories, key=lambda r: r.reponame):
printout(_('Resyncing repository history for %(reponame)s... ',
reponame=repos.reponame or '(default)'))
repos.sync(self._sync_feedback, clean=clean)
cursor = db.cursor()
cursor.execute("SELECT count(rev) FROM revision WHERE repos=%s",
(repos.id,))
for cnt, in cursor:
printout(ngettext('%(num)s revision cached.',
'%(num)s revisions cached.', num=cnt))
printout(_('Done.'))
def _sync_feedback(self, rev):
sys.stdout.write(' [%s]\r' % rev)
sys.stdout.flush()
def _do_resync(self, reponame, rev=None):
self._sync(reponame, rev, clean=True)
def _do_sync(self, reponame, rev=None):
self._sync(reponame, rev, clean=False)
# IPermissionRequestor methods
def get_permission_actions(self):
return [('VERSIONCONTROL_ADMIN', ['BROWSER_VIEW', 'CHANGESET_VIEW',
'FILE_VIEW', 'LOG_VIEW'])]
class RepositoryAdminPanel(Component):
"""Web admin panel for repository administration."""
implements(IAdminPanelProvider)
allowed_repository_dir_prefixes = ListOption('versioncontrol',
'allowed_repository_dir_prefixes', '',
doc="""Comma-separated list of allowed prefixes for repository
directories when adding and editing repositories in the repository
admin panel. If the list is empty, all repository directories are
allowed. (''since 0.12.1'')""")
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'VERSIONCONTROL_ADMIN' in req.perm:
yield ('versioncontrol', _('Version Control'), 'repository',
_('Repositories'))
def render_admin_panel(self, req, category, page, path_info):
req.perm.require('VERSIONCONTROL_ADMIN')
# Retrieve info for all repositories
rm = RepositoryManager(self.env)
all_repos = rm.get_all_repositories()
db_provider = self.env[DbRepositoryProvider]
if path_info:
# Detail view
reponame = not is_default(path_info) and path_info or ''
info = all_repos.get(reponame)
if info is None:
raise TracError(_("Repository '%(repo)s' not found",
repo=path_info))
if req.method == 'POST':
if req.args.get('cancel'):
req.redirect(req.href.admin(category, page))
elif db_provider and req.args.get('save'):
# Modify repository
changes = {}
for field in db_provider.repository_attrs:
value = normalize_whitespace(req.args.get(field))
if (value is not None or field == 'hidden') \
and value != info.get(field):
changes[field] = value
if 'dir' in changes \
and not self._check_dir(req, changes['dir']):
changes = {}
if changes:
db_provider.modify_repository(reponame, changes)
add_notice(req, _('Your changes have been saved.'))
name = req.args.get('name')
resync = tag.tt('trac-admin $ENV repository resync "%s"'
% (name or '(default)'))
if 'dir' in changes:
msg = tag_('You should now run %(resync)s to '
'synchronize Trac with the repository.',
resync=resync)
add_notice(req, msg)
elif 'type' in changes:
msg = tag_('You may have to run %(resync)s to '
'synchronize Trac with the repository.',
resync=resync)
add_notice(req, msg)
if name and name != path_info and not 'alias' in info:
cset_added = tag.tt('trac-admin $ENV changeset '
'added "%s" $REV'
% (name or '(default)'))
msg = tag_('You will need to update your post-commit '
'hook to call %(cset_added)s with the new '
'repository name.', cset_added=cset_added)
add_notice(req, msg)
if changes:
req.redirect(req.href.admin(category, page))
Chrome(self.env).add_wiki_toolbars(req)
data = {'view': 'detail', 'reponame': reponame}
else:
# List view
if req.method == 'POST':
# Add a repository
if db_provider and req.args.get('add_repos'):
name = req.args.get('name')
type_ = req.args.get('type')
# Avoid errors when copy/pasting paths
dir = normalize_whitespace(req.args.get('dir', ''))
if name is None or type_ is None or not dir:
add_warning(req, _('Missing arguments to add a '
'repository.'))
elif self._check_dir(req, dir):
db_provider.add_repository(name, dir, type_)
name = name or '(default)'
add_notice(req, _('The repository "%(name)s" has been '
'added.', name=name))
resync = tag.tt('trac-admin $ENV repository resync '
'"%s"' % name)
msg = tag_('You should now run %(resync)s to '
'synchronize Trac with the repository.',
resync=resync)
add_notice(req, msg)
cset_added = tag.tt('trac-admin $ENV changeset '
'added "%s" $REV' % name)
msg = tag_('You should also set up a post-commit hook '
'on the repository to call %(cset_added)s '
'for each committed changeset.',
cset_added=cset_added)
add_notice(req, msg)
req.redirect(req.href.admin(category, page))
# Add a repository alias
elif db_provider and req.args.get('add_alias'):
name = req.args.get('name')
alias = req.args.get('alias')
if name is not None and alias is not None:
db_provider.add_alias(name, alias)
add_notice(req, _('The alias "%(name)s" has been '
'added.', name=name or '(default)'))
req.redirect(req.href.admin(category, page))
add_warning(req, _('Missing arguments to add an '
'alias.'))
# Refresh the list of repositories
elif req.args.get('refresh'):
req.redirect(req.href.admin(category, page))
# Remove repositories
elif db_provider and req.args.get('remove'):
sel = req.args.getlist('sel')
if sel:
for name in sel:
db_provider.remove_repository(name)
add_notice(req, _('The selected repositories have '
'been removed.'))
req.redirect(req.href.admin(category, page))
add_warning(req, _('No repositories were selected.'))
data = {'view': 'list'}
# Find repositories that are editable
db_repos = {}
if db_provider is not None:
db_repos = dict(db_provider.get_repositories())
# Prepare common rendering data
repositories = dict((reponame, self._extend_info(reponame, info.copy(),
reponame in db_repos))
for (reponame, info) in all_repos.iteritems())
types = sorted([''] + rm.get_supported_types())
data.update({'types': types, 'default_type': rm.repository_type,
'repositories': repositories})
return 'admin_repositories.html', data
def _extend_info(self, reponame, info, editable):
"""Extend repository info for rendering."""
info['name'] = reponame
if info.get('dir') is not None:
info['prettydir'] = breakable_path(info['dir']) or ''
info['hidden'] = as_bool(info.get('hidden'))
info['editable'] = editable
if not info.get('alias'):
try:
repos = RepositoryManager(self.env).get_repository(reponame)
youngest_rev = repos.get_youngest_rev()
info['rev'] = youngest_rev
info['display_rev'] = repos.display_rev(youngest_rev)
except Exception:
pass
return info
def _check_dir(self, req, dir):
"""Check that a repository directory is valid, and add a warning
message if not.
"""
if not os.path.isabs(dir):
add_warning(req, _('The repository directory must be an absolute '
'path.'))
return False
prefixes = [os.path.join(self.env.path, prefix)
for prefix in self.allowed_repository_dir_prefixes]
if prefixes and not any(is_path_below(dir, prefix)
for prefix in prefixes):
add_warning(req, _('The repository directory must be located '
'below one of the following directories: '
'%(dirs)s', dirs=', '.join(prefixes)))
return False
return True<|fim▁end|> | def get_reponames(self): |
<|file_name|>start.go<|end_file_name|><|fim▁begin|>package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"runtime"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/containers"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/errdefs"
"github.com/moby/sys/mount"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ContainerStart starts a container.
func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error {
if checkpoint != "" && !daemon.HasExperimental() {
return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode"))
}
ctr, err := daemon.GetContainer(name)
if err != nil {
return err
}
validateState := func() error {
ctr.Lock()
defer ctr.Unlock()
if ctr.Paused {
return errdefs.Conflict(errors.New("cannot start a paused container, try unpause instead"))
}
if ctr.Running {
return containerNotModifiedError{running: true}
}
if ctr.RemovalInProgress || ctr.Dead {
return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
}
return nil
}
if err := validateState(); err != nil {
return err
}
// Windows does not have the backwards compatibility issue here.
if runtime.GOOS != "windows" {
// This is kept for backward compatibility - hostconfig should be passed when
// creating a container, not during start.
if hostConfig != nil {
logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12")
oldNetworkMode := ctr.HostConfig.NetworkMode
if err := daemon.setSecurityOptions(ctr, hostConfig); err != nil {
return errdefs.InvalidParameter(err)
}
if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil {
return errdefs.InvalidParameter(err)
}
if err := daemon.setHostConfig(ctr, hostConfig); err != nil {
return errdefs.InvalidParameter(err)
}
newNetworkMode := ctr.HostConfig.NetworkMode
if string(oldNetworkMode) != string(newNetworkMode) {
// if user has change the network mode on starting, clean up the
// old networks. It is a deprecated feature and has been removed in Docker 1.12
ctr.NetworkSettings.Networks = nil
if err := ctr.CheckpointTo(daemon.containersReplica); err != nil {
return errdefs.System(err)
}
}
ctr.InitDNSHostConfig()
}
} else {
if hostConfig != nil {
return errdefs.InvalidParameter(errors.New("Supplying a hostconfig on start is not supported. It should be supplied on create"))
}
}
// check if hostConfig is in line with the current system settings.
// It may happen cgroups are umounted or the like.
if _, err = daemon.verifyContainerSettings(ctr.OS, ctr.HostConfig, nil, false); err != nil {
return errdefs.InvalidParameter(err)
}
// Adapt for old containers in case we have updates in this function and
// old containers never have chance to call the new function in create stage.
if hostConfig != nil {
if err := daemon.adaptContainerSettings(ctr.HostConfig, false); err != nil {
return errdefs.InvalidParameter(err)
}
}
return daemon.containerStart(ctr, checkpoint, checkpointDir, true)
}
// containerStart prepares the container to run by setting up everything the
// container needs, such as storage and networking, as well as links
// between containers. The container is left waiting for a signal to
// begin running.
func (daemon *Daemon) containerStart(container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) {
start := time.Now()
container.Lock()
defer container.Unlock()
if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false
return nil
}
if container.RemovalInProgress || container.Dead {
return errdefs.Conflict(errors.New("container is marked for removal and cannot be started"))
}
if checkpointDir != "" {
// TODO(mlaventure): how would we support that?
return errdefs.Forbidden(errors.New("custom checkpointdir is not supported"))
}
// if we encounter an error during start we need to ensure that any other
// setup has been cleaned up properly
defer func() {
if err != nil {
container.SetError(err)
// if no one else has set it, make sure we don't leave it at zero
if container.ExitCode() == 0 {
container.SetExitCode(128)
}
if err := container.CheckpointTo(daemon.containersReplica); err != nil {
logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err)
}
container.Reset(false)
daemon.Cleanup(container)
			// if the container's AutoRemove flag is set, remove it after cleanup
if container.HostConfig.AutoRemove {
container.Unlock()
if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
logrus.Errorf("can't remove container %s: %v", container.ID, err)
}
container.Lock()
}
}
}()
if err := daemon.conditionalMountOnStart(container); err != nil {
return err
}
if err := daemon.initializeNetworking(container); err != nil {
return err
}
spec, err := daemon.createSpec(container)
if err != nil {
return errdefs.System(err)
}
if resetRestartManager {
container.ResetRestartManager(true)
container.HasBeenManuallyStopped = false
}
if err := daemon.saveAppArmorConfig(container); err != nil {
return err
}
if checkpoint != "" {
checkpointDir, err = getCheckpointDir(checkpointDir, checkpoint, container.Name, container.ID, container.CheckpointDir(), false)
if err != nil {
return err
}
}
createOptions, err := daemon.getLibcontainerdCreateOptions(container)
if err != nil {
return err
}
ctx := context.TODO()
imageRef, err := reference.ParseNormalizedNamed(container.Config.Image)
if err != nil {
return err
}
err = daemon.containerd.Create(ctx, container.ID, spec, createOptions, withImageName(imageRef.String()))
if err != nil {
if errdefs.IsConflict(err) {
logrus.WithError(err).WithField("container", container.ID).Error("Container not cleaned up from containerd from previous run")
// best effort to clean up old container object
daemon.containerd.DeleteTask(ctx, container.ID)
if err := daemon.containerd.Delete(ctx, container.ID); err != nil && !errdefs.IsNotFound(err) {
logrus.WithError(err).WithField("container", container.ID).Error("Error cleaning up stale containerd container object")
}
err = daemon.containerd.Create(ctx, container.ID, spec, createOptions, withImageName(imageRef.String()))
}
if err != nil {
return translateContainerdStartErr(container.Path, container.SetExitCode, err)
}
}
// TODO(mlaventure): we need to specify checkpoint options here
pid, err := daemon.containerd.Start(context.Background(), container.ID, checkpointDir,
container.StreamConfig.Stdin() != nil || container.Config.Tty,
container.InitializeStdio)
if err != nil {
if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil {
logrus.WithError(err).WithField("container", container.ID).
Error("failed to delete failed start container")
}
return translateContainerdStartErr(container.Path, container.SetExitCode, err)
}
container.SetRunning(pid, true)
container.HasBeenStartedBefore = true
daemon.setStateCounter(container)
daemon.initHealthMonitor(container)
if err := container.CheckpointTo(daemon.containersReplica); err != nil {
logrus.WithError(err).WithField("container", container.ID).
Errorf("failed to store container")
}
daemon.LogContainerEvent(container, "start")
containerActions.WithValues("start").UpdateSince(start)
<|fim▁hole|>
// Cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
func (daemon *Daemon) Cleanup(container *container.Container) {
daemon.releaseNetwork(container)
if err := container.UnmountIpcMount(); err != nil {
logrus.Warnf("%s cleanup: failed to unmount IPC: %s", container.ID, err)
}
if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
// FIXME: remove once reference counting for graphdrivers has been refactored
// Ensure that all the mounts are gone
if mountid, err := daemon.imageService.GetLayerMountID(container.ID, container.OS); err == nil {
daemon.cleanupMountsByID(mountid)
}
}
if err := container.UnmountSecrets(); err != nil {
logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err)
}
if err := mount.RecursiveUnmount(container.Root); err != nil {
logrus.WithError(err).WithField("container", container.ID).Warn("Error while cleaning up container resource mounts.")
}
for _, eConfig := range container.ExecCommands.Commands() {
daemon.unregisterExecCommand(container, eConfig)
}
if container.BaseFS != nil && container.BaseFS.Path() != "" {
if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil {
logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
}
}
container.CancelAttachContext()
if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil {
logrus.Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err)
}
}
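// withImageName returns a containerd NewContainerOpts that records the given
// image reference on the container object being created.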
func withImageName(n string) containerd.NewContainerOpts {
return func(ctx context.Context, _ *containerd.Client, c *containers.Container) error {
c.Image = n
return nil
}
}<|fim▁end|> | return nil
} |
<|file_name|>instr_vpblendmq.rs<|end_file_name|><|fim▁begin|>use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;
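// Each case below asserts that assembling the given VPBLENDMQ instruction yields
// the expected EVEX-encoded byte sequence for that operand/masking/broadcast combination.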
#[test]
fn vpblendmq_1() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(XMM4)), operand2: Some(Direct(XMM0)), operand3: Some(Direct(XMM6)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: None }, &[98, 242, 253, 138, 100, 230], OperandSize::Dword)
}
<|fim▁hole|>#[test]
fn vpblendmq_2() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(XMM3)), operand2: Some(Direct(XMM2)), operand3: Some(IndirectScaledIndexed(EDI, EAX, Four, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K5), broadcast: None }, &[98, 242, 237, 141, 100, 28, 135], OperandSize::Dword)
}
#[test]
fn vpblendmq_3() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(XMM4)), operand2: Some(Direct(XMM6)), operand3: Some(IndirectScaledIndexedDisplaced(ESI, ECX, Eight, 355297586, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: Some(BroadcastMode::Broadcast1To2) }, &[98, 242, 205, 154, 100, 164, 206, 50, 105, 45, 21], OperandSize::Dword)
}
#[test]
fn vpblendmq_4() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(XMM29)), operand2: Some(Direct(XMM2)), operand3: Some(Direct(XMM13)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K5), broadcast: None }, &[98, 66, 237, 141, 100, 237], OperandSize::Qword)
}
#[test]
fn vpblendmq_5() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(XMM11)), operand2: Some(Direct(XMM28)), operand3: Some(IndirectScaledDisplaced(RSI, Two, 1417657024, Some(OperandSize::Xmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: None }, &[98, 114, 157, 130, 100, 28, 117, 192, 186, 127, 84], OperandSize::Qword)
}
#[test]
fn vpblendmq_6() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(XMM29)), operand2: Some(Direct(XMM4)), operand3: Some(IndirectScaledIndexed(RDI, RCX, Eight, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K5), broadcast: Some(BroadcastMode::Broadcast1To2) }, &[98, 98, 221, 157, 100, 44, 207], OperandSize::Qword)
}
#[test]
fn vpblendmq_7() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(YMM2)), operand2: Some(Direct(YMM0)), operand3: Some(Direct(YMM4)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K1), broadcast: None }, &[98, 242, 253, 169, 100, 212], OperandSize::Dword)
}
#[test]
fn vpblendmq_8() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(YMM6)), operand2: Some(Direct(YMM0)), operand3: Some(IndirectDisplaced(EAX, 1522907944, Some(OperandSize::Ymmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K5), broadcast: None }, &[98, 242, 253, 173, 100, 176, 40, 187, 197, 90], OperandSize::Dword)
}
#[test]
fn vpblendmq_9() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(YMM4)), operand2: Some(Direct(YMM7)), operand3: Some(IndirectDisplaced(ECX, 902908102, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: Some(BroadcastMode::Broadcast1To4) }, &[98, 242, 197, 186, 100, 161, 198, 72, 209, 53], OperandSize::Dword)
}
#[test]
fn vpblendmq_10() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(YMM6)), operand2: Some(Direct(YMM16)), operand3: Some(Direct(YMM0)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K4), broadcast: None }, &[98, 242, 253, 164, 100, 240], OperandSize::Qword)
}
#[test]
fn vpblendmq_11() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(YMM14)), operand2: Some(Direct(YMM11)), operand3: Some(IndirectScaledIndexed(RSI, RCX, Two, Some(OperandSize::Ymmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K5), broadcast: None }, &[98, 114, 165, 173, 100, 52, 78], OperandSize::Qword)
}
#[test]
fn vpblendmq_12() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(YMM6)), operand2: Some(Direct(YMM29)), operand3: Some(IndirectScaledIndexed(RBX, RAX, Two, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K1), broadcast: Some(BroadcastMode::Broadcast1To4) }, &[98, 242, 149, 177, 100, 52, 67], OperandSize::Qword)
}
#[test]
fn vpblendmq_13() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(ZMM1)), operand2: Some(Direct(ZMM5)), operand3: Some(Direct(ZMM1)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K2), broadcast: None }, &[98, 242, 213, 202, 100, 201], OperandSize::Dword)
}
#[test]
fn vpblendmq_14() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(ZMM0)), operand2: Some(Direct(ZMM4)), operand3: Some(IndirectScaledIndexedDisplaced(EBX, ESI, Two, 1975636423, Some(OperandSize::Zmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K4), broadcast: None }, &[98, 242, 221, 204, 100, 132, 115, 199, 209, 193, 117], OperandSize::Dword)
}
#[test]
fn vpblendmq_15() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(ZMM0)), operand2: Some(Direct(ZMM1)), operand3: Some(IndirectScaledDisplaced(EBX, Four, 1259826670, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K6), broadcast: Some(BroadcastMode::Broadcast1To8) }, &[98, 242, 245, 222, 100, 4, 157, 238, 109, 23, 75], OperandSize::Dword)
}
#[test]
fn vpblendmq_16() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(ZMM28)), operand2: Some(Direct(ZMM24)), operand3: Some(Direct(ZMM6)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K5), broadcast: None }, &[98, 98, 189, 197, 100, 230], OperandSize::Qword)
}
#[test]
fn vpblendmq_17() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(ZMM27)), operand2: Some(Direct(ZMM28)), operand3: Some(IndirectDisplaced(RSI, 1870158351, Some(OperandSize::Zmmword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K7), broadcast: None }, &[98, 98, 157, 199, 100, 158, 15, 90, 120, 111], OperandSize::Qword)
}
#[test]
fn vpblendmq_18() {
run_test(&Instruction { mnemonic: Mnemonic::VPBLENDMQ, operand1: Some(Direct(ZMM7)), operand2: Some(Direct(ZMM4)), operand3: Some(IndirectScaledIndexedDisplaced(RDI, RDX, Eight, 1600288935, Some(OperandSize::Qword), None)), operand4: None, lock: false, rounding_mode: None, merge_mode: Some(MergeMode::Zero), sae: false, mask: Some(MaskReg::K3), broadcast: Some(BroadcastMode::Broadcast1To8) }, &[98, 242, 221, 219, 100, 188, 215, 167, 120, 98, 95], OperandSize::Qword)
}<|fim▁end|> | |
<|file_name|>example_complex.py<|end_file_name|><|fim▁begin|>#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows the complex DAG structure.
"""
from datetime import datetime
from airflow import models
from airflow.models.baseoperator import chain
from airflow.operators.bash import BashOperator
with models.DAG(
dag_id="example_complex",
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['example', 'example2', 'example3'],
) as dag:
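    # Every task in this example is a BashOperator that just echoes its own task_id;
    # together they model a create/get/list/update/delete workflow with complex dependencies.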
# Create
create_entry_group = BashOperator(task_id="create_entry_group", bash_command="echo create_entry_group")<|fim▁hole|> task_id="create_entry_group_result", bash_command="echo create_entry_group_result"
)
create_entry_group_result2 = BashOperator(
task_id="create_entry_group_result2", bash_command="echo create_entry_group_result2"
)
create_entry_gcs = BashOperator(task_id="create_entry_gcs", bash_command="echo create_entry_gcs")
create_entry_gcs_result = BashOperator(
task_id="create_entry_gcs_result", bash_command="echo create_entry_gcs_result"
)
create_entry_gcs_result2 = BashOperator(
task_id="create_entry_gcs_result2", bash_command="echo create_entry_gcs_result2"
)
create_tag = BashOperator(task_id="create_tag", bash_command="echo create_tag")
create_tag_result = BashOperator(task_id="create_tag_result", bash_command="echo create_tag_result")
create_tag_result2 = BashOperator(task_id="create_tag_result2", bash_command="echo create_tag_result2")
create_tag_template = BashOperator(task_id="create_tag_template", bash_command="echo create_tag_template")
create_tag_template_result = BashOperator(
task_id="create_tag_template_result", bash_command="echo create_tag_template_result"
)
create_tag_template_result2 = BashOperator(
task_id="create_tag_template_result2", bash_command="echo create_tag_template_result2"
)
create_tag_template_field = BashOperator(
task_id="create_tag_template_field", bash_command="echo create_tag_template_field"
)
create_tag_template_field_result = BashOperator(
task_id="create_tag_template_field_result", bash_command="echo create_tag_template_field_result"
)
create_tag_template_field_result2 = BashOperator(
task_id="create_tag_template_field_result2", bash_command="echo create_tag_template_field_result"
)
# Delete
delete_entry = BashOperator(task_id="delete_entry", bash_command="echo delete_entry")
create_entry_gcs >> delete_entry
delete_entry_group = BashOperator(task_id="delete_entry_group", bash_command="echo delete_entry_group")
create_entry_group >> delete_entry_group
delete_tag = BashOperator(task_id="delete_tag", bash_command="echo delete_tag")
create_tag >> delete_tag
delete_tag_template_field = BashOperator(
task_id="delete_tag_template_field", bash_command="echo delete_tag_template_field"
)
delete_tag_template = BashOperator(task_id="delete_tag_template", bash_command="echo delete_tag_template")
# Get
get_entry_group = BashOperator(task_id="get_entry_group", bash_command="echo get_entry_group")
get_entry_group_result = BashOperator(
task_id="get_entry_group_result", bash_command="echo get_entry_group_result"
)
get_entry = BashOperator(task_id="get_entry", bash_command="echo get_entry")
get_entry_result = BashOperator(task_id="get_entry_result", bash_command="echo get_entry_result")
get_tag_template = BashOperator(task_id="get_tag_template", bash_command="echo get_tag_template")
get_tag_template_result = BashOperator(
task_id="get_tag_template_result", bash_command="echo get_tag_template_result"
)
# List
list_tags = BashOperator(task_id="list_tags", bash_command="echo list_tags")
list_tags_result = BashOperator(task_id="list_tags_result", bash_command="echo list_tags_result")
# Lookup
lookup_entry = BashOperator(task_id="lookup_entry", bash_command="echo lookup_entry")
lookup_entry_result = BashOperator(task_id="lookup_entry_result", bash_command="echo lookup_entry_result")
# Rename
rename_tag_template_field = BashOperator(
task_id="rename_tag_template_field", bash_command="echo rename_tag_template_field"
)
# Search
search_catalog = BashOperator(task_id="search_catalog", bash_command="echo search_catalog")
search_catalog_result = BashOperator(
task_id="search_catalog_result", bash_command="echo search_catalog_result"
)
# Update
update_entry = BashOperator(task_id="update_entry", bash_command="echo update_entry")
update_tag = BashOperator(task_id="update_tag", bash_command="echo update_tag")
update_tag_template = BashOperator(task_id="update_tag_template", bash_command="echo update_tag_template")
update_tag_template_field = BashOperator(
task_id="update_tag_template_field", bash_command="echo update_tag_template_field"
)
# Create
create_tasks = [
create_entry_group,
create_entry_gcs,
create_tag_template,
create_tag_template_field,
create_tag,
]
chain(*create_tasks)
create_entry_group >> delete_entry_group
create_entry_group >> create_entry_group_result
create_entry_group >> create_entry_group_result2
create_entry_gcs >> delete_entry
create_entry_gcs >> create_entry_gcs_result
create_entry_gcs >> create_entry_gcs_result2
create_tag_template >> delete_tag_template_field
create_tag_template >> create_tag_template_result
create_tag_template >> create_tag_template_result2
create_tag_template_field >> delete_tag_template_field
create_tag_template_field >> create_tag_template_field_result
create_tag_template_field >> create_tag_template_field_result2
create_tag >> delete_tag
create_tag >> create_tag_result
create_tag >> create_tag_result2
# Delete
delete_tasks = [
delete_tag,
delete_tag_template_field,
delete_tag_template,
delete_entry_group,
delete_entry,
]
chain(*delete_tasks)
# Get
create_tag_template >> get_tag_template >> delete_tag_template
get_tag_template >> get_tag_template_result
create_entry_gcs >> get_entry >> delete_entry
get_entry >> get_entry_result
create_entry_group >> get_entry_group >> delete_entry_group
get_entry_group >> get_entry_group_result
# List
create_tag >> list_tags >> delete_tag
list_tags >> list_tags_result
# Lookup
create_entry_gcs >> lookup_entry >> delete_entry
lookup_entry >> lookup_entry_result
# Rename
create_tag_template_field >> rename_tag_template_field >> delete_tag_template_field
# Search
chain(create_tasks, search_catalog, delete_tasks)
search_catalog >> search_catalog_result
# Update
create_entry_gcs >> update_entry >> delete_entry
create_tag >> update_tag >> delete_tag
create_tag_template >> update_tag_template >> delete_tag_template
create_tag_template_field >> update_tag_template_field >> rename_tag_template_field<|fim▁end|> |
create_entry_group_result = BashOperator( |
<|file_name|>types.rs<|end_file_name|><|fim▁begin|>//! Exports Rust counterparts for all the common GLSL types, along with a few marker traits
use rasen::prelude::{Dim, TypeName};
use std::ops::{Add, Div, Index, Mul, Rem, Sub};
use crate::{
context::{Container, Context},
value::{IntoValue, Value},
};
pub trait AsTypeName {
const TYPE_NAME: &'static TypeName;
}
pub trait GenType: Copy {
fn zero() -> Self;
fn one() -> Self;
fn min(self, rhs: Self) -> Self;
fn max(self, rhs: Self) -> Self;
}
pub trait Numerical: GenType {
fn pow(self, rhs: Self) -> Self;
}
pub trait Floating: Numerical {
fn sqrt(self) -> Self;
fn floor(self) -> Self;
fn ceil(self) -> Self;
fn round(self) -> Self;
fn sin(self) -> Self;
fn cos(self) -> Self;
fn tan(self) -> Self;
fn ln(self) -> Self;
fn abs(self) -> Self;
}
pub trait Vector: GenType {
type Scalar: Numerical;
fn spread(v: Self::Scalar) -> Self;
}
pub trait VectorFloating: Vector
where
Self::Scalar: Floating,
{
fn dot(&self, rhs: &Self) -> Self::Scalar;
fn normalize(&self) -> Self;
fn length_squared(&self) -> Self::Scalar;
fn length(&self) -> Self::Scalar {
self.length_squared().sqrt()
}
}
pub trait Vector3: Vector {
fn cross(&self, rhs: &Self) -> Self;
}
pub trait Matrix {
fn inverse(self) -> Self;
}
include!(concat!(env!("OUT_DIR"), "/types.rs"));
#[derive(Copy, Clone, Debug)]
pub struct Sampler<V>(pub V);
impl<V: Vector> AsTypeName for Sampler<V>
where
<V as Vector>::Scalar: AsTypeName,
{
const TYPE_NAME: &'static TypeName =<|fim▁hole|> &TypeName::Sampler(<<V as Vector>::Scalar as AsTypeName>::TYPE_NAME, Dim::Dim2D);
}<|fim▁end|> | |
<|file_name|>test.ts<|end_file_name|><|fim▁begin|>/*
* @license Apache-2.0
*
* Copyright (c) 2020 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import snansumkbn2 = require( './index' );
// TESTS //
// The function returns a number...
{
const x = new Float32Array( 10 );
snansumkbn2( x.length, x, 1 ); // $ExpectType number
}
// The compiler throws an error if the function is provided a first argument which is not a number...
{
const x = new Float32Array( 10 );
snansumkbn2( '10', x, 1 ); // $ExpectError
snansumkbn2( true, x, 1 ); // $ExpectError
snansumkbn2( false, x, 1 ); // $ExpectError
snansumkbn2( null, x, 1 ); // $ExpectError
snansumkbn2( undefined, x, 1 ); // $ExpectError
snansumkbn2( [], x, 1 ); // $ExpectError
snansumkbn2( {}, x, 1 ); // $ExpectError
snansumkbn2( ( x: number ): number => x, x, 1 ); // $ExpectError
}
// The compiler throws an error if the function is provided a second argument which is not a Float32Array...
{
const x = new Float32Array( 10 );
snansumkbn2( x.length, 10, 1 ); // $ExpectError
snansumkbn2( x.length, '10', 1 ); // $ExpectError
snansumkbn2( x.length, true, 1 ); // $ExpectError
snansumkbn2( x.length, false, 1 ); // $ExpectError
snansumkbn2( x.length, null, 1 ); // $ExpectError
snansumkbn2( x.length, undefined, 1 ); // $ExpectError
snansumkbn2( x.length, [], 1 ); // $ExpectError
snansumkbn2( x.length, {}, 1 ); // $ExpectError
snansumkbn2( x.length, ( x: number ): number => x, 1 ); // $ExpectError
}
// The compiler throws an error if the function is provided a third argument which is not a number...
{
const x = new Float32Array( 10 );
snansumkbn2( x.length, x, '10' ); // $ExpectError
snansumkbn2( x.length, x, true ); // $ExpectError
snansumkbn2( x.length, x, false ); // $ExpectError
snansumkbn2( x.length, x, null ); // $ExpectError
snansumkbn2( x.length, x, undefined ); // $ExpectError
snansumkbn2( x.length, x, [] ); // $ExpectError
snansumkbn2( x.length, x, {} ); // $ExpectError
snansumkbn2( x.length, x, ( x: number ): number => x ); // $ExpectError
}
// The compiler throws an error if the function is provided an unsupported number of arguments...
{
const x = new Float32Array( 10 );
snansumkbn2(); // $ExpectError
snansumkbn2( x.length ); // $ExpectError
snansumkbn2( x.length, x ); // $ExpectError
snansumkbn2( x.length, x, 1, 10 ); // $ExpectError
}
// Attached to main export is an `ndarray` method which returns a number...
{
const x = new Float32Array( 10 );
snansumkbn2.ndarray( x.length, x, 1, 0 ); // $ExpectType number
}
// The compiler throws an error if the `ndarray` method is provided a first argument which is not a number...
{
const x = new Float32Array( 10 );
snansumkbn2.ndarray( '10', x, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( true, x, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( false, x, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( null, x, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( undefined, x, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( [], x, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( {}, x, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( ( x: number ): number => x, x, 1, 0 ); // $ExpectError
}
// The compiler throws an error if the `ndarray` method is provided a second argument which is not a Float32Array...
{
const x = new Float32Array( 10 );
snansumkbn2.ndarray( x.length, 10, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, '10', 1, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, true, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, false, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, null, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, undefined, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, [], 1, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, {}, 1, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, ( x: number ): number => x, 1, 0 ); // $ExpectError
}
// The compiler throws an error if the `ndarray` method is provided a third argument which is not a number...
{
const x = new Float32Array( 10 );
snansumkbn2.ndarray( x.length, x, '10', 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, x, true, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, x, false, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, x, null, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, x, undefined, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, x, [], 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, x, {}, 0 ); // $ExpectError
snansumkbn2.ndarray( x.length, x, ( x: number ): number => x, 0 ); // $ExpectError
}
// The compiler throws an error if the `ndarray` method is provided a fourth argument which is not a number...
{
const x = new Float32Array( 10 );
snansumkbn2.ndarray( x.length, x, 1, '10' ); // $ExpectError
snansumkbn2.ndarray( x.length, x, 1, true ); // $ExpectError
snansumkbn2.ndarray( x.length, x, 1, false ); // $ExpectError
snansumkbn2.ndarray( x.length, x, 1, null ); // $ExpectError
snansumkbn2.ndarray( x.length, x, 1, undefined ); // $ExpectError
snansumkbn2.ndarray( x.length, x, 1, [] ); // $ExpectError
snansumkbn2.ndarray( x.length, x, 1, {} ); // $ExpectError
snansumkbn2.ndarray( x.length, x, 1, ( x: number ): number => x ); // $ExpectError
}<|fim▁hole|> const x = new Float32Array( 10 );
snansumkbn2.ndarray(); // $ExpectError
snansumkbn2.ndarray( x.length ); // $ExpectError
snansumkbn2.ndarray( x.length, x ); // $ExpectError
snansumkbn2.ndarray( x.length, x, 1 ); // $ExpectError
snansumkbn2.ndarray( x.length, x, 1, 0, 10 ); // $ExpectError
}<|fim▁end|> |
// The compiler throws an error if the `ndarray` method is provided an unsupported number of arguments...
{ |
<|file_name|>maketables.go<|end_file_name|><|fim▁begin|>// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// This program generates tables.go:
// go run maketables.go | gofmt > tables.go
import (
"bufio"
"fmt"
"log"
"net/http"
"sort"
"strings"
"unicode/utf8"
"golang.org/x/text/encoding"
)
const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" +
"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" +
` !"#$%&'()*+,-./0123456789:;<=>?` +
`@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` +
"`abcdefghijklmnopqrstuvwxyz{|}~\u007f"
var encodings = []struct {
name string
mib string
comment string
varName string
replacement byte
mapping string
}{
{
"IBM Code Page 437",
"PC8CodePage437",
"",
"CodePage437",
encoding.ASCIISub,
ascii +
"ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒ" +
"áíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐" +
"└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀" +
"αßΓπΣσµτΦΘΩδ∞∅∈∩≡±≥≤⌠⌡÷≈°•·√ⁿ²∎\u00a0",
},
{
"IBM Code Page 866",
"IBM866",
"",
"CodePage866",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-ibm866.txt",
},
{
"ISO 8859-2",
"ISOLatin2",
"",
"ISO8859_2",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-2.txt",
},
{
"ISO 8859-3",
"ISOLatin3",
"",
"ISO8859_3",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-3.txt",
},
{
"ISO 8859-4",
"ISOLatin4",
"",
"ISO8859_4",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-4.txt",
},
{
"ISO 8859-5",
"ISOLatinCyrillic",
"",
"ISO8859_5",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-5.txt",
},
{
"ISO 8859-6",
"ISOLatinArabic",
"",
"ISO8859_6,ISO8859_6E,ISO8859_6I",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-6.txt",
},
{
"ISO 8859-7",
"ISOLatinGreek",
"",
"ISO8859_7",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-7.txt",
},
{
"ISO 8859-8",
"ISOLatinHebrew",
"",
"ISO8859_8,ISO8859_8E,ISO8859_8I",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-8.txt",
},
{
"ISO 8859-10",
"ISOLatin6",
"",
"ISO8859_10",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-10.txt",
},
{
"ISO 8859-13",
"ISO885913",
"",
"ISO8859_13",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-13.txt",
},
{
"ISO 8859-14",
"ISO885914",
"",
"ISO8859_14",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-14.txt",
},
{
"ISO 8859-15",
"ISO885915",
"",
"ISO8859_15",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-15.txt",
},
{
"ISO 8859-16",
"ISO885916",
"",
"ISO8859_16",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-iso-8859-16.txt",
},
{
"KOI8-R",
"KOI8R",
"",
"KOI8R",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-koi8-r.txt",
},
{
"KOI8-U",
"KOI8U",
"",
"KOI8U",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-koi8-u.txt",
},
{
"Macintosh",
"Macintosh",
"",
"Macintosh",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-macintosh.txt",
},
{
"Macintosh Cyrillic",
"MacintoshCyrillic",
"",
"MacintoshCyrillic",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt",
},
{
"Windows 874",
"Windows874",
"",
"Windows874",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-874.txt",
},
{
"Windows 1250",
"Windows1250",
"",
"Windows1250",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1250.txt",
},
{
"Windows 1251",
"Windows1251",
"",
"Windows1251",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1251.txt",
},
{
"Windows 1252",
"Windows1252",
"",
"Windows1252",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1252.txt",
},
{
"Windows 1253",
"Windows1253",
"",<|fim▁hole|> encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1253.txt",
},
{
"Windows 1254",
"Windows1254",
"",
"Windows1254",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1254.txt",
},
{
"Windows 1255",
"Windows1255",
"",
"Windows1255",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1255.txt",
},
{
"Windows 1256",
"Windows1256",
"",
"Windows1256",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1256.txt",
},
{
"Windows 1257",
"Windows1257",
"",
"Windows1257",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1257.txt",
},
{
"Windows 1258",
"Windows1258",
"",
"Windows1258",
encoding.ASCIISub,
"http://encoding.spec.whatwg.org/index-windows-1258.txt",
},
{
"X-User-Defined",
"XUserDefined",
"It is defined at http://encoding.spec.whatwg.org/#x-user-defined",
"XUserDefined",
encoding.ASCIISub,
ascii +
"\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" +
"\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" +
"\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" +
"\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" +
"\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" +
"\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" +
"\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" +
"\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" +
"\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" +
"\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" +
"\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" +
"\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" +
"\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" +
"\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" +
"\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" +
"\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff",
},
}
func getWHATWG(url string) string {
res, err := http.Get(url)
if err != nil {
log.Fatalf("%q: Get: %v", url, err)
}
defer res.Body.Close()
mapping := make([]rune, 128)
for i := range mapping {
mapping[i] = '\ufffd'
}
scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if s == "" || s[0] == '#' {
continue
}
x, y := 0, 0
if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil {
log.Fatalf("could not parse %q", s)
}
if x < 0 || 128 <= x {
log.Fatalf("code %d is out of range", x)
}
if 0x80 <= y && y < 0xa0 {
// We diverge from the WHATWG spec by mapping control characters
// in the range [0x80, 0xa0) to U+FFFD.
continue
}
mapping[x] = rune(y)
}
return ascii + string(mapping)
}
func main() {
mibs := map[string]bool{}
all := []string{}
buf := make([]byte, 8)
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
fmt.Printf("package charmap\n\n")
fmt.Printf("import (\n")
fmt.Printf("\t\"golang.org/x/text/encoding\"\n")
fmt.Printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n")
fmt.Printf(")\n\n")
for _, e := range encodings {
varNames := strings.Split(e.varName, ",")
all = append(all, varNames...)
varName := varNames[0]
if strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/") {
e.mapping = getWHATWG(e.mapping)
}
asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00
if asciiSuperset {
low = 0x80
}
lvn := 1
if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") {
lvn = 3
}
lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:]
fmt.Printf("// %s is the %s encoding.\n", varName, e.name)
if e.comment != "" {
fmt.Printf("//\n// %s\n", e.comment)
}
fmt.Printf("var %s encoding.Encoding = &%s\n\nvar %s = charmap{\nname: %q,\n",
varName, lowerVarName, lowerVarName, e.name)
if mibs[e.mib] {
log.Fatalf("MIB type %q declared multiple times.", e.mib)
}
fmt.Printf("mib: identifier.%s,\n", e.mib)
fmt.Printf("asciiSuperset: %t,\n", asciiSuperset)
fmt.Printf("low: 0x%02x,\n", low)
fmt.Printf("replacement: 0x%02x,\n", e.replacement)
fmt.Printf("decode: [256]utf8Enc{\n")
i, backMapping := 0, map[rune]byte{}
for _, c := range e.mapping {
if _, ok := backMapping[c]; !ok {
backMapping[c] = byte(i)
}
for j := range buf {
buf[j] = 0
}
n := utf8.EncodeRune(buf, c)
if n > 3 {
panic(fmt.Sprintf("rune %q (%U) is too long", c, c))
}
fmt.Printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2])
if i%2 == 1 {
fmt.Printf("\n")
}
i++
}
fmt.Printf("},\n")
fmt.Printf("encode: [256]uint32{\n")
encode := make([]uint32, 0, 256)
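		// Pack each mapping as (table index << 24) | code point so entries can be
		// sorted by rune value (the low 24 bits) while the encoded byte stays in the top 8 bits.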
for c, i := range backMapping {
encode = append(encode, uint32(i)<<24|uint32(c))
}
sort.Sort(byRune(encode))
for len(encode) < cap(encode) {
encode = append(encode, encode[len(encode)-1])
}
for i, enc := range encode {
fmt.Printf("0x%08x,", enc)
if i%8 == 7 {
fmt.Printf("\n")
}
}
fmt.Printf("},\n}\n")
}
// TODO: add proper line breaking.
fmt.Printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n"))
}
type byRune []uint32
func (b byRune) Len() int { return len(b) }
func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff }
func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] }<|fim▁end|> | "Windows1253", |
<|file_name|>generic-newtype-struct.rs<|end_file_name|><|fim▁begin|>struct S<T>(T);
pub fn main() {
let s = S(2i);<|fim▁hole|>}<|fim▁end|> | println(s.to_str()); |
<|file_name|>objectAt.js<|end_file_name|><|fim▁begin|>import {SuiteModuleBuilder} from 'ember-runtime/tests/suites/suite';
import {fmt} from 'ember-runtime/system/string';
var suite = SuiteModuleBuilder.create();
suite.module('objectAt');
suite.test('should return object at specified index', function() {
var expected = this.newFixture(3);
var obj = this.newObject(expected);
var len = expected.length;
var idx;
for (idx=0;idx<len;idx++) {
equal(obj.objectAt(idx), expected[idx], fmt('obj.objectAt(%@) should match', [idx]));
}
});
<|fim▁hole|> equal(obj.objectAt(5), undefined, 'should return undefined for obj.objectAt(5) when len = 3');
obj = this.newObject([]);
equal(obj.objectAt(0), undefined, 'should return undefined for obj.objectAt(0) when len = 0');
});
export default suite;<|fim▁end|> | suite.test('should return undefined when requesting objects beyond index', function() {
var obj;
obj = this.newObject(this.newFixture(3)); |
<|file_name|>SupportWorkspace.tsx<|end_file_name|><|fim▁begin|>import { useCurrentStateAndParams } from '@uirouter/react';
import { useEffect, useState, FunctionComponent } from 'react';
import { useDispatch } from 'react-redux';
import { translate } from '@waldur/i18n';
import {
setBreadcrumbs,
useBreadcrumbsFn,
} from '@waldur/navigation/breadcrumbs/store';
import { BreadcrumbItem } from '@waldur/navigation/breadcrumbs/types';
import { Layout } from '@waldur/navigation/Layout';
import { setCurrentWorkspace } from '@waldur/workspace/actions';
import { SUPPORT_WORKSPACE } from '@waldur/workspace/types';
import { IssueNavigationService } from './IssueNavigationService';
import { SupportSidebar } from './SupportSidebar';
function getBreadcrumbs(): BreadcrumbItem[] {
return [
{
label: translate('Support dashboard'),
action: () => IssueNavigationService.gotoDashboard(),
},
];
}
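// Pushes a "Reporting" crumb onto the support breadcrumbs while the calling
// component is mounted, restoring the base breadcrumbs on unmount.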
export function useReportingBreadcrumbs() {
const dispatch = useDispatch();
useEffect(() => {
dispatch(
setBreadcrumbs([
...getBreadcrumbs(),
{
label: translate('Reporting'),
},
]),
);
return () => {
dispatch(setBreadcrumbs(getBreadcrumbs()));
};
});
}
export const SupportWorkspace: FunctionComponent = () => {<|fim▁hole|>
function refreshState() {
const data = state?.data;
setPageClass(data?.pageClass);
setHideBreadcrumbs(data?.hideBreadcrumbs);
}
useBreadcrumbsFn(getBreadcrumbs, []);
useEffect(() => {
dispatch(setCurrentWorkspace(SUPPORT_WORKSPACE));
}, [dispatch]);
useEffect(refreshState, [state, params]);
return (
<Layout
sidebar={<SupportSidebar />}
pageClass={pageClass}
hideBreadcrumbs={hideBreadcrumbs}
/>
);
};<|fim▁end|> | const [pageClass, setPageClass] = useState<string>();
const [hideBreadcrumbs, setHideBreadcrumbs] = useState<boolean>();
const { state, params } = useCurrentStateAndParams();
const dispatch = useDispatch(); |
<|file_name|>test_parse.js<|end_file_name|><|fim▁begin|>var assert = require('assert')
var parse = require('../').parse
function addTest(arg, bulk) {
function fn_json5() {
//console.log('testing: ', arg)
try {
var x = parse(arg)
} catch(err) {
x = 'fail'
}
try {
var z = eval('(function(){"use strict"\nreturn ('+String(arg)+'\n)\n})()')
} catch(err) {
z = 'fail'
}
assert.deepEqual(x, z)
}
function fn_strict() {
//console.log('testing: ', arg)
try {
var x = parse(arg, {mode: 'json'})
} catch(err) {
x = 'fail'<|fim▁hole|> }
try {
var z = JSON.parse(arg)
} catch(err) {
z = 'fail'
}
assert.deepEqual(x, z)
}
if (typeof(describe) === 'function' && !bulk) {
it('test_parse_json5: ' + JSON.stringify(arg), fn_json5)
it('test_parse_strict: ' + JSON.stringify(arg), fn_strict)
} else {
fn_json5()
fn_strict()
}
}
addTest('"\\uaaaa\\u0000\\uFFFF\\uFaAb"')
addTest(' "\\xaa\\x00\xFF\xFa\0\0" ')
addTest('"\\\'\\"\\b\\f\\t\\n\\r\\v"')
addTest('"\\q\\w\\e\\r\\t\\y\\\\i\\o\\p\\[\\/\\\\"')
addTest('"\\\n\\\r\n\\\n"')
addTest('\'\\\n\\\r\n\\\n\'')
addTest(' null')
addTest('true ')
addTest('false')
addTest(' Infinity ')
addTest('+Infinity')
addTest('[]')
addTest('[ 0xA2, 0X024324AaBf]')
addTest('-0x12')
addTest(' [1,2,3,4,5]')
addTest('[1,2,3,4,5,] ')
addTest('[1e-13]')
addTest('[null, true, false]')
addTest(' [1,2,"3,4,",5,]')
addTest('[ 1,\n2,"3,4," \r\n,\n5,]')
addTest('[ 1 , 2 , 3 , 4 , 5 , ]')
addTest('{} ')
addTest('{"2":1,"3":null,}')
addTest('{ "2 " : 1 , "3":null , }')
addTest('{ \"2\" : 25e245 , \"3\": 23 }')
addTest('{"2":1,"3":nul,}')
addTest('{:1,"3":nul,}')
addTest('[1,2] // ssssssssss 3,4,5,] ')
addTest('[1,2 , // ssssssssss \n//xxx\n3,4,5,] ')
addTest('[1,2 /* ssssssssss 3,4,*/ /* */ , 5 ] ')
addTest('[1,2 /* ssssssssss 3,4,*/ /* * , 5 ] ')
addTest('{"3":1,"3":,}')
addTest('{ чйуач:1, щцкшчлм : 4,}')
addTest('{ qef-:1 }')
addTest('{ $$$:1 , ___: 3}')
addTest('{3:1,2:1}')
addTest('{3.4e3:1}')
addTest('{-3e3:1}')
addTest('{+3e3:1}')
addTest('{.3e3:1}')
for (var i=0; i<200; i++) {
addTest('"' + String.fromCharCode(i) + '"', true)
}
// strict JSON test cases
addTest('"\\xaa"')
addTest('"\\0"')
addTest('"\0"')
addTest('"\\v"')
addTest('{null: 123}')
addTest("{'null': 123}")
assert.throws(function() {
parse('0o')
})
assert.strictEqual(parse('01234567'), 342391)
assert.strictEqual(parse('0o1234567'), 342391)
// undef
assert.strictEqual(parse(undefined), undefined)
// whitespaces
addTest('[1,\r\n2,\r3,\n]')
'\u0020\u00A0\uFEFF\x09\x0A\x0B\x0C\x0D\u1680\u180E\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200A\u2028\u2029\u202F\u205F\u3000'.split('').forEach(function(x) {
addTest(x+'[1,'+x+'2]'+x)
addTest('"'+x+'"'+x)
})
'\u000A\u000D\u2028\u2029'.split('').forEach(function(x) {
addTest(x+'[1,'+x+'2]'+x)
addTest('"\\'+x+'"'+x)
})
/* weird ES6 stuff, not working
if (process.version > 'v0.11.7') {
assert(Array.isArray(parse('{__proto__:[]}').__proto__))
assert.equal(parse('{__proto__:{xxx:5}}').xxx, undefined)
assert.equal(parse('{__proto__:{xxx:5}}').__proto__.xxx, 5)
var o1 = parse('{"__proto__":[]}')
assert.deepEqual([], o1.__proto__)
assert.deepEqual(["__proto__"], Object.keys(o1))
assert.deepEqual([], Object.getOwnPropertyDescriptor(o1, "__proto__").value)
assert.deepEqual(["__proto__"], Object.getOwnPropertyNames(o1))
assert(o1.hasOwnProperty("__proto__"))
assert(Object.prototype.isPrototypeOf(o1))
// Parse a non-object value as __proto__.
var o2 = JSON.parse('{"__proto__":5}')
assert.deepEqual(5, o2.__proto__)
assert.deepEqual(["__proto__"], Object.keys(o2))
assert.deepEqual(5, Object.getOwnPropertyDescriptor(o2, "__proto__").value)
assert.deepEqual(["__proto__"], Object.getOwnPropertyNames(o2))
assert(o2.hasOwnProperty("__proto__"))
assert(Object.prototype.isPrototypeOf(o2))
}*/
assert.throws(parse.bind(null, "{-1:42}"))
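// fuzz check: for random 5-character strings drawn from "-01.e", the JSON5 result
// must agree with either JSON.parse or eval (all three failing also counts as agreement)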
for (var i=0; i<100; i++) {
var str = '-01.e'.split('')
var rnd = [1,2,3,4,5].map(function(x) {
x = ~~(Math.random()*str.length)
return str[x]
}).join('')
try {
var x = parse(rnd)
} catch(err) {
x = 'fail'
}
try {
var y = JSON.parse(rnd)
} catch(err) {
y = 'fail'
}
try {
var z = eval(rnd)
} catch(err) {
z = 'fail'
}
//console.log(rnd, x, y, z)
if (x !== y && x !== z) throw 'ERROR'
}<|fim▁end|> | |
<|file_name|>views.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import functools
import httplib as http
import logging
import time
import bleach
from django.db.models import Q
from flask import request
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError
from framework import sentry
from website import language
from osf.models import OSFUser, AbstractNode
from website import settings
from website.project.views.contributor import get_node_contributors_abbrev
from website.ember_osf_web.decorators import ember_flag_is_active
from website.search import exceptions
import website.search.search as search
from website.search.util import build_query
logger = logging.getLogger(__name__)
RESULTS_PER_PAGE = 250
def handle_search_errors(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions.MalformedQueryError:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Bad search query',
'message_long': language.SEARCH_QUERY_HELP,
})
except exceptions.SearchUnavailableError:
raise HTTPError(http.SERVICE_UNAVAILABLE, data={
'message_short': 'Search unavailable',
'message_long': ('Our search service is currently unavailable, if the issue persists, '
+ language.SUPPORT_LINK),
})
except exceptions.SearchException:
# Interim fix for issue where ES fails with 500 in some settings- ensure exception is still logged until it can be better debugged. See OSF-4538
sentry.log_exception()
sentry.log_message('Elasticsearch returned an unexpected error response')
# TODO: Add a test; may need to mock out the error response due to inability to reproduce error code locally
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Could not perform search query',
'message_long': language.SEARCH_QUERY_HELP,
})
return wrapped
@handle_search_errors
def search_search(**kwargs):
_type = kwargs.get('type', None)
tick = time.time()
results = {}
if request.method == 'POST':
results = search.search(request.get_json(), doc_type=_type)
elif request.method == 'GET':
q = request.args.get('q', '*')
# TODO Match javascript params?
start = request.args.get('from', '0')
size = request.args.get('size', '10')
results = search.search(build_query(q, start, size), doc_type=_type)
results['time'] = round(time.time() - tick, 2)
return results
@ember_flag_is_active('ember_search_page')
def search_view():
    return {'shareUrl': settings.SHARE_URL}
def conditionally_add_query_item(query, item, condition, value):
""" Helper for the search_projects_by_title function which will add a condition to a query
It will give an error if the proper search term is not used.
:param query: The modular ODM query that you want to modify
:param item: the field to query on
:param condition: yes, no, or either
:return: the modified query
"""
condition = condition.lower()
if condition == 'yes':
return query & Q(**{item: value})
elif condition == 'no':
return query & ~Q(**{item: value})
elif condition == 'either':
return query
raise HTTPError(http.BAD_REQUEST)
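# Usage sketch (illustrative only - the search term and fields here are examples):
#
#   query = Q(title__icontains='term', category='project')
#   query = conditionally_add_query_item(query, 'is_deleted', 'no', True)
#   query = conditionally_add_query_item(query, 'type', 'either', 'osf.registration')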
@must_be_logged_in
def search_projects_by_title(**kwargs):
""" Search for nodes by title. Can pass in arguments from the URL to modify the search
:arg term: The substring of the title.
:arg category: Category of the node.
:arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg includePublic: yes or no. Whether the projects listed should include public projects.
:arg includeContributed: yes or no. Whether the search should include projects the current user has
contributed to.
:arg ignoreNode: a list of nodes that should not be included in the search.
:return: a list of dictionaries of projects
"""
# TODO(fabianvf): At some point, it would be nice to do this with elastic search
user = kwargs['auth'].user
term = request.args.get('term', '')
max_results = int(request.args.get('maxResults', '10'))
category = request.args.get('category', 'project').lower()
is_deleted = request.args.get('isDeleted', 'no').lower()
is_collection = request.args.get('isFolder', 'no').lower()
is_registration = request.args.get('isRegistration', 'no').lower()
include_public = request.args.get('includePublic', 'yes').lower()
include_contributed = request.args.get('includeContributed', 'yes').lower()
ignore_nodes = request.args.getlist('ignoreNode', [])
matching_title = Q(
title__icontains=term, # search term (case insensitive)
category=category # is a project
)
matching_title = conditionally_add_query_item(matching_title, 'is_deleted', is_deleted, True)
matching_title = conditionally_add_query_item(matching_title, 'type', is_registration, 'osf.registration')
matching_title = conditionally_add_query_item(matching_title, 'type', is_collection, 'osf.collection')
if len(ignore_nodes) > 0:
for node_id in ignore_nodes:
matching_title = matching_title & ~Q(_id=node_id)
my_projects = []
my_project_count = 0
public_projects = []
if include_contributed == 'yes':
my_projects = AbstractNode.objects.filter(
matching_title &
Q(_contributors=user) # user is a contributor
)[:max_results]
        my_project_count = my_projects.count()
if my_project_count < max_results and include_public == 'yes':
public_projects = AbstractNode.objects.filter(
matching_title &
Q(is_public=True) # is public
)[:max_results - my_project_count]
results = list(my_projects) + list(public_projects)
ret = process_project_search_results(results, **kwargs)
return ret
@must_be_logged_in
def process_project_search_results(results, **kwargs):
"""
:param results: list of projects from the modular ODM search
:return: we return the entire search result, which is a list of
dictionaries. This includes the list of contributors.
"""
user = kwargs['auth'].user
ret = []
for project in results:
authors = get_node_contributors_abbrev(project=project, auth=kwargs['auth'])
authors_html = ''
for author in authors['contributors']:
a = OSFUser.load(author['user_id'])
authors_html += '<a href="%s">%s</a>' % (a.url, a.fullname)
authors_html += author['separator'] + ' '
authors_html += ' ' + authors['others_count']
ret.append({
'id': project._id,
'label': project.title,
'value': project.title,
'category': 'My Projects' if user in project.contributors else 'Public Projects',<|fim▁hole|> })
return ret
@collect_auth
def search_contributor(auth):
user = auth.user if auth else None
nid = request.args.get('excludeNode')
exclude = AbstractNode.load(nid).contributors if nid else []
# TODO: Determine whether bleach is appropriate for ES payload. Also, inconsistent with website.sanitize.util.strip_html
query = bleach.clean(request.args.get('query', ''), tags=[], strip=True)
page = int(bleach.clean(request.args.get('page', '0'), tags=[], strip=True))
size = int(bleach.clean(request.args.get('size', '5'), tags=[], strip=True))
return search.search_contributor(query=query, page=page, size=size,
exclude=exclude, current_user=user)<|fim▁end|> | 'authors': authors_html, |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import find_packages
from os import path, environ
import io
import os
import re
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy as np
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
# pip's single-source version method as described here:
# https://python-packaging-user-guide.readthedocs.io/single_source_version/
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
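# find_version expects a line such as: __version__ = '2.2' (single or double quotes);
# the value shown here is only an example of the expected format.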
# fetch include and library directories
inc_dirs = [np.get_include(), '/usr/local/include/librealsense']
lib_dirs = ['/usr/local/lib']
# windows environment variables
if 'PYRS_INCLUDES' in environ:
inc_dirs.append(environ['PYRS_INCLUDES'])
if 'PYRS_LIBS' in environ:
lib_dirs.append(environ['PYRS_LIBS'])
# cython extension; don't build it when building docs
on_rtd = environ.get('READTHEDOCS') == 'True'
if on_rtd:
module = []
else:
module = cythonize(
[Extension(
name='pyrealsense.rsutilwrapper',
sources=["pyrealsense/rsutilwrapper.pyx", "pyrealsense/rsutilwrapperc.cpp"],
libraries=['realsense'],
include_dirs=inc_dirs,
library_dirs=lib_dirs,
language="c++",)])
# create long description from readme for pypi
here = path.abspath(path.dirname(__file__))
with io.open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(name='pyrealsense',
version=find_version('pyrealsense', '__init__.py'),<|fim▁hole|>
description='Cross-platform ctypes/Cython wrapper to the librealsense library.',
long_description=long_description,
author='Antoine Loriette',
author_email='[email protected]',
url='https://github.com/toinsson/pyrealsense',
license='Apache',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
# 'License :: OSem :: Hardware',
],
keywords='realsense',
packages=find_packages(),
ext_modules=module,
setup_requires=['numpy', 'cython'],
install_requires=['numpy', 'cython', 'pycparser', 'six'])<|fim▁end|> | |
<|file_name|>events.py<|end_file_name|><|fim▁begin|># sqlalchemy/events.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from . import event
from . import exc
from . import util
from .engine import Connectable
from .engine import Dialect
from .engine import Engine
from .pool import Pool
from .sql.base import SchemaEventTarget
class DDLEvents(event.Events):
"""
Define event listeners for schema objects,
that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget`
subclasses, including :class:`_schema.MetaData`, :class:`_schema.Table`,
:class:`_schema.Column`.
:class:`_schema.MetaData` and :class:`_schema.Table` support events
specifically regarding when CREATE and DROP
DDL is emitted to the database.
Attachment events are also provided to customize
behavior whenever a child schema element is associated
with a parent, such as, when a :class:`_schema.Column` is associated
with its :class:`_schema.Table`, when a
:class:`_schema.ForeignKeyConstraint`
is associated with a :class:`_schema.Table`, etc.
Example using the ``after_create`` event::
from sqlalchemy import event
        from sqlalchemy import Table, Column, MetaData, Integer
m = MetaData()
some_table = Table('some_table', m, Column('data', Integer))
def after_create(target, connection, **kw):
connection.execute("ALTER TABLE %s SET name=foo_%s" %
(target.name, target.name))
event.listen(some_table, "after_create", after_create)
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.DDLElement` hierarchy
of DDL clause constructs, which are themselves appropriate
as listener callables::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
)
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
    For all :class:`.DDLEvents` events, the ``propagate=True`` keyword argument
will ensure that a given event handler is propagated to copies of the
object, which are made when using the :meth:`_schema.Table.tometadata`
method::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s"),
propagate=True
)
new_table = some_table.tometadata(new_metadata)
The above :class:`.DDL` object will also be associated with the
:class:`_schema.Table` object represented by ``new_table``.
.. seealso::
:ref:`event_toplevel`
:class:`.DDLElement`
:class:`.DDL`
:ref:`schema_ddl_sequences`
"""
_target_class_doc = "SomeSchemaClassOrObject"
_dispatch_target = SchemaEventTarget
def before_create(self, target, connection, **kw):
r"""Called before CREATE statements are emitted.
:param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
object which is the target of the event.
:param connection: the :class:`_engine.Connection` where the
CREATE statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.tometadata` is used.
"""
def after_create(self, target, connection, **kw):
r"""Called after CREATE statements are emitted.
:param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
object which is the target of the event.
:param connection: the :class:`_engine.Connection` where the
CREATE statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.tometadata` is used.
"""
def before_drop(self, target, connection, **kw):
r"""Called before DROP statements are emitted.
:param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
object which is the target of the event.
:param connection: the :class:`_engine.Connection` where the
DROP statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.tometadata` is used.
"""
def after_drop(self, target, connection, **kw):
r"""Called after DROP statements are emitted.
:param target: the :class:`_schema.MetaData` or :class:`_schema.Table`
object which is the target of the event.
:param connection: the :class:`_engine.Connection` where the
DROP statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.tometadata` is used.
"""
def before_parent_attach(self, target, parent):
"""Called before a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.tometadata` is used.
"""
def after_parent_attach(self, target, parent):
"""Called after a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.tometadata` is used.
"""
def column_reflect(self, inspector, table, column_info):
"""Called for each unit of 'column info' retrieved when
a :class:`_schema.Table` is being reflected.
The dictionary of column information as returned by the
dialect is passed, and can be modified. The dictionary
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`:
* ``name`` - the column's name
* ``type`` - the type of this column, which should be an instance
of :class:`~sqlalchemy.types.TypeEngine`
* ``nullable`` - boolean flag if the column is NULL or NOT NULL
* ``default`` - the column's server default value. This is
normally specified as a plain string SQL expression, however the
event can pass a :class:`.FetchedValue`, :class:`.DefaultClause`,
or :func:`_expression.text` object as well.
.. versionchanged:: 1.1.6
The :meth:`.DDLEvents.column_reflect` event allows a non
string :class:`.FetchedValue`,
:func:`_expression.text`, or derived object to be
specified as the value of ``default`` in the column
dictionary.
* ``attrs`` - dict containing optional column attributes
The event is called before any action is taken against
this dictionary, and the contents can be modified.
The :class:`_schema.Column` specific arguments ``info``, ``key``,
and ``quote`` can also be added to the dictionary and
will be passed to the constructor of :class:`_schema.Column`.
Note that this event is only meaningful if either
associated with the :class:`_schema.Table` class across the
board, e.g.::
from sqlalchemy.schema import Table
from sqlalchemy import event
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
event.listen(
Table,
'column_reflect',
listen_for_reflect)
...or with a specific :class:`_schema.Table` instance using
the ``listeners`` argument::
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
        This is because the reflection process initiated by ``autoload=True``
completes within the scope of the constructor for
:class:`_schema.Table`.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.tometadata` is used.
"""
class PoolEvents(event.Events):
"""Available events for :class:`_pool.Pool`.
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
e.g.::
from sqlalchemy import event
def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
"handle an on checkout event"
event.listen(Pool, 'checkout', my_on_checkout)
In addition to accepting the :class:`_pool.Pool` class and
:class:`_pool.Pool` instances, :class:`_events.PoolEvents` also accepts
:class:`_engine.Engine` objects and the :class:`_engine.Engine` class as
targets, which will be resolved to the ``.pool`` attribute of the
given engine or the :class:`_pool.Pool` class::
engine = create_engine("postgresql://scott:tiger@localhost/test")
# will associate with engine.pool
event.listen(engine, 'checkout', my_on_checkout)
"""
_target_class_doc = "SomeEngineOrPool"
_dispatch_target = Pool
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Pool
elif issubclass(target, Pool):
return target
elif isinstance(target, Engine):
return target.pool
else:
return target
def connect(self, dbapi_connection, connection_record):
"""Called at the moment a particular DBAPI connection is first
created for a given :class:`_pool.Pool`.
This event allows one to capture the point directly after which
the DBAPI module-level ``.connect()`` method has been used in order
to produce a new DBAPI connection.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
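        For illustration only, a minimal listener sketch; the ``PRAGMA``
        statement shown assumes a SQLite target and is not part of this API::
            from sqlalchemy import event
            from sqlalchemy.pool import Pool
            @event.listens_for(Pool, "connect")
            def _set_sqlite_pragma(dbapi_connection, connection_record):
                # runs once per newly created DBAPI connection
                cursor = dbapi_connection.cursor()
                cursor.execute("PRAGMA foreign_keys=ON")
                cursor.close()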
"""
def first_connect(self, dbapi_connection, connection_record):
"""Called exactly once for the first time a DBAPI connection is
checked out from a particular :class:`_pool.Pool`.
The rationale for :meth:`_events.PoolEvents.first_connect`
is to determine
information about a particular series of database connections based
on the settings used for all connections. Since a particular
:class:`_pool.Pool`
refers to a single "creator" function (which in terms
of a :class:`_engine.Engine`
refers to the URL and connection options used),
it is typically valid to make observations about a single connection
that can be safely assumed to be valid about all subsequent
connections, such as the database version, the server and client
encoding settings, collation settings, and many others.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def checkout(self, dbapi_connection, connection_record, connection_proxy):
"""Called when a connection is retrieved from the Pool.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param connection_proxy: the :class:`._ConnectionFairy` object which
will proxy the public interface of the DBAPI connection for the
lifespan of the checkout.
If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current
connection will be disposed and a fresh connection retrieved.
Processing of all checkout listeners will abort and restart
using the new connection.
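        For illustration only, a sketch of the "ping on checkout" pattern; the
        ``SELECT 1`` probe is an assumption about the target backend::
            from sqlalchemy import event, exc
            from sqlalchemy.pool import Pool
            @event.listens_for(Pool, "checkout")
            def _ping_connection(dbapi_connection, connection_record,
                                 connection_proxy):
                cursor = dbapi_connection.cursor()
                try:
                    # raise DisconnectionError so the pool retries with a
                    # fresh connection if the probe fails
                    cursor.execute("SELECT 1")
                except Exception:
                    raise exc.DisconnectionError()
                finally:
                    cursor.close()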
.. seealso:: :meth:`_events.ConnectionEvents.engine_connect`
- a similar event
which occurs upon creation of a new :class:`_engine.Connection`.
"""
def checkin(self, dbapi_connection, connection_record):
"""Called when a connection returns to the pool.
Note that the connection may be closed, and may be None if the
connection has been invalidated. ``checkin`` will not be called
for detached connections. (They do not return to the pool.)
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def reset(self, dbapi_connection, connection_record):
"""Called before the "reset" action occurs for a pooled connection.
This event represents
when the ``rollback()`` method is called on the DBAPI connection
before it is returned to the pool. The behavior of "reset" can
be controlled, including disabled, using the ``reset_on_return``
pool argument.
The :meth:`_events.PoolEvents.reset` event is usually followed by the
        :meth:`_events.PoolEvents.checkin` event, except in those
cases where the connection is discarded immediately after reset.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
.. seealso::
:meth:`_events.ConnectionEvents.rollback`
:meth:`_events.ConnectionEvents.commit`
"""
def invalidate(self, dbapi_connection, connection_record, exception):
"""Called when a DBAPI connection is to be "invalidated".
This event is called any time the :meth:`._ConnectionRecord.invalidate`
method is invoked, either from API usage or via "auto-invalidation",
without the ``soft`` flag.
The event occurs before a final attempt to call ``.close()`` on the
connection occurs.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param exception: the exception object corresponding to the reason
for this invalidation, if any. May be ``None``.
.. versionadded:: 0.9.2 Added support for connection invalidation
listening.
.. seealso::
:ref:`pool_connection_invalidation`
"""
def soft_invalidate(self, dbapi_connection, connection_record, exception):
"""Called when a DBAPI connection is to be "soft invalidated".
This event is called any time the :meth:`._ConnectionRecord.invalidate`
method is invoked with the ``soft`` flag.
Soft invalidation refers to when the connection record that tracks
this connection will force a reconnect after the current connection
is checked in. It does not actively close the dbapi_connection
at the point at which it is called.
.. versionadded:: 1.0.3
"""
def close(self, dbapi_connection, connection_record):
"""Called when a DBAPI connection is closed.
The event is emitted before the close occurs.
The close of a connection can fail; typically this is because
the connection is already closed. If the close operation fails,
the connection is discarded.
The :meth:`.close` event corresponds to a connection that's still
associated with the pool. To intercept close events for detached
connections use :meth:`.close_detached`.
.. versionadded:: 1.1
"""
def detach(self, dbapi_connection, connection_record):
"""Called when a DBAPI connection is "detached" from a pool.
This event is emitted after the detach occurs. The connection
is no longer associated with the given connection record.
.. versionadded:: 1.1
"""
def close_detached(self, dbapi_connection):
"""Called when a detached DBAPI connection is closed.
The event is emitted before the close occurs.
The close of a connection can fail; typically this is because
the connection is already closed. If the close operation fails,
the connection is discarded.
.. versionadded:: 1.1
"""
class ConnectionEvents(event.Events):
"""Available events for :class:`.Connectable`, which includes
:class:`_engine.Connection` and :class:`_engine.Engine`.
The methods here define the name of an event as well as the names of
members that are passed to listener functions.
An event listener can be associated with any :class:`.Connectable`
class or instance, such as an :class:`_engine.Engine`, e.g.::
from sqlalchemy import event, create_engine
def before_cursor_execute(conn, cursor, statement, parameters, context,
executemany):
log.info("Received statement: %s", statement)
engine = create_engine('postgresql://scott:tiger@localhost/test')
event.listen(engine, "before_cursor_execute", before_cursor_execute)
or with a specific :class:`_engine.Connection`::
with engine.begin() as conn:
@event.listens_for(conn, 'before_cursor_execute')
def before_cursor_execute(conn, cursor, statement, parameters,
context, executemany):
log.info("Received statement: %s", statement)
When the methods are called with a `statement` parameter, such as in
:meth:`.after_cursor_execute`, :meth:`.before_cursor_execute` and
:meth:`.dbapi_error`, the statement is the exact SQL string that was
prepared for transmission to the DBAPI ``cursor`` in the connection's
:class:`.Dialect`.
The :meth:`.before_execute` and :meth:`.before_cursor_execute`
events can also be established with the ``retval=True`` flag, which
allows modification of the statement and parameters to be sent
to the database. The :meth:`.before_cursor_execute` event is
particularly useful here to add ad-hoc string transformations, such
as comments, to all executions::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def comment_sql_calls(conn, cursor, statement, parameters,
context, executemany):
statement = statement + " -- some comment"
return statement, parameters
.. note:: :class:`_events.ConnectionEvents` can be established on any
combination of :class:`_engine.Engine`, :class:`_engine.Connection`,
as well
as instances of each of those classes. Events across all
four scopes will fire off for a given instance of
:class:`_engine.Connection`. However, for performance reasons, the
:class:`_engine.Connection` object determines at instantiation time
whether or not its parent :class:`_engine.Engine` has event listeners
established. Event listeners added to the :class:`_engine.Engine`
class or to an instance of :class:`_engine.Engine`
*after* the instantiation
of a dependent :class:`_engine.Connection` instance will usually
*not* be available on that :class:`_engine.Connection` instance.
The newly
added listeners will instead take effect for
:class:`_engine.Connection`
instances created subsequent to those event listeners being
established on the parent :class:`_engine.Engine` class or instance.
:param retval=False: Applies to the :meth:`.before_execute` and
:meth:`.before_cursor_execute` events only. When True, the
user-defined event function must have a return value, which
is a tuple of parameters that replace the given statement
and parameters. See those methods for a description of
specific return arguments.
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Connectable
@classmethod
def _listen(cls, event_key, retval=False):
target, identifier, fn = (
event_key.dispatch_target,
event_key.identifier,
event_key._listen_fn,
)
target._has_events = True
if not retval:
if identifier == "before_execute":
orig_fn = fn
def wrap_before_execute(
conn, clauseelement, multiparams, params
):
orig_fn(conn, clauseelement, multiparams, params)
return clauseelement, multiparams, params
fn = wrap_before_execute
elif identifier == "before_cursor_execute":
orig_fn = fn
def wrap_before_cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
orig_fn(
conn,
cursor,
statement,
parameters,
context,
executemany,
)
return statement, parameters
fn = wrap_before_cursor_execute
elif retval and identifier not in (
"before_execute",
"before_cursor_execute",
"handle_error",
):
raise exc.ArgumentError(
"Only the 'before_execute', "
"'before_cursor_execute' and 'handle_error' engine "
"event listeners accept the 'retval=True' "
"argument."
)
event_key.with_wrapper(fn).base_listen()
def before_execute(self, conn, clauseelement, multiparams, params):
"""Intercept high level execute() events, receiving uncompiled
SQL constructs and other objects prior to rendering into SQL.
This event is good for debugging SQL compilation issues as well
as early manipulation of the parameters being sent to the database,
as the parameter lists will be in a consistent format here.
This event can be optionally established with the ``retval=True``
flag. The ``clauseelement``, ``multiparams``, and ``params``
arguments should be returned as a three-tuple in this case::
@event.listens_for(Engine, "before_execute", retval=True)
def before_execute(conn, clauseelement, multiparams, params):
# do something with clauseelement, multiparams, params
return clauseelement, multiparams, params
:param conn: :class:`_engine.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to
:meth:`_engine.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
.. seealso::
:meth:`.before_cursor_execute`
"""
def after_execute(self, conn, clauseelement, multiparams, params, result):
"""Intercept high level execute() events after execute.
:param conn: :class:`_engine.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to
:meth:`_engine.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
:param result: :class:`_engine.ResultProxy` generated by the execution
.
"""
def before_cursor_execute(
self, conn, cursor, statement, parameters, context, executemany
):
"""Intercept low-level cursor execute() events before execution,
receiving the string SQL statement and DBAPI-specific parameter list to
be invoked against a cursor.
This event is a good choice for logging as well as late modifications
to the SQL string. It's less ideal for parameter modifications except
for those which are specific to a target backend.
This event can be optionally established with the ``retval=True``
flag. The ``statement`` and ``parameters`` arguments should be
returned as a two-tuple in this case::
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def before_cursor_execute(conn, cursor, statement,
parameters, context, executemany):
# do something with statement, parameters
return statement, parameters
See the example at :class:`_events.ConnectionEvents`.
:param conn: :class:`_engine.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement, as to be passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
.. seealso::
:meth:`.before_execute`
:meth:`.after_cursor_execute`
"""
def after_cursor_execute(
self, conn, cursor, statement, parameters, context, executemany
):
"""Intercept low-level cursor execute() events after execution.
:param conn: :class:`_engine.Connection` object
:param cursor: DBAPI cursor object. Will have results pending
if the statement was a SELECT, but these should not be consumed
as they will be needed by the :class:`_engine.ResultProxy`.
:param statement: string SQL statement, as passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
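        For illustration only, a sketch that pairs this hook with
        :meth:`.before_cursor_execute` to log rough statement timings; the
        ``"query_start_time"`` key stored in ``conn.info`` is an arbitrary
        choice, not part of this API::
            import logging
            import time
            from sqlalchemy import event
            from sqlalchemy.engine import Engine
            @event.listens_for(Engine, "before_cursor_execute")
            def _start_timer(conn, cursor, statement, parameters, context,
                             executemany):
                conn.info.setdefault("query_start_time", []).append(time.time())
            @event.listens_for(Engine, "after_cursor_execute")
            def _stop_timer(conn, cursor, statement, parameters, context,
                            executemany):
                total = time.time() - conn.info["query_start_time"].pop(-1)
                logging.getLogger(__name__).debug("statement took %.4fs", total)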
"""
@util.deprecated(
"0.9",
"The :meth:`_events.ConnectionEvents.dbapi_error` "
"event is deprecated and will be removed in a future release. "
"Please refer to the :meth:`_events.ConnectionEvents.handle_error` "
"event.",
)
def dbapi_error(
self, conn, cursor, statement, parameters, context, exception
):
"""Intercept a raw DBAPI error.
This event is called with the DBAPI exception instance
received from the DBAPI itself, *before* SQLAlchemy wraps the
        exception with its own exception wrappers, and before any
other operations are performed on the DBAPI cursor; the
existing transaction remains in effect as well as any state
on the cursor.
The use case here is to inject low-level exception handling
into an :class:`_engine.Engine`, typically for logging and
debugging purposes.
.. warning::
Code should **not** modify
any state or throw any exceptions here as this will
interfere with SQLAlchemy's cleanup and error handling
routines. For exception modification, please refer to the
new :meth:`_events.ConnectionEvents.handle_error` event.
Subsequent to this hook, SQLAlchemy may attempt any
number of operations on the connection/cursor, including
closing the cursor, rolling back of the transaction in the
case of connectionless execution, and disposing of the entire
connection pool if a "disconnect" was detected. The
exception is then wrapped in a SQLAlchemy DBAPI exception
wrapper and re-thrown.
:param conn: :class:`_engine.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement, as passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param exception: The **unwrapped** exception emitted directly from the
DBAPI. The class here is specific to the DBAPI module in use.
"""
def handle_error(self, exception_context):
r"""Intercept all exceptions processed by the
:class:`_engine.Connection`.
This includes all exceptions emitted by the DBAPI as well as
within SQLAlchemy's statement invocation process, including
encoding errors and other statement validation errors. Other areas
in which the event is invoked include transaction begin and end,
result row fetching, cursor creation.
Note that :meth:`.handle_error` may support new kinds of exceptions
and new calling scenarios at *any time*. Code which uses this
event must expect new calling patterns to be present in minor
releases.
To support the wide variety of members that correspond to an exception,
as well as to allow extensibility of the event without backwards
incompatibility, the sole argument received is an instance of
:class:`.ExceptionContext`. This object contains data members
representing detail about the exception.
Use cases supported by this hook include:
* read-only, low-level exception handling for logging and
debugging purposes
* exception re-writing
* Establishing or disabling whether a connection or the owning
connection pool is invalidated or expired in response to a
specific exception.
The hook is called while the cursor from the failed operation
(if any) is still open and accessible. Special cleanup operations
can be called on this cursor; SQLAlchemy will attempt to close
this cursor subsequent to this hook being invoked. If the connection
is in "autocommit" mode, the transaction also remains open within
the scope of this hook; the rollback of the per-statement transaction
also occurs after the hook is called.
For the common case of detecting a "disconnect" situation which
is not currently handled by the SQLAlchemy dialect, the
:attr:`.ExceptionContext.is_disconnect` flag can be set to True which
will cause the exception to be considered as a disconnect situation,
which typically results in the connection pool being invalidated::
@event.listens_for(Engine, "handle_error")
def handle_exception(context):
if isinstance(context.original_exception, pyodbc.Error):
for code in (
'08S01', '01002', '08003',
'08007', '08S02', '08001', 'HYT00', 'HY010'):
if code in str(context.original_exception):
context.is_disconnect = True
A handler function has two options for replacing
the SQLAlchemy-constructed exception into one that is user
defined. It can either raise this new exception directly, in
which case all further event listeners are bypassed and the
        exception will be raised, after appropriate cleanup has taken
place::
@event.listens_for(Engine, "handle_error")
def handle_exception(context):
if isinstance(context.original_exception,
psycopg2.OperationalError) and \
"failed" in str(context.original_exception):
raise MySpecialException("failed operation")
.. warning:: Because the
:meth:`_events.ConnectionEvents.handle_error`
event specifically provides for exceptions to be re-thrown as
the ultimate exception raised by the failed statement,
**stack traces will be misleading** if the user-defined event
handler itself fails and throws an unexpected exception;
the stack trace may not illustrate the actual code line that
failed! It is advised to code carefully here and use
logging and/or inline debugging if unexpected exceptions are
occurring.
Alternatively, a "chained" style of event handling can be
used, by configuring the handler with the ``retval=True``
modifier and returning the new exception instance from the
function. In this case, event handling will continue onto the
next handler. The "chained" exception is available using
:attr:`.ExceptionContext.chained_exception`::
@event.listens_for(Engine, "handle_error", retval=True)
def handle_exception(context):
if context.chained_exception is not None and \
"special" in context.chained_exception.message:
return MySpecialException("failed",
cause=context.chained_exception)
Handlers that return ``None`` may be used within the chain; when
a handler returns ``None``, the previous exception instance,
if any, is maintained as the current exception that is passed onto the
next handler.
When a custom exception is raised or returned, SQLAlchemy raises
this new exception as-is, it is not wrapped by any SQLAlchemy
object. If the exception is not a subclass of
:class:`sqlalchemy.exc.StatementError`,
certain features may not be available; currently this includes
the ORM's feature of adding a detail hint about "autoflush" to
exceptions raised within the autoflush process.
:param context: an :class:`.ExceptionContext` object. See this
class for details on all available members.
.. versionadded:: 0.9.7 Added the
:meth:`_events.ConnectionEvents.handle_error` hook.
.. versionchanged:: 1.1 The :meth:`.handle_error` event will now
receive all exceptions that inherit from ``BaseException``,
including ``SystemExit`` and ``KeyboardInterrupt``. The setting for
:attr:`.ExceptionContext.is_disconnect` is ``True`` in this case and
the default for
:attr:`.ExceptionContext.invalidate_pool_on_disconnect` is
``False``.
.. versionchanged:: 1.0.0 The :meth:`.handle_error` event is now
invoked when an :class:`_engine.Engine` fails during the initial
call to :meth:`_engine.Engine.connect`, as well as when a
:class:`_engine.Connection` object encounters an error during a
reconnect operation.
.. versionchanged:: 1.0.0 The :meth:`.handle_error` event is
not fired off when a dialect makes use of the
``skip_user_error_events`` execution option. This is used
by dialects which intend to catch SQLAlchemy-specific exceptions
within specific operations, such as when the MySQL dialect detects
a table not present within the ``has_table()`` dialect method.
Prior to 1.0.0, code which implements :meth:`.handle_error` needs
to ensure that exceptions thrown in these scenarios are re-raised
without modification.
"""
def engine_connect(self, conn, branch):
"""Intercept the creation of a new :class:`_engine.Connection`.
This event is called typically as the direct result of calling
the :meth:`_engine.Engine.connect` method.
It differs from the :meth:`_events.PoolEvents.connect` method, which
refers to the actual connection to a database at the DBAPI level;
a DBAPI connection may be pooled and reused for many operations.
In contrast, this event refers only to the production of a higher level
:class:`_engine.Connection` wrapper around such a DBAPI connection.
It also differs from the :meth:`_events.PoolEvents.checkout` event
in that it is specific to the :class:`_engine.Connection` object,
not the
DBAPI connection that :meth:`_events.PoolEvents.checkout` deals with,
although
this DBAPI connection is available here via the
:attr:`_engine.Connection.connection` attribute.
But note there can in fact
be multiple :meth:`_events.PoolEvents.checkout`
events within the lifespan
of a single :class:`_engine.Connection` object, if that
:class:`_engine.Connection`
is invalidated and re-established. There can also be multiple
:class:`_engine.Connection`
objects generated for the same already-checked-out
DBAPI connection, in the case that a "branch" of a
:class:`_engine.Connection`
is produced.
:param conn: :class:`_engine.Connection` object.
:param branch: if True, this is a "branch" of an existing
:class:`_engine.Connection`. A branch is generated within the course
of a statement execution to invoke supplemental statements, most
typically to pre-execute a SELECT of a default value for the purposes
of an INSERT statement.
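        For illustration only, a sketch that probes each new non-branch
        :class:`_engine.Connection`; the ``SELECT 1`` statement is an
        assumption about the backend::
            from sqlalchemy import event
            from sqlalchemy.engine import Engine
            @event.listens_for(Engine, "engine_connect")
            def _probe_new_connection(conn, branch):
                if branch:
                    # skip branches created for supplemental statements
                    return
                conn.scalar("SELECT 1")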
.. versionadded:: 0.9.0
.. seealso::
:ref:`pool_disconnects_pessimistic` - illustrates how to use
:meth:`_events.ConnectionEvents.engine_connect`
to transparently ensure pooled connections are connected to the
database.
:meth:`_events.PoolEvents.checkout`
the lower-level pool checkout event
for an individual DBAPI connection
:meth:`_events.ConnectionEvents.set_connection_execution_options`
- a copy
of a :class:`_engine.Connection` is also made when the
:meth:`_engine.Connection.execution_options` method is called.
"""
def set_connection_execution_options(self, conn, opts):
"""Intercept when the :meth:`_engine.Connection.execution_options`
method is called.
This method is called after the new :class:`_engine.Connection`
has been
produced, with the newly updated execution options collection, but
before the :class:`.Dialect` has acted upon any of those new options.
Note that this method is not called when a new
:class:`_engine.Connection`
is produced which is inheriting execution options from its parent
:class:`_engine.Engine`; to intercept this condition, use the
:meth:`_events.ConnectionEvents.engine_connect` event.
:param conn: The newly copied :class:`_engine.Connection` object
:param opts: dictionary of options that were passed to the
:meth:`_engine.Connection.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`_events.ConnectionEvents.set_engine_execution_options`
- event
which is called when :meth:`_engine.Engine.execution_options`
is called.
"""
def set_engine_execution_options(self, engine, opts):
"""Intercept when the :meth:`_engine.Engine.execution_options`
method is called.
The :meth:`_engine.Engine.execution_options` method produces a shallow
copy of the :class:`_engine.Engine` which stores the new options.
That new
:class:`_engine.Engine` is passed here.
A particular application of this
method is to add a :meth:`_events.ConnectionEvents.engine_connect`
event<|fim▁hole|> :param conn: The newly copied :class:`_engine.Engine` object
:param opts: dictionary of options that were passed to the
         :meth:`_engine.Engine.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`_events.ConnectionEvents.set_connection_execution_options`
- event
which is called when :meth:`_engine.Connection.execution_options`
is
called.
"""
def engine_disposed(self, engine):
"""Intercept when the :meth:`_engine.Engine.dispose` method is called.
The :meth:`_engine.Engine.dispose` method instructs the engine to
"dispose" of it's connection pool (e.g. :class:`_pool.Pool`), and
replaces it with a new one. Disposing of the old pool has the
effect that existing checked-in connections are closed. The new
pool does not establish any new connections until it is first used.
This event can be used to indicate that resources related to the
:class:`_engine.Engine` should also be cleaned up,
keeping in mind that the
:class:`_engine.Engine`
can still be used for new requests in which case
it re-acquires connection resources.
.. versionadded:: 1.0.5
"""
def begin(self, conn):
"""Intercept begin() events.
:param conn: :class:`_engine.Connection` object
"""
def rollback(self, conn):
"""Intercept rollback() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`_pool.Pool` also "auto-rolls back"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to its default value of ``'rollback'``.
To intercept this
rollback, use the :meth:`_events.PoolEvents.reset` hook.
:param conn: :class:`_engine.Connection` object
.. seealso::
:meth:`_events.PoolEvents.reset`
"""
def commit(self, conn):
"""Intercept commit() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`_pool.Pool` may also "auto-commit"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to the value ``'commit'``. To intercept this
commit, use the :meth:`_events.PoolEvents.reset` hook.
:param conn: :class:`_engine.Connection` object
"""
def savepoint(self, conn, name):
"""Intercept savepoint() events.
:param conn: :class:`_engine.Connection` object
:param name: specified name used for the savepoint.
"""
def rollback_savepoint(self, conn, name, context):
"""Intercept rollback_savepoint() events.
:param conn: :class:`_engine.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def release_savepoint(self, conn, name, context):
"""Intercept release_savepoint() events.
:param conn: :class:`_engine.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def begin_twophase(self, conn, xid):
"""Intercept begin_twophase() events.
:param conn: :class:`_engine.Connection` object
:param xid: two-phase XID identifier
"""
def prepare_twophase(self, conn, xid):
"""Intercept prepare_twophase() events.
:param conn: :class:`_engine.Connection` object
:param xid: two-phase XID identifier
"""
def rollback_twophase(self, conn, xid, is_prepared):
"""Intercept rollback_twophase() events.
:param conn: :class:`_engine.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
def commit_twophase(self, conn, xid, is_prepared):
"""Intercept commit_twophase() events.
:param conn: :class:`_engine.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
class DialectEvents(event.Events):
"""event interface for execution-replacement functions.
These events allow direct instrumentation and replacement
of key dialect functions which interact with the DBAPI.
.. note::
:class:`.DialectEvents` hooks should be considered **semi-public**
and experimental.
These hooks are not for general use and are only for those situations
where intricate re-statement of DBAPI mechanics must be injected onto
an existing dialect. For general-use statement-interception events,
please use the :class:`_events.ConnectionEvents` interface.
.. seealso::
:meth:`_events.ConnectionEvents.before_cursor_execute`
:meth:`_events.ConnectionEvents.before_execute`
:meth:`_events.ConnectionEvents.after_cursor_execute`
:meth:`_events.ConnectionEvents.after_execute`
.. versionadded:: 0.9.4
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Dialect
@classmethod
def _listen(cls, event_key, retval=False):
target = event_key.dispatch_target
target._has_events = True
event_key.base_listen()
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Dialect
elif issubclass(target, Dialect):
return target
elif isinstance(target, Engine):
return target.dialect
else:
return target
def do_connect(self, dialect, conn_rec, cargs, cparams):
"""Receive connection arguments before a connection is made.
Return a DBAPI connection to halt further events from invoking;
the returned connection will be used.
Alternatively, the event can manipulate the cargs and/or cparams
collections; cargs will always be a Python list that can be mutated
in-place and cparams a Python dictionary. Return None to
allow control to pass to the next event handler and ultimately
to allow the dialect to connect normally, given the updated
arguments.
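        For illustration only, a sketch that fills in a credential at connect
        time; the ``"password"`` key is DBAPI-specific and the environment
        variable name is an arbitrary assumption::
            import os
            from sqlalchemy import event
            from sqlalchemy.engine import Engine
            @event.listens_for(Engine, "do_connect")
            def _provide_password(dialect, conn_rec, cargs, cparams):
                # mutate cparams in place; returning None lets the dialect
                # connect normally with the updated arguments
                cparams["password"] = os.environ.get("DB_PASSWORD", "")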
.. versionadded:: 1.0.3
"""
def do_executemany(self, cursor, statement, parameters, context):
"""Receive a cursor to have executemany() called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
def do_execute_no_params(self, cursor, statement, context):
"""Receive a cursor to have execute() with no parameters called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
def do_execute(self, cursor, statement, parameters, context):
"""Receive a cursor to have execute() called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
def do_setinputsizes(
self, inputsizes, cursor, statement, parameters, context
):
"""Receive the setinputsizes dictionary for possible modification.
This event is emitted in the case where the dialect makes use of the
DBAPI ``cursor.setinputsizes()`` method which passes information about
parameter binding for a particular statement. The given
``inputsizes`` dictionary will contain :class:`.BindParameter` objects
as keys, linked to DBAPI-specific type objects as values; for
parameters that are not bound, they are added to the dictionary with
``None`` as the value, which means the parameter will not be included
in the ultimate setinputsizes call. The event may be used to inspect
and/or log the datatypes that are being bound, as well as to modify the
dictionary in place. Parameters can be added, modified, or removed
from this dictionary. Callers will typically want to inspect the
:attr:`.BindParameter.type` attribute of the given bind objects in
order to make decisions about the DBAPI object.
After the event, the ``inputsizes`` dictionary is converted into
an appropriate datastructure to be passed to ``cursor.setinputsizes``;
either a list for a positional bound parameter execution style,
or a dictionary of string parameter keys to DBAPI type objects for
a named bound parameter execution style.
Most dialects **do not use** this method at all; the only built-in
dialect which uses this hook is the cx_Oracle dialect. The hook here
is made available so as to allow customization of how datatypes are set
up with the cx_Oracle DBAPI.
.. versionadded:: 1.2.9
.. seealso::
:ref:`cx_oracle_setinputsizes`
"""
pass<|fim▁end|> | handler to the given :class:`_engine.Engine`
which will perform some per-
:class:`_engine.Connection` task specific to these execution options.
|
<|file_name|>count.py<|end_file_name|><|fim▁begin|># Copyright (c) 2016 Lee Cannon
# Licensed under the MIT License, see included LICENSE File
from collections import Counter
from .filter import at_trigrams, with_words
def count_trigrams(interactions: list, minimum: int = 1, n: int = None, include_unknown: bool = False) -> list:
"""Returns the n most common trigrams in the interactions given.
:param interactions: The interactions to check.
:type interactions: list
    :param minimum: Ignore trigrams that occur less than or equal to minimum. Defaults to 1
:type minimum: int
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param include_unknown: Determines if the interactions with unknown trigrams should be included. Default False
:type include_unknown: bool
:return: The list of most common trigrams in the interactions given.
:rtype: list
"""
# The below (if not interaction.trigram == 'OWN') ignores unknown trigrams
if not include_unknown:
trigram_list = [interaction.trigram for interaction in interactions if not interaction.trigram == 'OWN']
else:
trigram_list = [interaction.trigram for interaction in interactions]
return [trigram for trigram in Counter(trigram_list).most_common(n=n) if trigram[1] > minimum]
def count_words(interactions: list, minimum: int = 1, n: int = None, additional_words_to_ignore: list=None) -> list:
"""Returns the n most common words in the interactions given.
:param interactions: The interactions to check.
:type interactions: list
    :param minimum: Ignore words that occur less than or equal to minimum. Defaults to 1
:type minimum: int
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param additional_words_to_ignore: List of additional words to ignore
:type additional_words_to_ignore: list
:return: The list of most common words in the interactions given.
:rtype: list
"""
if additional_words_to_ignore is None:
additional_words_to_ignore = []
word_list = [word for interaction in interactions for word in set(interaction.title_words)
if word not in additional_words_to_ignore]
counts = Counter(word_list).most_common(n=n)
counts = [count for count in counts if count[1] > minimum]
return counts
def count_interactions(interactions: list):
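    """Returns the number of interactions given.
    :param interactions: The interactions to count.
    :type interactions: list
    :return: The number of interactions.
    :rtype: int
    """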
return len(interactions)
def count_words_at_trigrams(interactions: list, trigrams: list, n: int = None, minimum: int = 2,
additional_words_to_ignore: list = None) -> list:
"""Returns the list of most common words at the given trigram in order. Ignores words where the number of
occurrences is less than the minimum.
Example of returned list:
| [['modnet', 1234],<|fim▁hole|> | ['password', 123],
| ['outlook', 34],
| ['network', 4]]
:param interactions: The list of interactions to check.
:type interactions: list
:param trigrams: The list of trigrams to check.
:type trigrams: list
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param minimum: Ignores words where the number of occurrences is less than the minimum. Defaults to 2.
:type minimum: int
:param additional_words_to_ignore: List of additional words to ignore
:type additional_words_to_ignore: list
:return: The list of most common words at the given trigram.
:rtype: list
"""
if additional_words_to_ignore is None:
additional_words_to_ignore = []
return [word for word in count_words(at_trigrams(interactions, trigrams), n=n)
if word[1] >= minimum and word[0] not in additional_words_to_ignore]
def count_trigram_with_words(interactions: list, words: list, n: int = None, minimum: int = 2) -> list:
"""Returns the list of most common trigrams for occurrences of the given word in order. Ignores trigrams where the
number of occurrences is less than the minimum.
Example of returned list:
| [['ABW', 1234],
| ['NOW', 123],
| ['YOR', 34],
| ['BRC', 4]]
:param interactions: The list of interactions to check.
:type interactions: list
:param words: The list of words to check.
:type words: list
:param n: The maximum number of words to return, defaults to None which returns all.
:type n: int
:param minimum: Ignores words where the number of occurrences is less than the minimum. Defaults to 2.
:type minimum: int
:return: The list of most common words at the given trigram.
:rtype: list
"""
return [trigram for trigram in count_trigrams(with_words(interactions, words), n=n)
if trigram[1] >= minimum]<|fim▁end|> | |
<|file_name|>applications.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2009,2014 Jaap Karssenberg <[email protected]>
'''This module contains helper classes for running external applications.
See L{zim.gui.applications} for classes with desktop integration for
applications defined in desktop entry files.
'''
import sys
import os
import logging
import subprocess
import gobject
import zim.fs
import zim.errors
from zim.fs import File
from zim.parsing import split_quoted_strings, is_uri_re, is_win32_path_re
from zim.environ import environ
logger = logging.getLogger('zim.applications')
def _main_is_frozen():
# Detect whether we are running py2exe compiled version
return hasattr(sys, 'frozen') and sys.frozen
class ApplicationError(zim.errors.Error):
        '''Error raised when an external application returns a non-zero exit status'''
description = None
def __init__(self, cmd, args, retcode, stderr):
'''Constructor
@param cmd: the application command as string
@param args: tuple of arguments given to the command
@param retcode: the return code of the command (non-zero!)
@param stderr: the error output of the command
'''
self.msg = _('Failed to run application: %s') % cmd
# T: Error message when external application failed, %s is the command
self.description = \
_('%(cmd)s\nreturned non-zero exit status %(code)i') \
% {'cmd': cmd + ' "' + '" "'.join(args) + '"', 'code': retcode}
# T: Error message when external application failed, %(cmd)s is the command, %(code)i the exit code
if stderr:
self.description += '\n\n' + stderr
class Application(object):
'''Base class for objects representing an external application or
command.
@ivar name: the name of the command (default to first item of C{cmd})
@ivar cmd: the command and arguments as a tuple or a string
(when given as a string it will be parsed for quoted arguments)
@ivar tryexeccmd: the command to check in L{tryexec()}, if C{None}
fall back to first item of C{cmd}
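	Illustrative usage only - the command shown here is arbitrary::
		app = Application(('ls', '-l'))
		if app.tryexec():
			app.run(cwd='/tmp')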
'''
        STATUS_OK = 0 #: return code when the command executed successfully
def __init__(self, cmd, tryexeccmd=None, encoding=None):
'''Constructor
@param cmd: the command for the external application, either a
string for the command, or a tuple or list with the command
and arguments
@param tryexeccmd: command to check in L{tryexec()} as string.
If C{None} will default to C{cmd} or the first item of C{cmd}.
@param encoding: the encoding to use for commandline args<|fim▁hole|> cmd = split_quoted_strings(cmd)
else:
assert isinstance(cmd, (tuple, list))
assert tryexeccmd is None or isinstance(tryexeccmd, basestring)
self.cmd = tuple(cmd)
self.tryexeccmd = tryexeccmd
self.encoding = encoding or zim.fs.ENCODING
if self.encoding == 'mbcs':
self.encoding = 'utf-8'
def __repr__(self):
if hasattr(self, 'key'):
return '<%s: %s>' % (self.__class__.__name__, self.key)
elif hasattr(self, 'cmd'):
return '<%s: %s>' % (self.__class__.__name__, self.cmd)
else:
return '<%s: %s>' % (self.__class__.__name__, self.name)
@property
def name(self):
return self.cmd[0]
@staticmethod
def _lookup(cmd):
'''Lookup cmd in PATH'''
if zim.fs.isabs(cmd):
if zim.fs.isfile(cmd):
return cmd
else:
return None
elif os.name == 'nt':
# Check executable extensions from windows environment
extensions = environ.get_list('PATHEXT', '.com;.exe;.bat;.cmd')
for dir in environ.get_list('PATH'):
for ext in extensions:
file = os.sep.join((dir, cmd + ext))
if zim.fs.isfile(file) and os.access(file, os.X_OK):
return file
else:
return None
else:
# On POSIX no extension is needed to make scripts executable
for dir in environ.get_list('PATH'):
file = os.sep.join((dir, cmd))
if zim.fs.isfile(file) and os.access(file, os.X_OK):
return file
else:
return None
def _cmd(self, args):
# substitute args in the command - to be overloaded by child classes
if args:
return self.cmd + tuple(map(unicode, args))
else:
return self.cmd
def tryexec(self):
'''Check if the executable exists without calling it. This
method is used e.g. to decide what applications to show in the
gui. Uses the C{tryexeccmd}, or the first item of C{cmd} as the
executable name.
@returns: C{True} when the executable was found
'''
cmd = self.tryexeccmd or self.cmd[0]
return not self._lookup(cmd) is None
def _checkargs(self, cwd, args):
assert args is None or isinstance(args, (tuple, list))
argv = self._cmd(args)
# Expand home dir
if argv[0].startswith('~'):
cmd = File(argv[0]).path
argv = list(argv)
argv[0] = cmd
# if it is a python script, insert interpreter as the executable
if argv[0].endswith('.py') and not _main_is_frozen():
argv = list(argv)
argv.insert(0, sys.executable)
# TODO: consider an additional commandline arg to re-use compiled python interpreter
argv = [a.encode(self.encoding) for a in argv]
if cwd:
cwd = unicode(cwd).encode(zim.fs.ENCODING)
return cwd, argv
def run(self, args=None, cwd=None):
'''Run the application in a sub-process and wait for it to finish.
Even when the application runs successfully, any message to stderr
is logged as a warning by zim.
@param args: additional arguments to give to the command as tuple or list
@param cwd: the folder to set as working directory for the command
@raises ApplicationError: if the sub-process returned an error.
'''
cwd, argv = self._checkargs(cwd, args)
logger.info('Running: %s (cwd: %s)', argv, cwd)
if os.name == 'nt':
# http://code.activestate.com/recipes/409002/
info = subprocess.STARTUPINFO()
try:
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
except AttributeError:
info.dwFlags |= 1 # STARTF_USESHOWWINDOW = 0x01
p = subprocess.Popen(argv,
cwd=cwd,
stdout=open(os.devnull, 'w'),
stderr=subprocess.PIPE,
startupinfo=info,
bufsize=4096,
#~ close_fds=True
)
else:
p = subprocess.Popen(argv,
cwd=cwd,
stdout=open(os.devnull, 'w'),
stderr=subprocess.PIPE,
bufsize=4096,
close_fds=True
)
stdout, stderr = p.communicate()
if not p.returncode == self.STATUS_OK:
raise ApplicationError(argv[0], argv[1:], p.returncode, stderr)
#~ elif stderr:
#~ logger.warn(stderr)
def pipe(self, args=None, cwd=None, input=None):
'''Run the application in a sub-process and capture the output.
Like L{run()}, but connects to stdin and stdout for the sub-process.
@note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
@param args: additional arguments to give to the command as tuple or list
@param cwd: the folder to set as working directory for the command
@param input: input for the command as string
@returns: output as a list of lines
@raises ApplicationError: if the sub-process returned an error.
'''
cwd, argv = self._checkargs(cwd, args)
logger.info('Running: %s (cwd: %s)', argv, cwd)
p = subprocess.Popen(argv, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(input)
# TODO: handle ApplicationERror here as well ?
#~ if not p.returncode == self.STATUS_OK:
#~ raise ApplicationError(argv[0], argv[1:], p.returncode, stderr)
#~ elif stderr:
if stderr:
logger.warn(stderr)
# TODO: allow user to get this error as well - e.g. for logging image generator cmd
# Explicit newline conversion, e.g. on windows \r\n -> \n
# FIXME Assume local encoding is respected (!?)
text = [unicode(line + '\n', errors='replace') for line in stdout.splitlines()]
if text and text[-1].endswith('\n') and not stdout.endswith('\n'):
text[-1] = text[-1][:-1] # strip additional \n
return text
def spawn(self, args=None, callback=None, data=None, cwd=None):
'''Start the application in the background and return immediately.
		This is used to start an external application in parallel with zim that is
		not expected to exit immediately, so we do not want to wait for
it - e.g. a webbrowser to show an URL that was clicked.
@param args: additional arguments to give to the command as tuple or list
@param callback: optional callback can be used to trigger when
the application exits. The signature is::
callback(status, data)
where 'C{status}' is the exit status of the process. The
application object provides a constant 'C{STATUS_OK}' which can
be used to test if the application was successful or not.
@param data: additional data for the callback
@param cwd: the folder to set as working directory for the command
@returns: the PID for the new process
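		Illustrative sketch only; the command and the callback shown are arbitrary::
			def on_exit(status):
				logger.debug('child exited with status %i', status)
			app = Application(('xdg-open',))
			app.spawn(('http://example.com',), callback=on_exit)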
'''
cwd, argv = self._checkargs(cwd, args)
opts = {}
flags = gobject.SPAWN_SEARCH_PATH
if callback:
flags |= gobject.SPAWN_DO_NOT_REAP_CHILD
# without this flag child is reaped automatically -> no zombies
if not cwd:
cwd = os.getcwd()
logger.info('Spawning: %s (cwd: %s)', argv, cwd)
try:
pid, stdin, stdout, stderr = \
gobject.spawn_async(argv, flags=flags, working_directory=cwd, **opts)
except gobject.GError:
from zim.gui.widgets import ErrorDialog
ErrorDialog(None, _('Failed running: %s') % argv[0]).run()
#~ # T: error when application failed to start
return None
else:
logger.debug('Process started with PID: %i', pid)
if callback:
# child watch does implicit reaping -> no zombies
if data is None:
gobject.child_watch_add(pid,
lambda pid, status: callback(status))
else:
gobject.child_watch_add(pid,
lambda pid, status, data: callback(status, data), data)
return pid
class WebBrowser(Application):
'''Application wrapper for the C{webbrowser} module. Can be used as
fallback if no webbrowser is configured.
'''
name = _('Default') + ' (webbrowser)' # T: label for default webbrowser
key = 'webbrowser' # Used by zim.gui.applications
def __init__(self, encoding=None):
import webbrowser
self.controller = None
try:
self.controller = webbrowser.get()
except webbrowser.Error:
pass # webbrowser throws an error when no browser is found
self.encoding = encoding or zim.fs.ENCODING
if self.encoding == 'mbcs':
self.encoding = 'utf-8'
def tryexec(self):
return not self.controller is None
def run(self, args):
'''This method is not supported by this class
@raises NotImplementedError: always
'''
raise NotImplementedError('WebBrowser can not run in foreground')
def spawn(self, args, callback=None):
if callback:
raise NotImplementedError('WebBrowser can not handle callback')
for url in args:
if isinstance(url, (zim.fs.File, zim.fs.Dir)):
url = url.uri
url = url.encode(self.encoding)
logger.info('Opening in webbrowser: %s', url)
self.controller.open(url)
class StartFile(Application):
'''Application wrapper for C{os.startfile()}. Can be used on
windows to open files and URLs with the default application.
'''
name = _('Default') + ' (os)' # T: label for default application
key = 'startfile' # Used by zim.gui.applications
def __init__(self):
pass
def tryexec(self):
return hasattr(os, 'startfile')
def run(self, args):
'''This method is not supported by this class
@raises NotImplementedError: always
'''
raise NotImplementedError('StartFile can not run in foreground')
def spawn(self, args, callback=None):
if callback:
logger.warn('os.startfile does not support a callback')
for arg in args:
if isinstance(arg, (zim.fs.File, zim.fs.Dir)):
path = os.path.normpath(arg.path)
elif is_uri_re.match(arg) and not is_win32_path_re.match(arg):
# URL or e.g. mailto: or outlook: URI
path = unicode(arg)
else:
# must be file
path = os.path.normpath(unicode(arg))
logger.info('Opening with os.startfile: %s', path)
os.startfile(path)<|fim▁end|> | if known, else falls back to system default
'''
if isinstance(cmd, basestring): |
<|file_name|>gae.py<|end_file_name|><|fim▁begin|># Helper for the mirror on GAE
# GAE GETs an action gae_file, giving GAE host and a secret
# PyPI GETs /mkupload/secret, learning path and upload session
# PyPI POSTs to upload session
import urllib2, httplib, threading, os, binascii, urlparse
POST="""\
--%(boundary)s
Content-Disposition: form-data; name="secret"
%(secret)s
--%(boundary)s
Content-Disposition: form-data; name="path"
%(path)s
--%(boundary)s
Content-Disposition: form-data; name="file"; filename="%(path)s"
Content-Type: application/octet-stream
%(data)s
--%(boundary)s
"""
POST = "\r\n".join(POST.splitlines())+"\r\n"<|fim▁hole|> return
path,url = x.read().splitlines()
host, session = urlparse.urlsplit(url)[1:3]
try:
data = open(srcdir+"/"+path).read()
except IOError, e:
return
boundary = ""
while boundary in data:
boundary = binascii.hexlify(os.urandom(10))
body = POST % locals()
if ':' in host:
host, port = host.split(':')
else:
port = 80
c = httplib.HTTPConnection(host, port)
c.request('POST', session,
headers = {'Content-type':'multipart/form-data; boundary='+boundary,
'Content-length':str(len(body)),
'Host':host},
body=body)
resp = c.getresponse()
data = resp.read()
# result code should be redirect
c.close()
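# transfer() hands the actual upload off to a background thread running
# doit(), so the caller returns immediately.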
def transfer(host, secret, srcdir):
secret = secret.encode('ascii')
t = threading.Thread(target=doit, args=(host, secret, srcdir))
t.start()<|fim▁end|> |
def doit(host, secret, srcdir):
x = urllib2.urlopen('http://%s/mkupload/%s' % (host, secret))
if x.code != 200: |
<|file_name|>bind-by-move-neither-can-live-while-the-other-survives-4.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct X { x: (), }
impl Drop for X {
fn drop(&mut self) {
println!("destructor runs");
}
}
fn main() {<|fim▁hole|> }
}<|fim▁end|> | let x = Some((X { x: () }, X { x: () }));
match x {
Some((_y, ref _z)) => { }, //~ ERROR cannot bind by-move and by-ref in the same pattern
None => fail!() |