| filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 4–198 | stringlengths 25–939k | list | list | list | stringclasses (1 value) | stringlengths 2–3.9k | stringclasses (3 values) | float64 0–129 ⌀ | float64 0–0 ⌀ | stringclasses (1 value) |
vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go | package credentials
import (
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/internal/ini"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
// SharedCredsProviderName provides a name of SharedCreds provider
const SharedCredsProviderName = "SharedCredentialsProvider"
// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
var ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
// A SharedCredentialsProvider retrieves access key pair (access key ID,
// secret access key, and session token if present) credentials from the current
// user's home directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type SharedCredentialsProvider struct {
// Path to the shared credentials file.
//
// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
// env value is empty will default to current user's home directory.
// Linux/OSX: "$HOME/.aws/credentials"
// Windows: "%USERPROFILE%\.aws\credentials"
Filename string
// AWS Profile to extract credentials from the shared credentials file. If empty
// will default to environment variable "AWS_PROFILE" or "default" if
// environment variable is also not set.
Profile string
// retrieved states if the credentials have been successfully retrieved.
retrieved bool
}
// NewSharedCredentials returns a pointer to a new Credentials object
// wrapping the Profile file provider.
func NewSharedCredentials(filename, profile string) *Credentials {
return NewCredentials(&SharedCredentialsProvider{
Filename: filename,
Profile: profile,
})
}
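// Illustrative sketch (not part of the original file): wiring the shared
// credentials provider into a client configuration. The session and aws
// packages are an assumption about typical aws-sdk-go usage and are not
// imported here; the path and profile name are hypothetical.
//
//	creds := credentials.NewSharedCredentials("/path/to/credentials", "dev-profile")
//	sess, err := session.NewSession(&aws.Config{Credentials: creds})
//	if err != nil {
//		// handle error
//	}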
// Retrieve reads and extracts the shared credentials from the current
// user's home directory.
func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
p.retrieved = false
filename, err := p.filename()
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, err
}
creds, err := loadProfile(filename, p.profile())
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, err
}
p.retrieved = true
return creds, nil
}
// IsExpired returns if the shared credentials have expired.
func (p *SharedCredentialsProvider) IsExpired() bool {
return !p.retrieved
}
// loadProfile loads credentials from the shared credentials file at filename for the given profile.
// The credentials retrieved from the profile will be returned, or an error. An error will be
// returned if it fails to read from the file, or the data is invalid.
func loadProfile(filename, profile string) (Value, error) {
config, err := ini.OpenFile(filename)
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
}
iniProfile, ok := config.GetSection(profile)
if !ok {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
}
id := iniProfile.String("aws_access_key_id")
if len(id) == 0 {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
nil)
}
secret := iniProfile.String("aws_secret_access_key")
if len(secret) == 0 {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
nil)
}
// Default to empty string if not found
token := iniProfile.String("aws_session_token")
return Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: token,
ProviderName: SharedCredsProviderName,
}, nil
}
// filename returns the filename to use to read AWS shared credentials.
//
// Will return an error if the user's home directory path cannot be found.
func (p *SharedCredentialsProvider) filename() (string, error) {
if len(p.Filename) != 0 {
return p.Filename, nil
}
if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
return p.Filename, nil
}
if home := shareddefaults.UserHomeDir(); len(home) == 0 {
// Backwards compatibility for the home directory not found error being returned.
// This error is too verbose; a failure when opening the file would have been
// a better error to return.
return "", ErrSharedCredentialsHomeNotFound
}
p.Filename = shareddefaults.SharedCredentialsFilename()
return p.Filename, nil
}
// profile returns the AWS shared credentials profile. If empty will read
// environment variable "AWS_PROFILE". If that is not set profile will
// return "default".
func (p *SharedCredentialsProvider) profile() string {
if p.Profile == "" {
p.Profile = os.Getenv("AWS_PROFILE")
}
if p.Profile == "" {
p.Profile = "default"
}
return p.Profile
}
| ["\"AWS_SHARED_CREDENTIALS_FILE\"", "\"AWS_PROFILE\""] | [] | ["AWS_PROFILE", "AWS_SHARED_CREDENTIALS_FILE"] | [] | ["AWS_PROFILE", "AWS_SHARED_CREDENTIALS_FILE"] | go | 2 | 0 | |
cave/com.raytheon.viz.textworkstation/src/com/raytheon/viz/textworkstation/TextWorkstationDlg.java | /**
* This software was developed and / or modified by Raytheon Company,
* pursuant to Contract DG133W-05-CQ-1067 with the US Government.
*
* U.S. EXPORT CONTROLLED TECHNICAL DATA
* This software product contains export-restricted data whose
* export/transfer/disclosure is restricted by U.S. law. Dissemination
* to non-U.S. persons whether in the United States or abroad requires
* an export license or other authorization.
*
* Contractor Name: Raytheon Company
* Contractor Address: 6825 Pine Street, Suite 340
* Mail Stop B8
* Omaha, NE 68106
* 402.291.0100
*
* See the AWIPS II Master Rights File ("Master Rights File.pdf") for
* further licensing information.
**/
package com.raytheon.viz.textworkstation;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.TimeZone;
import java.util.Timer;
import java.util.TimerTask;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.SelectionAdapter;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.graphics.Font;
import org.eclipse.swt.graphics.Rectangle;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.widgets.Button;
import org.eclipse.swt.widgets.Display;
import org.eclipse.swt.widgets.Event;
import org.eclipse.swt.widgets.Label;
import org.eclipse.swt.widgets.Listener;
import org.eclipse.swt.widgets.Menu;
import org.eclipse.swt.widgets.MenuItem;
import org.eclipse.swt.widgets.Monitor;
import org.eclipse.swt.widgets.Shell;
import com.raytheon.uf.common.jms.notification.INotificationObserver;
import com.raytheon.uf.common.jms.notification.NotificationException;
import com.raytheon.uf.common.jms.notification.NotificationMessage;
import com.raytheon.uf.common.status.IUFStatusHandler;
import com.raytheon.uf.common.status.UFStatus;
import com.raytheon.uf.common.time.SimulatedTime;
import com.raytheon.uf.viz.core.ProgramArguments;
import com.raytheon.uf.viz.core.notification.jobs.NotificationManagerJob;
import com.raytheon.viz.texteditor.TextDisplayModel;
import com.raytheon.viz.texteditor.TextWorkstationConstants;
import com.raytheon.viz.texteditor.alarmalert.dialogs.CurrentAlarmQueue;
import com.raytheon.viz.texteditor.alarmalert.util.AlarmAlertFunctions;
import com.raytheon.viz.texteditor.alarmalert.util.AlarmAlertNotificationObserver;
import com.raytheon.viz.texteditor.dialogs.TextEditorCfg;
import com.raytheon.viz.texteditor.dialogs.TextEditorDialog;
import com.raytheon.viz.texteditor.msgs.ITextEditorCallback;
import com.raytheon.viz.texteditor.msgs.ITextWorkstationCallback;
import com.raytheon.viz.texteditor.notify.NotifyExpiration;
import com.raytheon.viz.texteditor.scripting.runner.TextWsScriptThreadManager;
import com.raytheon.viz.texteditor.util.RadarTextUtility;
import com.raytheon.viz.ui.dialogs.CaveSWTDialog;
import com.raytheon.viz.ui.dialogs.DialogUtil;
/**
* TextWorkstationDlg class.
*
* <pre>
*
* SOFTWARE HISTORY
*
* Date Ticket# Engineer Description
* ------------- ----------- ------------ --------------------------
* Sep 27, 2007 368 lvenable Initial creation.
* Oct 11, 2007 482 grichard Reformatted file.
* Nov 28, 2007 520 grichard Implemented build 11 features.
* Jan 03, 2008 637 grichard Implemented build 13 features.
* Jan 10, 2008 722 grichard Implemented build 14 features.
* May 16, 2008 1119 grichard Added support for IAviationObserver.
* Jun 03, 2008 937 grichard Corrected simple date formats.
* May 08, 2009 2104 grichard Added support for IRadarObserver.
* May 08, 2009 2104 grichard Added support for
* IScriptRunnerObserver.
* Jun 07, 2010 5851 cjeanbap Properly stop alert/alarm observer
* listener.
* Aug 23, 2010 2187 cjeanbap Removed window location to
* TextEditorDialog.preOpened().
* Oct 04, 2010 7193 cjeanbap Added if statement to
* notificationArrived(), to determine if
* the message has expired.
* Nov 08, 2010 7433 cjeanbap Check TextEditorDialog current mode
* before attempting to open dialog.
* Jan 05, 2011 7375 cjeanbap Fix disposed Widget exception.
* Feb 01, 2011 7193 cjeanbap Add boolean condition to check initial
* start time.
* Nov 03, 2011 11450 rferrel Change how old products purge so it
* no longer depends on the times on two
* machines being in sync.
* Sep 26, 2012 1196 lvenable Dialog refactor to not block.
* Oct 02, 2012 1229 rferrel Option to allow blocking when top
* dialog.
* Dec 13, 2012 1353 rferrel Fix bug introduced in the Show all
* dialogs.
* Jan 30, 2013 14736 D. Friedman Display local time.
* Jun 24, 2013 15733 XHuang Display MAX_BUTTON_CNT (8 buttons).
* Jul 25, 2013 15733 Greg Hull Make default and max number of Text
* Buttons configurable.
* Oct 28, 2015 5054 randerso Make TextWorkstationDlg appear in
* upper left corner of monitor where
* parent shell is located
* Dec 14, 2015 4834 njensen Remove dead menu items
* Jan 26, 2016 5054 randerso Changed to use display as parent
* Feb 15, 2016 4860 njensen Removed references to
* IAviationObserver
* Mar 30, 2016 5513 randerso Fixed to display on same monitor as
* parent
* Feb 14, 2017 6037 randerso Ensure dialog does not appear over
* panels
* Jun 29, 2017 6347 randerso Use -monitor command line parameter,
* if present, when opening as top level
* window
* Jan 03, 2018 6804 tgurney Stop all scripts on dispose
* Jan 24, 2018 7132 tgurney Set alarm/alert bell to null on dispose
*
* </pre>
*
* @author lvenable
*/
public class TextWorkstationDlg extends CaveSWTDialog
implements ITextEditorCallback, INotificationObserver {
private final IUFStatusHandler statusHandler = UFStatus
.getHandler(getClass());
private int INIT_BUTTON_CNT = 4;
private int MAX_BUTTON_CNT = 8;
private String productToDisplay = null;
private static final long initDelta = 300L;
private static final long incDelta = 20L;
private long delta = TextWorkstationDlg.initDelta;
private Font font;
private Font fontAwipsLabel;
private MenuItem newWindowMenuItem;
private Label utcTimeLabel;
private Label localTimeLabel;
private final SimpleDateFormat sdfLocal = new SimpleDateFormat(
"EEE dd MMM yyyy HH:mm z");
private final SimpleDateFormat sdfUTC = new SimpleDateFormat(
"EEE dd MMM yyyy HH:mm z");
private Timer timer;
private Date date;
private List<Button> textBtnArray;
private List<TextEditorDialog> textEditorArray;
private TextEditorDialog wgDlg;
private NotifyExpiration notify;
private CurrentAlarmQueue alarmDlg;
private long initStartTime;
/**
* Create top level Text Workstation Dialog
*
* @param display
*
*/
public TextWorkstationDlg(Display display) {
super(display, SWT.DIALOG_TRIM | SWT.MIN, CAVE.PERSPECTIVE_INDEPENDENT
| CAVE.INDEPENDENT_SHELL | CAVE.DO_NOT_BLOCK);
setText("Text Workstation");
TextDisplayModel.getInstance().setTextRadar(new RadarTextUtility());
NotificationManagerJob.addQueueObserver(
TextWorkstationConstants.getTextWorkstationQueueName(), this);
initStartTime = System.currentTimeMillis();
}
@Override
protected void disposed() {
font.dispose();
fontAwipsLabel.dispose();
timer.cancel();
NotificationManagerJob.removeQueueObserver(
TextWorkstationConstants.getTextWorkstationQueueName(), null,
this);
AlarmAlertFunctions.destroyAlarmAlertBell();
TextWsScriptThreadManager.getInstance().stopAllScripts();
for (TextEditorDialog teDlg : textEditorArray) {
if (teDlg != null) {
teDlg.disposeDialog();
}
}
if (wgDlg != null) {
wgDlg.disposeDialog();
}
AlarmAlertNotificationObserver.removeNotificationObserver();
}
@Override
protected void initializeComponents(final Shell shell) {
setReturnValue(false);
notify = new NotifyExpiration(getDisplay());
font = new Font(shell.getDisplay(), "Monospace", 10, SWT.NORMAL);
fontAwipsLabel = new Font(shell.getDisplay(), "Helvetica", 24,
SWT.ITALIC);
INIT_BUTTON_CNT = TextEditorCfg.getTextEditorCfg()
.getDefaultNumEditors();
MAX_BUTTON_CNT = TextEditorCfg.getTextEditorCfg().getMaxNumEditors();
// Initialize all of the controls and layouts
sdfUTC.setTimeZone(TimeZone.getTimeZone("UTC"));
String localTZName = System.getenv("FXA_LOCAL_TZ");
sdfLocal.setTimeZone(localTZName != null
? TimeZone.getTimeZone(localTZName) : TimeZone.getDefault());
createMenus();
new Label(shell, SWT.NONE)
.setText("host: " + TextWorkstationConstants.getHostName());
createAwipsLabel();
createTimeLabels();
startTimeTimer();
createAlertAlarm();
createTextButtons();
createWarngenDisplay();
shell.addListener(SWT.Close, new Listener() {
@Override
public void handleEvent(Event event) {
event.doit = notify.checkExpirationNotices(getShell());
}
});
// Opens the alarm queue invisibly, to duplicate A1 functionality.
alarmDlg = CurrentAlarmQueue.getInstance(shell);
alarmDlg.openInvisible();
// Create the alarm alert bell
AlarmAlertFunctions.initAlarmAlertBell(shell);
}
@Override
protected void preOpened() {
super.preOpened();
Monitor monitor = null;
/* If we have a parent shell use the parent's monitor */
if (getParent() != null) {
monitor = getParent().getMonitor();
}
/* if no parent shell, must be top level window */
else {
/* Check for -monitor command line arg */
ProgramArguments args = ProgramArguments.getInstance();
Integer monitorIndex = args.getInteger("-monitor");
Display display = getDisplay();
if (monitorIndex != null) {
/* Clip index to valid range of monitors */
Monitor[] monitors = display.getMonitors();
if (monitorIndex < 0) {
monitorIndex = 0;
} else if (monitorIndex >= monitors.length) {
monitorIndex = monitors.length - 1;
}
monitor = monitors[monitorIndex];
}
/* Otherwise default to monitor containing cursor */
else {
monitor = DialogUtil.getCursorMonitor(display);
}
}
/* Set dialog location to upper left corner of monitor */
Rectangle clientArea = monitor.getClientArea();
shell.setLocation(clientArea.x, clientArea.y);
}
@Override
protected void opened() {
if (productToDisplay != null) {
wgDlg.showWarngenProduct(productToDisplay, notify);
}
// Display the first Text Editor
//showTextEditor(0);
}
private void createMenus() {
Menu menuBar = new Menu(shell, SWT.BAR);
createFileMenus(menuBar);
createWindowsMenus(menuBar);
shell.setMenuBar(menuBar);
}
private void createFileMenus(Menu menuBar) {
// -------------------------------------
// Create all the items in the file menu
// -------------------------------------
MenuItem fileMenuItem = new MenuItem(menuBar, SWT.CASCADE);
fileMenuItem.setText("File");
// Create the File menu item with a File "dropdown" menu
Menu fileMenu = new Menu(menuBar);
fileMenuItem.setMenu(fileMenu);
// --------------------------------------------------
// Create Exit menu item
// --------------------------------------------------
MenuItem exitMenuItem = new MenuItem(fileMenu, SWT.NONE);
exitMenuItem.setText("Exit");
exitMenuItem.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(SelectionEvent event) {
getShell().close();
}
});
}
private void createWindowsMenus(Menu menuBar) {
// ----------------------------------------
// Create all the items in the Windows menu
// ----------------------------------------
MenuItem windowsMenuItem = new MenuItem(menuBar, SWT.CASCADE);
windowsMenuItem.setText("Windows");
// Create the File menu item with a File "dropdown" menu
Menu windowsMenu = new Menu(menuBar);
windowsMenuItem.setMenu(windowsMenu);
// --------------------------------------------------
// Create Hide All menu item
// --------------------------------------------------
MenuItem hideAllMenuItem = new MenuItem(windowsMenu, SWT.NONE);
hideAllMenuItem.setText("Hide All");
hideAllMenuItem.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(SelectionEvent event) {
hideAllTextEditors();
}
});
// --------------------------------------------------
// Create Show All menu item
// --------------------------------------------------
MenuItem showAllMenuItem = new MenuItem(windowsMenu, SWT.NONE);
showAllMenuItem.setText("Show All");
showAllMenuItem.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(SelectionEvent event) {
showAllTextEditors();
}
});
// -------------------------------
// Add a menu separator.
// -------------------------------
new MenuItem(windowsMenu, SWT.SEPARATOR);
// --------------------------------------------------
// Create New Window menu item
// --------------------------------------------------
newWindowMenuItem = new MenuItem(windowsMenu, SWT.NONE);
newWindowMenuItem.setText("New Window");
newWindowMenuItem.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(SelectionEvent event) {
addNewWindowButton();
}
});
}
private void createAwipsLabel() {
GridData gd = new GridData(300, 20);
Label awipsBlankLabel = new Label(shell, SWT.NONE);
awipsBlankLabel.setFont(fontAwipsLabel);
awipsBlankLabel.setText(" ");
awipsBlankLabel.setLayoutData(gd);
gd = new GridData(300, 80);
Label awipsLabel = new Label(shell, SWT.NONE);
awipsLabel.setFont(fontAwipsLabel);
awipsLabel.setText(" AWIPS II");
awipsLabel.setLayoutData(gd);
}
private void createTimeLabels() {
GridData gd = null;
gd = new GridData(300, SWT.DEFAULT);
utcTimeLabel = new Label(shell, SWT.CENTER);
utcTimeLabel.setLayoutData(gd);
gd = new GridData(300, SWT.DEFAULT);
localTimeLabel = new Label(shell, SWT.CENTER);
localTimeLabel.setLayoutData(gd);
date = SimulatedTime.getSystemTime().getTime();
localTimeLabel.setText(sdfLocal.format(date));
utcTimeLabel.setText(sdfUTC.format(date));
}
private void createAlertAlarm() {
GridData gd = new GridData(SWT.FILL, SWT.DEFAULT, true, false);
Button alertAlarmBtn = new Button(shell, SWT.PUSH);
alertAlarmBtn.setText("Alarm/Alert");
alertAlarmBtn.setLayoutData(gd);
AlarmAlertNotificationObserver.getInstance();
alertAlarmBtn.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(SelectionEvent event) {
alarmDlg.show();
}
});
}
private void createTextButtons() {
textBtnArray = new ArrayList<>();
textEditorArray = new ArrayList<>();
for (int x = 1; x <= INIT_BUTTON_CNT; ++x) {
createButtonAndTextEditor(x);
}
}
private void addNewWindowButton() {
int currentBtnCount = textEditorArray.size();
if (currentBtnCount < MAX_BUTTON_CNT) {
++currentBtnCount;
createButtonAndTextEditor(currentBtnCount);
}
if (currentBtnCount == MAX_BUTTON_CNT) {
newWindowMenuItem.setEnabled(false);
}
shell.pack();
}
private void createButtonAndTextEditor(int btnNumber) {
GridData gd = new GridData(SWT.FILL, SWT.DEFAULT, true, false);
Button textBtn = new Button(shell, SWT.PUSH);
String btnTitle = "Text " + btnNumber;
textBtn.setText(btnTitle);
textBtn.setLayoutData(gd);
textBtn.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(SelectionEvent event) {
showTextEditor(textBtnArray.indexOf(event.getSource()));
}
});
textBtnArray.add(textBtn);
// Make place holder for the edit dialog and only create if requested.
textEditorArray.add(null);
}
private synchronized void createWarngenDisplay() {
if (wgDlg == null) {
wgDlg = new TextEditorDialog(getShell(), "Text Warngen", false, "9",
true);
}
}
private void showTextEditor(int editorIndex) {
TextEditorDialog teDlg = textEditorArray.get(editorIndex);
if (teDlg == null) {
// create a new instance
String btnTitle = "Text " + (editorIndex + 1);
teDlg = new TextEditorDialog(shell, btnTitle, false, this,
((Integer) (editorIndex + 1)).toString(), true, true,
CAVE.PERSPECTIVE_INDEPENDENT);
textEditorArray.set(editorIndex, teDlg);
}
textEditorArray.get(editorIndex).showDialog();
}
private void showAllTextEditors() {
for (int i = 0; i < textEditorArray.size(); i++) {
showTextEditor(i);
}
}
private void hideAllTextEditors() {
Shell myShell;
for (TextEditorDialog teDlg : textEditorArray) {
if (teDlg != null) {
teDlg.hideDialog();
}
}
for (int i = 1; i < 9; i++) {
ITextWorkstationCallback cb = TextDisplayModel.getInstance()
.getITextWorkstationCallback(((Integer) i).toString());
if (cb != null) {
if (cb.isBrowserActive()) {
myShell = cb.getShell();
myShell.setVisible(false);
}
}
}
}
private void startTimeTimer() {
timer = new Timer();
TimerTask updateTimeTask = new TimerTask() {
@Override
public void run() {
getDisplay().syncExec(new Runnable() {
@Override
public void run() {
updateTimeLabels();
}
});
}
};
timer.schedule(updateTimeTask, 200, 20_000);
}
private void updateTimeLabels() {
date = SimulatedTime.getSystemTime().getTime();
localTimeLabel.setText(sdfLocal.format(date));
utcTimeLabel.setText(sdfUTC.format(date));
}
@Override
public void restoreText(int teID) {
textBtnArray.get(teID).setText("Text " + (teID + 1));
}
@Override
public void updateText(int teID, String newText) {
// Pass in "token-1" as teID and
// "TextDisplayModel.getInstance().getControlDialogButtonText(((Integer)
// token).toString())" as newText.
textBtnArray.get(teID).setText(newText);
}
@Override
public synchronized void notificationArrived(
NotificationMessage[] messages) {
// SimpleDateFormat sdf = new
// SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
// sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
// System.out.println(sdf.format(new Date())
// + ": warning received by text workstation");
if (messages.length > 0) {
try {
NotificationMessage message = messages[messages.length - 1];
// Drain old messages in the queue.
// See DR#7193 and DR#11450
// Originally DR#7193 fixed its problem by getting a timestamp
// of when the message was created and seeing if it was less
// than the start time of this instance of the class. This created
// problems when the times on the box creating the WarnGen
// and the box running textws differ by a large amount,
// causing the problem in DR#11450. You could still get old
// queue messages or even drop messages sent after textws was
// started.
//
// This approach drops messages that come in shortly after
// starting textws and does not depend on the times of the
// workstations being in sync. This assumes old messages will be
// sent shortly after a connection is made to the server.
//
// The ideal solution would have the creator of the WarnGen not
// even queue the message when there is no textws connected to
// service the queue. This would involve significant changes to
// how we use 3rd party software and can not be implemented at
// this time.
if (System.currentTimeMillis() - initStartTime <= delta) {
// Slowly increment the delta in case there are a lot of old
// messages.
delta += TextWorkstationDlg.incDelta;
} else if (message.isNotExpired()) {
String product = message.getMessagePayload().toString();
if (wgDlg == null) {
productToDisplay = product;
} else {
if (!wgDlg.isEditMode()) {
wgDlg.showWarngenProduct(product, notify);
} else {
wgDlg.enqueue(product, notify);
}
}
}
// TODO: Open up a text editor dialog and have it retrieve and
// parse the warning based on the afosId
} catch (NotificationException e) {
statusHandler
.warn("Error in received text product notification", e);
}
}
}
}
| ["\"FXA_LOCAL_TZ\""] | [] | ["FXA_LOCAL_TZ"] | [] | ["FXA_LOCAL_TZ"] | java | 1 | 0 | |
src/lambda/cmd/cmd.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import os
import json
import uuid
import boto3
import datetime
flight_table_name = os.environ['ORDER_TABLE_NAME']
init_db_lambda_name = os.environ['INITDB_LAMBDA_NAME']
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(flight_table_name)
lam = boto3.client('lambda')
def http_return(body):
return {
'statusCode': 200,
'headers': {
'Content-Type': 'text/plain'
},
'body': body
}
def delete_ddb_items():
# check out https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html
scan = table.scan()
with table.batch_writer() as batch:
for each in scan['Items']:
batch.delete_item(
Key={
'customer_id': each['customer_id'],
}
)
def load_customer(customer_id):
resp = table.get_item(
Key={
'customer_id': customer_id,
}
)
if 'Item' in resp:
return resp['Item']
else:
return None
def new_order(req):
order = {}
order['id'] = str(uuid.uuid4())
order['flight_number'] = req['flight_number']
order['flight_date'] = req['flight_date']
order['from'] = req['from']
# order['from_airport'] = req['from_airport']
order['to'] = req['to']
# order['to_airport'] = req['to_airport']
order['start'] = req['start']
order['end'] = req['end']
order['created_at'] = str(datetime.datetime.now())
order['status'] = 'Ordered'
return order
def lambda_handler(event, context):
print('request: {}'.format(json.dumps(event)))
path = event["path"]
method = event["httpMethod"]
req = {}
if 'body' in event and event['body'] != None:
req = json.loads(event['body'])
print(req)
# 3 Lambda invocation types; either 1 or 2 works here:
# 1. RequestResponse – Execute synchronously, no retry.
# 2. Event – Execute asynchronously, retry 2 times upon failure.
# 3. DryRun – Test that the invocation is permitted for the caller, but don't execute the function.
# API Gateway supports both Sync (default) and Async; set the integration request HTTP header X-Amz-Invocation-Type: Event for async
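# Illustrative sketch (not part of the original handler): the same invoke call
# issued synchronously vs. asynchronously with boto3; the function name below
# is hypothetical.
#
#   lam.invoke(FunctionName='demo-func', InvocationType='RequestResponse',
#              Payload=json.dumps({"KEY1": "y"}))  # blocks until the result is returned
#   lam.invoke(FunctionName='demo-func', InvocationType='Event',
#              Payload=json.dumps({"KEY1": "y"}))  # queued; Lambda retries twice on failure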
# clear DDB and RDS
if path == "/truncate":
delete_ddb_items()
lam.invoke(
FunctionName=init_db_lambda_name,
InvocationType='Event',
Payload=json.dumps( {"TRUNCATE": "y"}))
return http_return('DDB & RDS truncated successfully!\n')
# create RDS Demo DB
if path == "/initdb":
lam.invoke(
FunctionName=init_db_lambda_name,
InvocationType='Event',
Payload=json.dumps( {"KEY1": "y"}))
return http_return('RDS MySQL initialized successfully!\n')
# Handler customer
if path == '/':
# Get customer info along with orders
if method == 'GET':
if 'queryStringParameters' in event \
and 'customer_id' in event['queryStringParameters']:
customer_id = event['queryStringParameters']['customer_id']
# If found, return something like below; otherwise, "Item key not found"
# "Item": {
# "created": "2019-10-19 02:07:50.538833", "customer_id": "23234234234",
# "id": "5a9deb22-4b04-47e6-b11e-f6c9e220aa4e", "flight id": "FM1234",
# "flight_date": "2019-11-11" }
return {
'statusCode': 200,
'headers': {
'Content-Type': 'text/plain'
},
'body': json.dumps(load_customer(customer_id))
}
else:
return {
'statusCode': 404,
'headers': {
'Content-Type': 'text/plain'
},
'body': "Not Found Customer"
}
# Create customer
elif method == 'POST':
resp = table.put_item(
Item={
'customer_id': req['customer_id'],
'full_name': req['full_name'],
'created_at': str(datetime.datetime.now())
}
)
return {
'statusCode': 200,
'headers': {
'Content-Type': 'text/plain'
},
'body': json.dumps(resp)
}
# Update customer basic information
elif method == 'PUT':
resp = table.update_item(
Key={
'customer_id': req['customer_id'],
},
UpdateExpression='SET full_name = :val1',
ExpressionAttributeValues={
':val1': req['full_name']
}
)
return {
'statusCode': 200,
'headers': {
'Content-Type': 'text/plain'
},
'body': json.dumps(resp)
}
# Handle customer orders
if path == "/order":
if method == "GET":
pass
# Insert new order at the head of the order list
elif method == "POST":
customer = load_customer(req['customer_id'])
# Create customer and order at the same time
if customer == None:
orders = [new_order(req)]
resp = table.put_item(
Item={
'customer_id': req['customer_id'],
'orders': json.dumps(orders)
}
)
# Insert order into customer's order list
else:
orders = []
if 'orders' in customer:
orders = json.loads(customer['orders'])
for order in orders:
# Reject the request if this flight is already ordered
if order['flight_number'] == req['flight_number'] \
and order['flight_date'] == req['flight_date'] \
and order['status'] == 'Ordered':
return {
'statusCode': 400,
'headers': {
'Content-Type': 'text/plain'
},
'body': 'Flight already ordered'
}
orders.insert(0, new_order(req))
resp = table.update_item(
Key={
'customer_id': customer['customer_id'],
},
UpdateExpression='SET orders = :val1',
ExpressionAttributeValues={
':val1': json.dumps(orders)
}
)
print('response: {}'.format(json.dumps(resp)))
return {
'statusCode': 200,
'headers': {
'Content-Type': 'text/plain'
},
'body': json.dumps(resp)
}
# Update order status
elif method == "PUT":
customer = load_customer(req['customer_id'])
if 'orders' in customer:
orders = json.loads(customer['orders'])
for order in orders:
# Update order status
if order['flight_number'] == req['flight_number'] \
and order['flight_date'] == req['flight_date']:
order['status'] = req['status']
order['updated_at'] = str(datetime.datetime.now())
resp = table.update_item(
Key={
'customer_id': customer['customer_id'],
},
UpdateExpression='SET orders = :val1',
ExpressionAttributeValues={
':val1': json.dumps(orders)
}
)
return {
'statusCode': 200,
'headers': {
'Content-Type': 'text/plain'
},
'body': json.dumps(resp)
}
return {
'statusCode': 404,
'headers': {
'Content-Type': 'text/plain'
},
'body': "Order Not Found"
}
| [] | [] | ["ORDER_TABLE_NAME", "INITDB_LAMBDA_NAME"] | [] | ["ORDER_TABLE_NAME", "INITDB_LAMBDA_NAME"] | python | 2 | 0 | |
code/client/munkilib/admin/pkginfolib.py | # encoding: utf-8
#
# Copyright 2017-2019 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
pkginfolib
Created by Greg Neagle on 2017-11-18.
Routines used by makepkginfo to create pkginfo files
"""
from __future__ import absolute_import, division, print_function
# standard libs
import optparse
import os
import re
import sys
import time
# Apple frameworks via PyObjC
# PyLint cannot properly find names inside Cocoa libraries, so issues bogus
# No name 'Foo' in module 'Bar' warnings. Disable them.
# pylint: disable=E0611
from Foundation import NSDate, NSUserName
# pylint: enable=E0611
# our libs
from .common import AttributeDict
from .. import dmgutils
from .. import info
from .. import munkihash
from .. import osinstaller
from .. import osutils
from .. import pkgutils
from .. import profiles
from .. import FoundationPlist
from ..adobeutils import adobeinfo
# circumvent cfprefsd plist scanning
os.environ['__CFPREFERENCES_AVOID_DAEMON'] = "1"
class PkgInfoGenerationError(Exception):
'''Error to raise if there is a fatal error when generating pkginfo'''
pass
def make_pkginfo_metadata():
'''Records information about the environment in which the pkginfo was
created so we have a bit of an audit trail. Returns a dictionary.'''
metadata = {}
metadata['created_by'] = NSUserName()
metadata['creation_date'] = NSDate.new()
metadata['munki_version'] = info.get_version()
metadata['os_version'] = osutils.getOsVersion(only_major_minor=False)
return metadata
def convert_date_string_to_nsdate(datetime_string):
'''Converts a string in the "2013-04-25T20:00:00Z" format or
"2013-04-25 20:00:00 +0000" format to an NSDate'''
nsdate_format = '%Y-%m-%dT%H:%M:%SZ'
iso_format = '%Y-%m-%d %H:%M:%S +0000'
fallback_format = '%Y-%m-%d %H:%M:%S'
try:
tobj = time.strptime(datetime_string, nsdate_format)
except ValueError:
try:
tobj = time.strptime(datetime_string, iso_format)
except ValueError:
try:
tobj = time.strptime(datetime_string, fallback_format)
except ValueError:
return None
iso_date_string = time.strftime(iso_format, tobj)
return NSDate.dateWithString_(iso_date_string)
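# Illustrative sketch (not part of the original module): the three accepted
# input formats for the function above; the dates are hypothetical examples.
#
#   convert_date_string_to_nsdate("2013-04-25T20:00:00Z")
#   convert_date_string_to_nsdate("2013-04-25 20:00:00 +0000")
#   convert_date_string_to_nsdate("2013-04-25 20:00:00")
#   # each returns an NSDate for 2013-04-25 20:00:00 +0000;
#   # unparseable strings return None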
def get_catalog_info_from_path(pkgpath, options):
"""Gets package metadata for the package at pathname.
Returns cataloginfo"""
cataloginfo = {}
if os.path.exists(pkgpath):
cataloginfo = pkgutils.getPackageMetaData(pkgpath)
if options.installer_choices_xml:
installer_choices_xml = pkgutils.getChoiceChangesXML(pkgpath)
if installer_choices_xml:
cataloginfo['installer_choices_xml'] = installer_choices_xml
if cataloginfo:
# we found a package, but let's see if it's an Adobe CS5 install
# (AAMEE) package
if 'receipts' in cataloginfo:
try:
pkgid = cataloginfo['receipts'][0].get('packageid')
except IndexError:
pkgid = ""
if pkgid.startswith("com.adobe.Enterprise.install"):
# we have an Adobe CS5 install package, process
# as Adobe install
#adobepkgname = cataloginfo['receipts'][0].get('filename')
cataloginfo = adobeinfo.getAdobeCatalogInfo(pkgpath)
#mountpoints[0], adobepkgname)
else:
# maybe an Adobe installer/updater/patcher?
cataloginfo = adobeinfo.getAdobeCatalogInfo(pkgpath,
options.pkgname or '')
return cataloginfo
class ProfileMetadataGenerationError(PkgInfoGenerationError):
'''Error to raise when we can't generate config profile metadata'''
pass
def get_catalog_info_for_profile(profile_path):
'''Populates some metadata for profile pkginfo'''
cataloginfo = {}
profile = profiles.read_profile(profile_path)
if profile.get('PayloadType') == 'Configuration':
try:
cataloginfo['PayloadIdentifier'] = profile['PayloadIdentifier']
except (KeyError, AttributeError):
# this thing is broken! return the empty info
return cataloginfo
cataloginfo['name'] = os.path.basename(profile_path)
cataloginfo['display_name'] = profile.get(
'PayloadDisplayName', cataloginfo['name'])
cataloginfo['description'] = profile.get('PayloadDescription', '')
cataloginfo['version'] = '1.0'
cataloginfo['installer_type'] = 'profile'
cataloginfo['uninstallable'] = True
cataloginfo['uninstall_method'] = 'remove_profile'
cataloginfo['unattended_install'] = True
cataloginfo['unattended_uninstall'] = True
cataloginfo['minimum_os_version'] = '10.7'
cataloginfo['minimum_munki_version'] = '2.2'
else:
raise ProfileMetadataGenerationError(
'Profile PayloadType is %s' % profile.get('PayloadType'))
return cataloginfo
def get_catalog_info_from_dmg(dmgpath, options):
"""
* Mounts a disk image if it's not already mounted
* Gets catalog info for the first installer item found at the root level.
* Unmounts the disk image if it wasn't already mounted
To-do: handle multiple installer items on a disk image(?)
"""
cataloginfo = None
was_already_mounted = dmgutils.diskImageIsMounted(dmgpath)
mountpoints = dmgutils.mountdmg(dmgpath, use_existing_mounts=True)
if not mountpoints:
raise PkgInfoGenerationError("Could not mount %s!" % dmgpath)
if options.pkgname:
pkgpath = os.path.join(mountpoints[0], options.pkgname)
cataloginfo = get_catalog_info_from_path(pkgpath, options)
if cataloginfo:
cataloginfo['package_path'] = options.pkgname
elif not options.item:
# search for first package at root
for fsitem in osutils.listdir(mountpoints[0]):
itempath = os.path.join(mountpoints[0], fsitem)
if pkgutils.hasValidInstallerItemExt(itempath):
cataloginfo = get_catalog_info_from_path(itempath, options)
# get out of fsitem loop
break
if not cataloginfo and not options.item:
# look for one of the many possible Adobe installer/updaters
cataloginfo = adobeinfo.getAdobeCatalogInfo(
mountpoints[0], options.pkgname or '')
if not cataloginfo:
# could be a wrapped Install macOS.app
install_macos_app = osinstaller.find_install_macos_app(mountpoints[0])
if (install_macos_app and options.print_warnings and
osinstaller.install_macos_app_is_stub(install_macos_app)):
print('WARNING: %s appears to be an Install macOS application, but '
'it does not contain Contents/SharedSupport/InstallESD.dmg'
% os.path.basename(install_macos_app), file=sys.stderr)
cataloginfo = osinstaller.get_catalog_info(mountpoints[0])
if not cataloginfo:
# maybe this is a drag-n-drop dmg
# look for given item or an app at the top level of the dmg
iteminfo = {}
if options.item:
item = options.item
# Create a path by joining the mount point and the provided item
# path.
# The os.path.join method will intelligently take care of the
# following scenarios:
# ("/mountpoint", "relative/path") -> "/mountpoint/relative/path"
# ("/mountpoint", "/absolute/path") -> "/absolute/path"
itempath = os.path.join(mountpoints[0], item)
# Now check that the item actually exists and is located within the
# mount point
if os.path.exists(itempath) and itempath.startswith(mountpoints[0]):
iteminfo = getiteminfo(itempath)
else:
if not was_already_mounted:
dmgutils.unmountdmg(mountpoints[0])
raise PkgInfoGenerationError(
"%s not found on disk image." % item)
else:
# no item specified; look for an application at root of
# mounted dmg
item = ''
for itemname in osutils.listdir(mountpoints[0]):
itempath = os.path.join(mountpoints[0], itemname)
if pkgutils.isApplication(itempath):
item = itemname
iteminfo = getiteminfo(itempath)
if iteminfo:
break
if iteminfo:
item_to_copy = {}
if os.path.isabs(item):
# Absolute path given
# Remove the mountpoint from item path
mountpoint_pattern = "^%s/" % mountpoints[0]
item = re.sub(mountpoint_pattern, '', item)
if options.destitemname:
# An alternate 'destination_item' name has been specified
dest_item = options.destitemname
item_to_copy['destination_item'] = options.destitemname
else:
dest_item = item
# Use only the last path component when
# composing the path key of an installs item
dest_item_filename = os.path.split(dest_item)[1]
if options.destinationpath:
iteminfo['path'] = os.path.join(
options.destinationpath, dest_item_filename)
else:
iteminfo['path'] = os.path.join(
"/Applications", dest_item_filename)
cataloginfo = {}
cataloginfo['name'] = iteminfo.get(
'CFBundleName', os.path.splitext(item)[0])
version_comparison_key = iteminfo.get(
'version_comparison_key', "CFBundleShortVersionString")
cataloginfo['version'] = \
iteminfo.get(version_comparison_key, "0")
cataloginfo['installs'] = [iteminfo]
cataloginfo['installer_type'] = "copy_from_dmg"
item_to_copy['source_item'] = item
item_to_copy['destination_path'] = (
options.destinationpath or "/Applications")
if options.user:
item_to_copy['user'] = options.user
if options.group:
item_to_copy['group'] = options.group
if options.mode:
item_to_copy['mode'] = options.mode
cataloginfo['items_to_copy'] = [item_to_copy]
cataloginfo['uninstallable'] = True
cataloginfo['uninstall_method'] = "remove_copied_items"
#eject the dmg
if not was_already_mounted:
dmgutils.unmountdmg(mountpoints[0])
return cataloginfo
# TO-DO: this (or a similar) function is defined in several places. De-dupe.
def readfile(path):
'''Reads file at path. Returns a string.'''
try:
fileobject = open(os.path.expanduser(path), mode='r', buffering=1)
data = fileobject.read()
fileobject.close()
return data
except (OSError, IOError):
print("Couldn't read %s" % path, file=sys.stderr)
return ""
def read_file_or_string(option_value):
"""
If option_value is a path to a file,
return contents of file.
Otherwise, return the string.
"""
if os.path.exists(os.path.expanduser(option_value)):
string = readfile(option_value)
else:
string = option_value
return string
def getiteminfo(itempath):
"""
Gets info for filesystem items passed to makecatalog item, to be used for
the "installs" key.
Determines if the item is an application, bundle, Info.plist, or a file or
directory and gets additional metadata for later comparison.
"""
infodict = {}
if pkgutils.isApplication(itempath):
infodict['type'] = 'application'
infodict['path'] = itempath
plist = pkgutils.getBundleInfo(itempath)
for key in ['CFBundleName', 'CFBundleIdentifier',
'CFBundleShortVersionString', 'CFBundleVersion']:
if key in plist:
infodict[key] = plist[key]
if 'LSMinimumSystemVersion' in plist:
infodict['minosversion'] = plist['LSMinimumSystemVersion']
elif 'LSMinimumSystemVersionByArchitecture' in plist:
# just grab the highest version if more than one is listed
versions = [item[1] for item in
plist['LSMinimumSystemVersionByArchitecture'].items()]
highest_version = str(max([pkgutils.MunkiLooseVersion(version)
for version in versions]))
infodict['minosversion'] = highest_version
elif 'SystemVersionCheck:MinimumSystemVersion' in plist:
infodict['minosversion'] = \
plist['SystemVersionCheck:MinimumSystemVersion']
elif (os.path.exists(os.path.join(itempath, 'Contents', 'Info.plist')) or
os.path.exists(os.path.join(itempath, 'Resources', 'Info.plist'))):
infodict['type'] = 'bundle'
infodict['path'] = itempath
plist = pkgutils.getBundleInfo(itempath)
for key in ['CFBundleShortVersionString', 'CFBundleVersion']:
if key in plist:
infodict[key] = plist[key]
elif itempath.endswith("Info.plist") or itempath.endswith("version.plist"):
infodict['type'] = 'plist'
infodict['path'] = itempath
try:
plist = FoundationPlist.readPlist(itempath)
for key in ['CFBundleShortVersionString', 'CFBundleVersion']:
if key in plist:
infodict[key] = plist[key]
except FoundationPlist.NSPropertyListSerializationException:
pass
# let's help the admin -- if CFBundleShortVersionString is empty
# or doesn't start with a digit, and CFBundleVersion is there
# use CFBundleVersion as the version_comparison_key
if (not infodict.get('CFBundleShortVersionString') or
infodict['CFBundleShortVersionString'][0]
not in '0123456789'):
if infodict.get('CFBundleVersion'):
infodict['version_comparison_key'] = 'CFBundleVersion'
elif 'CFBundleShortVersionString' in infodict:
infodict['version_comparison_key'] = 'CFBundleShortVersionString'
if ('CFBundleShortVersionString' not in infodict and
'CFBundleVersion' not in infodict):
infodict['type'] = 'file'
infodict['path'] = itempath
if os.path.isfile(itempath):
infodict['md5checksum'] = munkihash.getmd5hash(itempath)
return infodict
def makepkginfo(installeritem, options):
'''Return a pkginfo dictionary for item'''
if isinstance(options, dict):
options = AttributeDict(options)
pkginfo = {}
installs = []
if installeritem:
if not os.path.exists(installeritem):
raise PkgInfoGenerationError(
"File %s does not exist" % installeritem)
# Check if the item is a mount point for a disk image
if dmgutils.pathIsVolumeMountPoint(installeritem):
# Get the disk image path for the mount point
# and use that instead of the original item
installeritem = dmgutils.diskImageForMountPoint(installeritem)
# get size of installer item
itemsize = 0
itemhash = "N/A"
if os.path.isfile(installeritem):
itemsize = int(os.path.getsize(installeritem))
try:
itemhash = munkihash.getsha256hash(installeritem)
except OSError as err:
raise PkgInfoGenerationError(err)
if pkgutils.hasValidDiskImageExt(installeritem):
if dmgutils.DMGisWritable(installeritem) and options.print_warnings:
print("WARNING: %s is a writable disk image. "
"Checksum verification is not supported." % installeritem,
file=sys.stderr)
print("WARNING: Consider converting %s to a read-only disk"
"image." % installeritem, file=sys.stderr)
itemhash = "N/A"
pkginfo = get_catalog_info_from_dmg(installeritem, options)
if (pkginfo and
pkginfo.get('installer_type') == "AdobeCS5Installer"):
raise PkgInfoGenerationError(
"This disk image appears to contain an Adobe CS5/CS6 "
"product install.\n"
"Please use Adobe Application Manager, Enterprise "
"Edition (AAMEE) to create an installation package "
"for this product.")
if not pkginfo:
raise PkgInfoGenerationError(
"Could not find a supported installer item in %s!"
% installeritem)
elif pkgutils.hasValidPackageExt(installeritem):
pkginfo = get_catalog_info_from_path(installeritem, options)
if not pkginfo:
raise PkgInfoGenerationError(
"%s doesn't appear to be a valid installer item!"
% installeritem)
if os.path.isdir(installeritem) and options.print_warnings:
print("WARNING: %s is a bundle-style package!\n"
"To use it with Munki, you should encapsulate it "
"in a disk image.\n" % installeritem, file=sys.stderr)
# need to walk the dir and add it all up
for (path, dummy_dirs, files) in os.walk(installeritem):
for name in files:
filename = os.path.join(path, name)
# use os.lstat so we don't follow symlinks
itemsize += int(os.lstat(filename).st_size)
# convert to kbytes
itemsize = int(itemsize/1024)
elif pkgutils.hasValidConfigProfileExt(installeritem):
try:
pkginfo = get_catalog_info_for_profile(installeritem)
except ProfileMetadataGenerationError as err:
print(err, file=sys.stderr)
raise PkgInfoGenerationError(
"%s doesn't appear to be a supported configuration "
"profile!" % installeritem)
else:
raise PkgInfoGenerationError(
"%s is not a valid installer item!" % installeritem)
pkginfo['installer_item_size'] = int(itemsize/1024)
if itemhash != "N/A":
pkginfo['installer_item_hash'] = itemhash
# try to generate the correct item location
temppath = installeritem
location = ""
while len(temppath) > 4:
if temppath.endswith('/pkgs'):
location = installeritem[len(temppath)+1:]
break
else:
temppath = os.path.dirname(temppath)
if not location:
#just the filename
location = os.path.split(installeritem)[1]
pkginfo['installer_item_location'] = location
# ADOBE STUFF - though maybe generalizable in the future?
if (pkginfo.get('installer_type') == "AdobeCCPInstaller" and
not options.uninstalleritem) and options.print_warnings:
print("WARNING: This item appears to be an Adobe Creative "
"Cloud product install.\n"
"No uninstaller package was specified so product "
"removal will not be possible.", file=sys.stderr)
pkginfo['uninstallable'] = False
if 'uninstall_method' in pkginfo:
del pkginfo['uninstall_method']
if options.uninstalleritem:
uninstallerpath = options.uninstalleritem
if os.path.exists(uninstallerpath):
# try to generate the correct item location
temppath = uninstallerpath
location = ""
while len(temppath) > 4:
if temppath.endswith('/pkgs'):
location = uninstallerpath[len(temppath)+1:]
break
else:
temppath = os.path.dirname(temppath)
if not location:
#just the filename
location = os.path.split(uninstallerpath)[1]
pkginfo['uninstaller_item_location'] = location
itemsize = int(os.path.getsize(uninstallerpath))
itemhash = munkihash.getsha256hash(uninstallerpath)
pkginfo['uninstaller_item_size'] = int(itemsize/1024)
pkginfo['uninstaller_item_hash'] = itemhash
else:
raise PkgInfoGenerationError(
"No uninstaller item at %s" % uninstallerpath)
# if we have receipts, assume we can uninstall using them
if pkginfo.get('receipts', None):
pkginfo['uninstallable'] = True
pkginfo['uninstall_method'] = "removepackages"
else:
if options.nopkg:
pkginfo['installer_type'] = "nopkg"
if options.catalog:
pkginfo['catalogs'] = options.catalog
else:
pkginfo['catalogs'] = ['testing']
if options.description:
pkginfo['description'] = read_file_or_string(options.description)
if options.displayname:
pkginfo['display_name'] = options.displayname
if options.name:
pkginfo['name'] = options.name
if options.pkgvers:
pkginfo['version'] = options.pkgvers
if options.category:
pkginfo['category'] = options.category
if options.developer:
pkginfo['developer'] = options.developer
if options.icon:
pkginfo['icon_name'] = options.icon
default_minosversion = "10.4.0"
maxfileversion = "0.0.0.0.0"
if pkginfo:
pkginfo['autoremove'] = False
if not 'version' in pkginfo:
if maxfileversion != "0.0.0.0.0":
pkginfo['version'] = maxfileversion
else:
pkginfo['version'] = "1.0.0.0.0 (Please edit me!)"
if options.file:
for fitem in options.file:
# no trailing slashes, please.
fitem = fitem.rstrip('/')
if fitem.startswith('/Library/Receipts'):
# no receipts, please!
if options.print_warnings:
print("Item %s appears to be a receipt. Skipping." % fitem,
file=sys.stderr)
continue
if os.path.exists(fitem):
iteminfodict = getiteminfo(fitem)
if 'CFBundleShortVersionString' in iteminfodict:
thisitemversion = \
iteminfodict['CFBundleShortVersionString']
if (pkgutils.MunkiLooseVersion(thisitemversion) >
pkgutils.MunkiLooseVersion(maxfileversion)):
maxfileversion = thisitemversion
installs.append(iteminfodict)
elif options.print_warnings:
print("Item %s doesn't exist. Skipping." % fitem,
file=sys.stderr)
if installs:
pkginfo['installs'] = installs
# determine minimum_os_version from identified apps in the installs array
if 'installs' in pkginfo:
# build a list of minosversions using a list comprehension
item_minosversions = [
pkgutils.MunkiLooseVersion(item['minosversion'])
for item in pkginfo['installs']
if 'minosversion' in item]
# add the default in case it's an empty list
item_minosversions.append(
pkgutils.MunkiLooseVersion(default_minosversion))
if 'minimum_os_version' in pkginfo:
# handle case where value may have been set (e.g. flat package)
item_minosversions.append(pkgutils.MunkiLooseVersion(
pkginfo['minimum_os_version']))
# get the maximum from the list and convert back to string
pkginfo['minimum_os_version'] = str(max(item_minosversions))
if not 'minimum_os_version' in pkginfo:
# ensure a minimum_os_version is set unless using --file option only
pkginfo['minimum_os_version'] = default_minosversion
if options.file and not installeritem:
# remove minimum_os_version as we don't include it for --file only
pkginfo.pop('minimum_os_version')
if options.installcheck_script:
scriptstring = readfile(options.installcheck_script)
if scriptstring:
pkginfo['installcheck_script'] = scriptstring
if options.uninstallcheck_script:
scriptstring = readfile(options.uninstallcheck_script)
if scriptstring:
pkginfo['uninstallcheck_script'] = scriptstring
if options.postinstall_script:
scriptstring = readfile(options.postinstall_script)
if scriptstring:
pkginfo['postinstall_script'] = scriptstring
if options.preinstall_script:
scriptstring = readfile(options.preinstall_script)
if scriptstring:
pkginfo['preinstall_script'] = scriptstring
if options.postuninstall_script:
scriptstring = readfile(options.postuninstall_script)
if scriptstring:
pkginfo['postuninstall_script'] = scriptstring
if options.preuninstall_script:
scriptstring = readfile(options.preuninstall_script)
if scriptstring:
pkginfo['preuninstall_script'] = scriptstring
if options.uninstall_script:
scriptstring = readfile(options.uninstall_script)
if scriptstring:
pkginfo['uninstall_script'] = scriptstring
pkginfo['uninstall_method'] = 'uninstall_script'
if options.autoremove:
pkginfo['autoremove'] = True
if options.minimum_munki_version:
pkginfo['minimum_munki_version'] = options.minimum_munki_version
if options.OnDemand:
pkginfo['OnDemand'] = True
if options.unattended_install:
pkginfo['unattended_install'] = True
if options.unattended_uninstall:
pkginfo['unattended_uninstall'] = True
if options.minimum_os_version:
pkginfo['minimum_os_version'] = options.minimum_os_version
if options.maximum_os_version:
pkginfo['maximum_os_version'] = options.maximum_os_version
if options.force_install_after_date:
date_obj = convert_date_string_to_nsdate(
options.force_install_after_date)
if date_obj:
pkginfo['force_install_after_date'] = date_obj
else:
raise PkgInfoGenerationError(
"Invalid date format %s for force_install_after_date"
% options.force_install_after_date)
if options.RestartAction:
valid_actions = ['RequireRestart', 'RequireLogout', 'RecommendRestart']
if options.RestartAction in valid_actions:
pkginfo['RestartAction'] = options.RestartAction
elif 'restart' in options.RestartAction.lower():
pkginfo['RestartAction'] = 'RequireRestart'
elif 'logout' in options.RestartAction.lower():
pkginfo['RestartAction'] = 'RequireLogout'
if options.update_for:
pkginfo['update_for'] = options.update_for
if options.requires:
pkginfo['requires'] = options.requires
if options.blocking_application:
pkginfo['blocking_applications'] = options.blocking_application
if options.uninstall_method:
pkginfo['uninstall_method'] = options.uninstall_method
if options.installer_environment:
try:
installer_environment_dict = dict(
(k, v) for k, v in (
kv.split('=') for kv in options.installer_environment))
except Exception:
installer_environment_dict = {}
if installer_environment_dict:
pkginfo['installer_environment'] = installer_environment_dict
if options.notes:
pkginfo['notes'] = read_file_or_string(options.notes)
if options.apple_update:
# remove minimum_os_version as we don't include it for this option
pkginfo.pop('minimum_os_version')
if options.catalog:
pkginfo['catalogs'] = options.catalog
else:
pkginfo['catalogs'] = ['testing']
if options.pkgvers:
pkginfo['version'] = options.pkgvers
else:
pkginfo['version'] = "1.0"
pkginfo['name'] = options.apple_update
if options.displayname:
pkginfo['display_name'] = options.displayname
pkginfo['installer_type'] = 'apple_update_metadata'
# add user/environment metadata
pkginfo['_metadata'] = make_pkginfo_metadata()
# return the info
return pkginfo
def check_mode(option, opt, value, parser):
'''Callback to check --mode options'''
modes = value.replace(',', ' ').split()
value = None
rex = re.compile("[augo]+[=+-][rstwxXugo]+")
for mode in modes:
if rex.match(mode):
value = mode if not value else (value + "," + mode)
else:
raise optparse.OptionValueError(
"option %s: invalid mode: %s" % (opt, mode))
setattr(parser.values, option.dest, value)
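# Illustrative sketch (not part of the original module): values the --mode
# callback above accepts or rejects; the option strings are hypothetical.
#
#   --mode "u+w,go-w"   # accepted: each clause matches [augo]+[=+-][rstwxXugo]+
#   --mode "755"        # rejected: numeric modes raise an OptionValueError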
def add_option_groups(parser):
'''Adds our (many) option groups to the options parser'''
# Default override options
default_override_options = optparse.OptionGroup(
parser, 'Default Override Options',
('Options specified will override information automatically derived '
'from the package.'))
default_override_options.add_option(
'--name',
metavar='NAME',
help='Name of the package.'
)
default_override_options.add_option(
'--displayname',
metavar='DISPLAY_NAME',
help='Display name of the package.'
)
default_override_options.add_option(
'--description',
metavar='STRING|PATH',
help=('Description of the package. '
'Can be a PATH to a file (plain text or html).')
)
default_override_options.add_option(
'--pkgvers',
metavar='PACKAGE_VERSION',
help='Version of the package.'
)
default_override_options.add_option(
'--RestartAction',
metavar='ACTION',
help=('Specify a \'RestartAction\' for the package. '
'Supported actions: RequireRestart, RequireLogout, or '
'RecommendRestart')
)
default_override_options.add_option(
'--uninstall_method', '--uninstall-method',
metavar='METHOD|PATH',
help=('Specify an \'uninstall_method\' for the package. '
'Default method depends on the package type: i.e. '
'drag-n-drop, Apple package, or an embedded uninstall script. '
'Can be a path to a script on the client computer.')
)
parser.add_option_group(default_override_options)
# Script options
script_options = optparse.OptionGroup(
parser, 'Script Options',
'All scripts are read and embedded into the pkginfo.')
script_options.add_option(
'--installcheck_script', '--installcheck-script',
metavar='SCRIPT_PATH',
help=('Path to an optional installcheck script to be '
'run to determine if item should be installed. '
'An exit code of 0 indicates installation should occur. '
'Takes precedence over installs items and receipts.')
)
script_options.add_option(
'--uninstallcheck_script', '--uninstallcheck-script',
metavar='SCRIPT_PATH',
help=('Path to an optional uninstallcheck script to be '
'run to determine if item should be uninstalled. '
'An exit code of 0 indicates uninstallation should occur. '
'Takes precedence over installs items and receipts.')
)
script_options.add_option(
'--preinstall_script', '--preinstall-script',
metavar='SCRIPT_PATH',
help=('Path to an optional preinstall script to be '
'run before installation of the item.')
)
script_options.add_option(
'--postinstall_script', '--postinstall-script',
metavar='SCRIPT_PATH',
help=('Path to an optional postinstall script to be '
'run after installation of the item.')
)
script_options.add_option(
'--preuninstall_script', '--preuninstall-script',
metavar='SCRIPT_PATH',
help=('Path to an optional preuninstall script to be run '
'before removal of the item.')
)
script_options.add_option(
'--postuninstall_script', '--postuninstall-script',
metavar='SCRIPT_PATH',
help=('Path to an optional postuninstall script to be run '
'after removal of the item.')
)
script_options.add_option(
'--uninstall_script', '--uninstall-script',
metavar='SCRIPT_PATH',
help=('Path to an uninstall script to be run in order '
'to uninstall this item.')
)
parser.add_option_group(script_options)
# Drag-n-Drop options
dragdrop_options = optparse.OptionGroup(
parser, 'Drag-n-Drop Options',
('These options apply to installer items that are "drag-n-drop" '
'disk images.')
)
dragdrop_options.add_option(
'--itemname', '-i', '--appname', '-a',
metavar='ITEM',
dest='item',
help=('Name or relative path of the item to be installed. '
'Useful if there is more than one item at the root of the dmg '
'or the item is located in a subdirectory. '
'Absolute paths can be provided as well but they '
'must point to an item located within the dmg.')
)
dragdrop_options.add_option(
'--destinationpath', '-d',
metavar='PATH',
help=('Path to which the item should be copied. Defaults to '
'"/Applications".')
)
dragdrop_options.add_option(
'--destinationitemname', '--destinationitem',
metavar='NAME',
dest='destitemname',
help=('Alternate name to copy the item as. '
'Specifying this option also updates the corresponding '
'"installs" item\'s path to use the provided name.')
)
dragdrop_options.add_option(
'-o', '--owner',
metavar='USER',
dest='user',
help=('Sets the owner of the copied item. '
'The owner may be either a UID or a symbolic name. '
'The owner will be set recursively on the item.')
)
dragdrop_options.add_option(
'-g', '--group',
metavar='GROUP',
dest='group',
help=('Sets the group of the copied item. '
'The group may be either a GID or a symbolic name. '
'The group will be set recursively on the item.')
)
dragdrop_options.add_option(
'-m', '--mode',
metavar='MODE',
dest='mode',
action='callback',
type='string',
callback=check_mode,
help=('Sets the mode of the copied item. '
'The specified mode must be in symbolic form. '
'See the manpage for chmod(1) for more information. '
'The mode is applied recursively.')
)
parser.add_option_group(dragdrop_options)
# Apple package specific options
apple_options = optparse.OptionGroup(parser, 'Apple Package Options')
apple_options.add_option(
'--pkgname', '-p',
help=('If the installer item is a disk image containing multiple '
'packages, or the package to be installed is not at the root '
'of the mounted disk image, PKGNAME is a relative path from '
'the root of the mounted disk image to the specific package to '
'be installed. '
'If the installer item is a disk image containing an Adobe '
'CS4 Deployment Toolkit installation, PKGNAME is the name of '
'an Adobe CS4 Deployment Toolkit installer package folder at '
'the top level of the mounted dmg. '
'If this flag is missing, the AdobeUber* files should be at '
'the top level of the mounted dmg.')
)
apple_options.add_option(
'--installer_choices_xml', '--installer-choices-xml',
action='store_true',
help=('Generate installer choices for metapackages. '
'Note: Requires Mac OS X 10.6.6 or later.')
)
apple_options.add_option(
'--installer_environment', '--installer-environment', '-E',
action="append",
metavar='KEY=VALUE',
help=('Specifies key/value pairs to set environment variables for use '
'by /usr/sbin/installer. A key/value pair of '
'USER=CURRENT_CONSOLE_USER indicates that USER be set to the '
'GUI user, otherwise root. Can be specified multiple times.')
)
parser.add_option_group(apple_options)
# Adobe package specific options
adobe_options = optparse.OptionGroup(parser, 'Adobe-specific Options')
adobe_options.add_option(
'--uninstallerdmg', '--uninstallerpkg', '--uninstallpkg', '-U',
metavar='UNINSTALLERITEM', dest='uninstalleritem',
help=('If the installer item is a disk image containing an Adobe CS4 '
'Deployment Toolkit installation package or Adobe CS3 deployment '
'package, UNINSTALLERITEM is a path to a disk image containing '
'an AdobeUberUninstaller for this item.\n'
'If the installer item is a Creative Cloud Packager install '
'package, UNINSTALLERITEM is a path to the matching Creative '
'Cloud Packager uninstall package.')
)
parser.add_option_group(adobe_options)
# Forced/Unattended (install) options
forced_unattended_options = optparse.OptionGroup(
parser, 'Forced/Unattended Options')
forced_unattended_options.add_option(
'--unattended_install', '--unattended-install',
action='store_true',
help='Item can be installed without notifying the user.')
forced_unattended_options.add_option(
'--unattended_uninstall', '--unattended-uninstall',
action='store_true',
help='Item can be uninstalled without notifying the user.')
forced_unattended_options.add_option(
'--force_install_after_date', '--force-install-after-date',
metavar='DATE',
help=('Specify a date, in local time, after which the package will '
'be forcefully installed. DATE format: yyyy-mm-ddThh:mm:ssZ '
'Example: \'2011-08-11T12:55:00Z\' equates to 11 August 2011 '
'at 12:55 PM local time.')
)
parser.add_option_group(forced_unattended_options)
# 'installs' generation options
# (by itself since no installer_item needs to be specified)
gen_installs_options = optparse.OptionGroup(
parser, 'Generating \'installs\' items')
gen_installs_options.add_option(
'--file', '-f',
action="append",
metavar='PATH',
help=('Path to a filesystem item installed by this package, typically '
'an application. This generates an "installs" item for the '
'pkginfo, to be used to determine if this software has been '
'installed. Can be specified multiple times.')
)
parser.add_option_group(gen_installs_options)
# Apple update metadata pkg options
# (by itself since no installer_item needs to be specified)
apple_update_metadata_options = optparse.OptionGroup(
parser, 'Generating Apple update metadata items')
apple_update_metadata_options.add_option(
'--apple_update', '--apple-update',
metavar='PRODUCTKEY',
help=('Specify an Apple update \'productKey\' used to manipulate '
'the behavior of a pending Apple software update. '
'For example, a \'force_install_after_date\' key could be added '
'as opposed to importing the update into the munki repo.')
)
parser.add_option_group(apple_update_metadata_options)
# Additional options - misc. options that don't fit into other categories,
# and don't necessarily warrant the creation of their own option group
additional_options = optparse.OptionGroup(parser, 'Additional Options')
additional_options.add_option(
'--autoremove',
action='store_true',
help=('Indicates this package should be automatically removed if it is '
'not listed in any applicable \'managed_installs\'.')
)
additional_options.add_option(
'--OnDemand',
action='store_true',
help=('Indicates this package should be an OnDemand package, '
'installed only when explicitly requested rather than via '
'\'managed_installs\'.')
)
additional_options.add_option(
'--minimum_munki_version', '--minimum-munki-version',
metavar='VERSION',
help=('Minimum version of munki required to perform installation. '
'Uses format produced by \'--version\' query from any munki '
'utility.')
)
additional_options.add_option(
'--minimum_os_version', '--minimum-os-version', '--min-os-ver',
metavar='VERSION',
help='Minimum OS version for the installer item.'
)
additional_options.add_option(
'--maximum_os_version', '--maximum-os-version', '--max-os-ver',
metavar='VERSION',
help='Maximum OS version for the installer item.'
)
additional_options.add_option(
'--update_for', '--update-for', '-u',
action="append",
metavar='PKG_NAME',
help=('Specifies a package for which the current package is an update. '
'Can be specified multiple times to build an array of packages.')
)
additional_options.add_option(
'--requires', '-r',
action="append",
metavar='PKG_NAME',
help=('Specifies a package required by the current package. Can be '
'specified multiple times to build an array of required '
'packages.')
)
additional_options.add_option(
'--blocking_application', '--blocking-application', '-b',
action="append",
metavar='APP_NAME',
help=('Specifies an application that blocks installation. Can be '
'specified multiple times to build an array of blocking '
'applications.')
)
additional_options.add_option(
'--catalog', '-c',
action="append",
metavar='CATALOG_NAME',
help=('Specifies in which catalog the item should appear. The default '
'is \'testing\'. Can be specified multiple times to add the item '
'to multiple catalogs.')
)
additional_options.add_option(
'--category',
metavar='CATEGORY',
help='Category for display in Managed Software Center.'
)
additional_options.add_option(
'--developer',
metavar='DEVELOPER',
help='Developer name for display in Managed Software Center.'
)
additional_options.add_option(
'--icon', '--iconname', '--icon-name', '--icon_name',
metavar='ICONNAME',
help='Name of icon file for display in Managed Software Center.'
)
additional_options.add_option(
'--notes',
metavar='STRING|PATH',
help=('Specifies administrator provided notes to be embedded into the '
'pkginfo. Can be a PATH to a file.')
)
additional_options.add_option(
'--nopkg',
action='store_true',
help=('Indicates this pkginfo should have an \'installer_type\' of '
'\'nopkg\'. Ignored if a package or dmg argument is supplied.')
)
# secret option!
additional_options.add_option(
'--print-warnings',
action='store_true', default=True,
help=optparse.SUPPRESS_HELP
)
parser.add_option_group(additional_options)
| []
| []
| [
"__CFPREFERENCES_AVOID_DAEMON"
]
| [] | ["__CFPREFERENCES_AVOID_DAEMON"] | python | 1 | 0 | |
sign_out.py | import os
import login
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from flask import Flask, redirect, render_template, url_for, session, request, flash
from datetime import timedelta
import psycopg2
import requests
import json
from flask import jsonify
from flask_debug import Debug
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
Debug(app)
app.add_url_rule('/login' , view_func=login.login)
if not os.getenv("DATABASE_URL"):
raise RuntimeError("DATABASE_URL is not set")
app.permanent_session_lifetime = timedelta(days=1)
app.secret_key = 'you'
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
def signout():
session.pop('password', None)
session.pop('email', None)
session.pop('name', None)
return redirect(url_for('signin')) | []
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MoocOnline.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
poptox/loons/loons_output.py | # -*- coding: utf-8 -*-
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import numpy as np
import cgi
import cgitb
import copy
cgitb.enable()
from loons import loons_model
from loons import loons_tables
class loons_OutputPage(webapp.RequestHandler):
def post(self):
form = cgi.FieldStorage()
b = form.getvalue('b')
m = form.getvalue('m')
r = form.getvalue('r')
pa = form.getvalue('pa')
sj = form.getvalue('sj')
t = form.getvalue('t')
no1 = form.getvalue('no1')
no2 = form.getvalue('no2')
no3 = form.getvalue('no3')
no4 = form.getvalue('no4')
n_o = [no1, no2, no3, no4]
n_o = np.asarray(n_o)
loons_obj = loons_model.loons(b, m, r, pa, sj, t, no1, no2, no3, no4)
# print loons_obj.b[4]
templatepath = os.path.dirname(__file__) + '/../templates/'
html = template.render(templatepath + '01pop_uberheader.html', {'title':'Ubertool'})
html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'loons','page':'output'})
html = html + template.render(templatepath + '03pop_ubertext_links_left.html', {})
html = html + template.render(templatepath + '04uberoutput_start.html', {
'model':'loons',
'model_attributes':'Loons Population Model'})
html = html + loons_tables.table_all(loons_obj) #
html = html + """<table width="400" border="1" style="display:none">
<tr>
<td>number of class</td>
<td id="n_o_c">4</td>
</tr>
<tr>
<td>final population</td>
<td id="final">%s</td>
</tr>
</table>"""%(loons_obj.leslie_out)
html = html + template.render(templatepath + 'loons_jqplot.html', {})
html = html + template.render(templatepath + '04uberoutput_end.html', {})
html = html + template.render(templatepath + 'export.html', {})
html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', loons_OutputPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
tests/app/migrations/0005_remove_tree_category.py | # Generated by Django 2.1.15 on 2021-05-26 13:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20210526_1310'),
]
operations = [
migrations.RemoveField(
model_name='tree',
name='category',
),
]
| []
| []
| []
| [] | [] | python | null | null | null |
vendor/github.com/go-openapi/analysis/debug.go | // Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package analysis
import (
"os"
"github.com/go-openapi/analysis/internal/debug"
)
var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "")
| [
"\"SWAGGER_DEBUG\""
]
| []
| [
"SWAGGER_DEBUG"
]
| [] | ["SWAGGER_DEBUG"] | go | 1 | 0 | |
rest/taskrouter/reservations/dequeue/example-1.7.x.java | // Install the Java helper library from twilio.com/docs/java/install
import com.twilio.Twilio;
import com.twilio.rest.taskrouter.v1.workspace.task.Reservation;
public class Example {
// Get your Account SID and Auth Token from https://twilio.com/console
// To set up environment variables, see http://twil.io/secure
private static final String ACCOUNT_SID = System.getenv("TWILIO_ACCOUNT_SID");
private static final String AUTH_TOKEN = System.getenv("TWILIO_AUTH_TOKEN");
private static final String WORKSPACE_SID = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX";
private static final String TASK_SID = "WTXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX";
private static final String RESERVATION_SID = "WRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX";
public static void main(String[] args) {
Twilio.init(ACCOUNT_SID, AUTH_TOKEN);
// dequeue a reservation
Reservation reservation = Reservation.updater(WORKSPACE_SID, TASK_SID, RESERVATION_SID)
.setInstruction("Dequeue")
.setDequeueFrom("+18001231234")
.update();
System.out.println(reservation.getDateUpdated());
}
}
| [
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
]
| []
| [
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
]
| [] | ["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"] | java | 2 | 0 | |
pandas/tests/util/test_util.py | # -*- coding: utf-8 -*-
import codecs
from collections import OrderedDict
import locale
import os
import sys
from uuid import uuid4
import pytest
from pandas.compat import PY3, intern
from pandas.util._decorators import deprecate_kwarg, make_signature
from pandas.util._move import BadMove, move_into_mutable_buffer, stolenbuf
import pandas.util._test_decorators as td
from pandas.util._validators import (
validate_args, validate_args_and_kwargs, validate_bool_kwarg,
validate_kwargs)
import pandas.core.common as com
import pandas.util.testing as tm
class TestDecorators(object):
def setup_method(self, method):
@deprecate_kwarg('old', 'new')
def _f1(new=False):
return new
@deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
def _f2(new=False):
return new
@deprecate_kwarg('old', 'new', lambda x: x + 1)
def _f3(new=0):
return new
@deprecate_kwarg('old', None)
def _f4(old=True, unchanged=True):
return old
self.f1 = _f1
self.f2 = _f2
self.f3 = _f3
self.f4 = _f4
def test_deprecate_kwarg(self):
x = 78
with tm.assert_produces_warning(FutureWarning):
result = self.f1(old=x)
assert result is x
with tm.assert_produces_warning(None):
self.f1(new=x)
def test_dict_deprecate_kwarg(self):
x = 'yes'
with tm.assert_produces_warning(FutureWarning):
result = self.f2(old=x)
assert result
def test_missing_deprecate_kwarg(self):
x = 'bogus'
with tm.assert_produces_warning(FutureWarning):
result = self.f2(old=x)
assert result == 'bogus'
def test_callable_deprecate_kwarg(self):
x = 5
with tm.assert_produces_warning(FutureWarning):
result = self.f3(old=x)
assert result == x + 1
with pytest.raises(TypeError):
self.f3(old='hello')
def test_bad_deprecate_kwarg(self):
with pytest.raises(TypeError):
@deprecate_kwarg('old', 'new', 0)
def f4(new=None):
pass
def test_deprecate_keyword(self):
x = 9
with tm.assert_produces_warning(FutureWarning):
result = self.f4(old=x)
assert result is x
with tm.assert_produces_warning(None):
result = self.f4(unchanged=x)
assert result is True
def test_rands():
r = tm.rands(10)
assert(len(r) == 10)
def test_rands_array():
arr = tm.rands_array(5, size=10)
assert(arr.shape == (10,))
assert(len(arr[0]) == 5)
arr = tm.rands_array(7, size=(10, 10))
assert(arr.shape == (10, 10))
assert(len(arr[1, 1]) == 7)
class TestValidateArgs(object):
fname = 'func'
def test_bad_min_fname_arg_count(self):
msg = "'max_fname_arg_count' must be non-negative"
with tm.assert_raises_regex(ValueError, msg):
validate_args(self.fname, (None,), -1, 'foo')
def test_bad_arg_length_max_value_single(self):
args = (None, None)
compat_args = ('foo',)
min_fname_arg_count = 0
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(args) + min_fname_arg_count
msg = (r"{fname}\(\) takes at most {max_length} "
r"argument \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
with tm.assert_raises_regex(TypeError, msg):
validate_args(self.fname, args,
min_fname_arg_count,
compat_args)
def test_bad_arg_length_max_value_multiple(self):
args = (None, None)
compat_args = dict(foo=None)
min_fname_arg_count = 2
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(args) + min_fname_arg_count
msg = (r"{fname}\(\) takes at most {max_length} "
r"arguments \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
with tm.assert_raises_regex(TypeError, msg):
validate_args(self.fname, args,
min_fname_arg_count,
compat_args)
def test_not_all_defaults(self):
bad_arg = 'foo'
msg = ("the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=self.fname))
compat_args = OrderedDict()
compat_args['foo'] = 2
compat_args['bar'] = -1
compat_args['baz'] = 3
arg_vals = (1, -1, 3)
for i in range(1, 3):
with tm.assert_raises_regex(ValueError, msg):
validate_args(self.fname, arg_vals[:i], 2, compat_args)
def test_validation(self):
# No exceptions should be thrown
validate_args(self.fname, (None,), 2, dict(out=None))
compat_args = OrderedDict()
compat_args['axis'] = 1
compat_args['out'] = None
validate_args(self.fname, (1, None), 2, compat_args)
class TestValidateKwargs(object):
fname = 'func'
def test_bad_kwarg(self):
goodarg = 'f'
badarg = goodarg + 'o'
compat_args = OrderedDict()
compat_args[goodarg] = 'foo'
compat_args[badarg + 'o'] = 'bar'
kwargs = {goodarg: 'foo', badarg: 'bar'}
msg = (r"{fname}\(\) got an unexpected "
r"keyword argument '{arg}'".format(
fname=self.fname, arg=badarg))
with tm.assert_raises_regex(TypeError, msg):
validate_kwargs(self.fname, kwargs, compat_args)
def test_not_all_none(self):
bad_arg = 'foo'
msg = (r"the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=self.fname))
compat_args = OrderedDict()
compat_args['foo'] = 1
compat_args['bar'] = 's'
compat_args['baz'] = None
kwarg_keys = ('foo', 'bar', 'baz')
kwarg_vals = (2, 's', None)
for i in range(1, 3):
kwargs = dict(zip(kwarg_keys[:i],
kwarg_vals[:i]))
with tm.assert_raises_regex(ValueError, msg):
validate_kwargs(self.fname, kwargs, compat_args)
def test_validation(self):
# No exceptions should be thrown
compat_args = OrderedDict()
compat_args['f'] = None
compat_args['b'] = 1
compat_args['ba'] = 's'
kwargs = dict(f=None, b=1)
validate_kwargs(self.fname, kwargs, compat_args)
def test_validate_bool_kwarg(self):
arg_names = ['inplace', 'copy']
invalid_values = [1, "True", [1, 2, 3], 5.0]
valid_values = [True, False, None]
for name in arg_names:
for value in invalid_values:
with tm.assert_raises_regex(ValueError,
"For argument \"%s\" "
"expected type bool, "
"received type %s" %
(name, type(value).__name__)):
validate_bool_kwarg(value, name)
for value in valid_values:
assert validate_bool_kwarg(value, name) == value
class TestValidateKwargsAndArgs(object):
fname = 'func'
def test_invalid_total_length_max_length_one(self):
compat_args = ('foo',)
kwargs = {'foo': 'FOO'}
args = ('FoO', 'BaZ')
min_fname_arg_count = 0
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
msg = (r"{fname}\(\) takes at most {max_length} "
r"argument \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
with tm.assert_raises_regex(TypeError, msg):
validate_args_and_kwargs(self.fname, args, kwargs,
min_fname_arg_count,
compat_args)
def test_invalid_total_length_max_length_multiple(self):
compat_args = ('foo', 'bar', 'baz')
kwargs = {'foo': 'FOO', 'bar': 'BAR'}
args = ('FoO', 'BaZ')
min_fname_arg_count = 2
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(kwargs) + len(args) + min_fname_arg_count
msg = (r"{fname}\(\) takes at most {max_length} "
r"arguments \({actual_length} given\)"
.format(fname=self.fname, max_length=max_length,
actual_length=actual_length))
with tm.assert_raises_regex(TypeError, msg):
validate_args_and_kwargs(self.fname, args, kwargs,
min_fname_arg_count,
compat_args)
def test_no_args_with_kwargs(self):
bad_arg = 'bar'
min_fname_arg_count = 2
compat_args = OrderedDict()
compat_args['foo'] = -5
compat_args[bad_arg] = 1
msg = (r"the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=self.fname))
args = ()
kwargs = {'foo': -5, bad_arg: 2}
tm.assert_raises_regex(ValueError, msg,
validate_args_and_kwargs,
self.fname, args, kwargs,
min_fname_arg_count, compat_args)
args = (-5, 2)
kwargs = {}
tm.assert_raises_regex(ValueError, msg,
validate_args_and_kwargs,
self.fname, args, kwargs,
min_fname_arg_count, compat_args)
def test_duplicate_argument(self):
min_fname_arg_count = 2
compat_args = OrderedDict()
compat_args['foo'] = None
compat_args['bar'] = None
compat_args['baz'] = None
kwargs = {'foo': None, 'bar': None}
args = (None,) # duplicate value for 'foo'
msg = (r"{fname}\(\) got multiple values for keyword "
r"argument '{arg}'".format(fname=self.fname, arg='foo'))
with tm.assert_raises_regex(TypeError, msg):
validate_args_and_kwargs(self.fname, args, kwargs,
min_fname_arg_count,
compat_args)
def test_validation(self):
# No exceptions should be thrown
compat_args = OrderedDict()
compat_args['foo'] = 1
compat_args['bar'] = None
compat_args['baz'] = -2
kwargs = {'baz': -2}
args = (1, None)
min_fname_arg_count = 2
validate_args_and_kwargs(self.fname, args, kwargs,
min_fname_arg_count,
compat_args)
class TestMove(object):
def test_cannot_create_instance_of_stolenbuffer(self):
"""Stolen buffers need to be created through the smart constructor
``move_into_mutable_buffer`` which has a bunch of checks in it.
"""
msg = "cannot create 'pandas.util._move.stolenbuf' instances"
with tm.assert_raises_regex(TypeError, msg):
stolenbuf()
def test_more_than_one_ref(self):
"""Test case for when we try to use ``move_into_mutable_buffer`` when
the object being moved has other references.
"""
b = b'testing'
with pytest.raises(BadMove) as e:
def handle_success(type_, value, tb):
assert value.args[0] is b
return type(e).handle_success(e, type_, value, tb) # super
e.handle_success = handle_success
move_into_mutable_buffer(b)
def test_exactly_one_ref(self):
"""Test case for when the object being moved has exactly one reference.
"""
b = b'testing'
# We need to pass an expression on the stack to ensure that there are
# not extra references hanging around. We cannot rewrite this test as
# buf = b[:-3]
# as_stolen_buf = move_into_mutable_buffer(buf)
# because then we would have more than one reference to buf.
as_stolen_buf = move_into_mutable_buffer(b[:-3])
# materialize as bytearray to show that it is mutable
assert bytearray(as_stolen_buf) == b'test'
@pytest.mark.skipif(PY3, reason='bytes objects cannot be interned in py3')
def test_interned(self):
salt = uuid4().hex
def make_string():
# We need to actually create a new string so that it has refcount
# one. We use a uuid so that we know the string could not already
# be in the intern table.
return ''.join(('testing: ', salt))
# This should work, the string has one reference on the stack.
move_into_mutable_buffer(make_string())
refcount = [None] # nonlocal
def ref_capture(ob):
# Subtract two because those are the references owned by this
# frame:
# 1. The local variables of this stack frame.
# 2. The python data stack of this stack frame.
refcount[0] = sys.getrefcount(ob) - 2
return ob
with pytest.raises(BadMove):
# If we intern the string it will still have one reference but now
# it is in the intern table so if other people intern the same
# string while the mutable buffer holds the first string they will
# be the same instance.
move_into_mutable_buffer(ref_capture(intern(make_string()))) # noqa
assert refcount[0] == 1
def test_numpy_errstate_is_default():
# The defaults since numpy 1.6.0
expected = {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
'under': 'ignore'}
import numpy as np
from pandas.compat import numpy # noqa
# The errstate should be unchanged after that import.
assert np.geterr() == expected
@td.skip_if_windows
class TestLocaleUtils(object):
@classmethod
def setup_class(cls):
cls.locales = tm.get_locales()
cls.current_locale = locale.getlocale()
if not cls.locales:
pytest.skip("No locales found")
@classmethod
def teardown_class(cls):
del cls.locales
del cls.current_locale
def test_can_set_locale_valid_set(self):
# Setting the default locale should return True
assert tm.can_set_locale('') is True
def test_can_set_locale_invalid_set(self):
# Setting an invalid locale should return False
assert tm.can_set_locale('non-existent_locale') is False
def test_can_set_locale_invalid_get(self, monkeypatch):
# In some cases, an invalid locale can be set,
# but a subsequent getlocale() raises a ValueError
# See GH 22129
def mockgetlocale():
raise ValueError()
with monkeypatch.context() as m:
m.setattr(locale, 'getlocale', mockgetlocale)
assert tm.can_set_locale('') is False
def test_get_locales(self):
# all systems should have at least a single locale
# GH9744
assert len(tm.get_locales()) > 0
def test_get_locales_prefix(self):
if len(self.locales) == 1:
pytest.skip("Only a single locale found, no point in "
"trying to test filtering locale prefixes")
first_locale = self.locales[0]
assert len(tm.get_locales(prefix=first_locale[:2])) > 0
def test_set_locale(self):
if len(self.locales) == 1:
pytest.skip("Only a single locale found, no point in "
"trying to test setting another locale")
if com._all_none(*self.current_locale):
# Not sure why, but on some travis runs with pytest,
# getlocale() returned (None, None).
pytest.skip("Current locale is not set.")
locale_override = os.environ.get('LOCALE_OVERRIDE', None)
if locale_override is None:
lang, enc = 'it_CH', 'UTF-8'
elif locale_override == 'C':
lang, enc = 'en_US', 'ascii'
else:
lang, enc = locale_override.split('.')
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not tm.can_set_locale(new_locale):
with pytest.raises(locale.Error):
with tm.set_locale(new_locale):
pass
else:
with tm.set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split('.')
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
assert normalized_locale == new_locale
current_locale = locale.getlocale()
assert current_locale == self.current_locale
def test_make_signature():
# See GH 17608
# Case where the func does not have default kwargs
sig = make_signature(validate_kwargs)
assert sig == (['fname', 'kwargs', 'compat_args'],
['fname', 'kwargs', 'compat_args'])
# Case where the func does have default kwargs
sig = make_signature(deprecate_kwarg)
assert sig == (['old_arg_name', 'new_arg_name',
'mapping=None', 'stacklevel=2'],
['old_arg_name', 'new_arg_name', 'mapping', 'stacklevel'])
def test_safe_import(monkeypatch):
assert not td.safe_import("foo")
assert not td.safe_import("pandas", min_version="99.99.99")
# Create dummy module to be imported
import types
import sys
mod_name = "hello123"
mod = types.ModuleType(mod_name)
mod.__version__ = "1.5"
assert not td.safe_import(mod_name)
monkeypatch.setitem(sys.modules, mod_name, mod)
assert not td.safe_import(mod_name, min_version="2.0")
assert td.safe_import(mod_name, min_version="1.0")
| []
| []
| [
"LOCALE_OVERRIDE"
]
| [] | ["LOCALE_OVERRIDE"] | python | 1 | 0 | |
newcode/honeyvault_config.py | # The following dictionaries should be provided to buildcfg.py
# 1: base dictionary //only character words will be considered
# 2: tweak set file
# 3: dictionary with count // PCFG will be built over this
# 4: output PCFG file name/path
# 5: output Trie file name/path
# Empty lines and lines beginning with '#' will be discarded.
# The exact dictionary path should be given.
import math
import os
import random
DEBUG = os.environ.get("DEBUG", False)
BASE_DIR = os.getcwd()
thisdir = os.path.dirname(os.path.abspath(__file__))
# DIC_TRIE_FILE = 'data/english.tri'
# DICTIONARY_DAWG = '{}/Dictionary_Store/dictionary1.1.dawg.gz'.format(thisdir)
# STANDARD_DIC_FILE = "{}/Dictionary_Store/standard_english.tri.gz".format(thisdir)
# GRAMMAR_OUTPUT_FILE = "{}/data/combined.gmr.bz2".format(thisdir)
# GRAMMAR_INPUT_FILE = "{}/data/combined.tri.bz2".format(thisdir)
# HANDGRAMMAR_FILE = "{}/data/grammar.txt".format(thisdir)
STATIC_DIR = os.path.join(thisdir, 'static')
TRAINED_GRAMMAR_FILE = os.path.join(STATIC_DIR, 'grammar.cfg.gz')
if DEBUG:
TRAINED_GRAMMAR_FILE += '~orig'
VAULT_DIST_FILE = os.path.join(STATIC_DIR, 'vault_dist.cfg')
# Don't change
EPSILON = '|_|'
GRAMMAR_R = 0
MEMLIMMIT = 1024 # 1024 MB, 1GB
MIN_COUNT = 2
PRODUCTION = 1
NONTERMINAL = 1
TERMINAL = 1 - NONTERMINAL
REPR_SIZE = 4 # number of bytes to represent an integer. normally 4 bytes. But
# we might go for higher values for better security.
MAX_INT = 256 ** REPR_SIZE # value of maximum integer in this representation.
PASSWORD_LENGTH = 100 # length of the password encoding
HONEY_VAULT_GRAMMAR_SIZE = 500 # 400 bytes, 50 integers/rules
# This controls the size of the NoCrack vault. Refer to the Oakland 15 paper
# (NoCrack) for more details. If you change this, remember to delete
# static/vault.db to see the effect. Needless to say, you will lose all your
# passwords. Export/import operations are on the way. (TODO: Import-Export
# functions)
HONEY_VAULT_S1 = 1000
HONEY_VAULT_S2 = 1000
HONEY_VAULT_STORAGE_SIZE = HONEY_VAULT_S1 + HONEY_VAULT_S2
# For each password there is 1 bit saying whether the password is m/c or human
# generated: '1' --> m/c, '0' --> human generated pw.
# TODO: move it to more succinct repr, Google's protobuf!
HONEY_VAULT_MACHINE_PASS_SET_SIZE = int(math.ceil(HONEY_VAULT_STORAGE_SIZE / 8))
HONEY_VAULT_ENCODING_SIZE = HONEY_VAULT_GRAMMAR_SIZE + \
HONEY_VAULT_STORAGE_SIZE * PASSWORD_LENGTH
HONEY_VAULT_TOTAL_CIPHER_SIZE = HONEY_VAULT_ENCODING_SIZE + \
int(math.ceil(HONEY_VAULT_MACHINE_PASS_SET_SIZE / 4)) + \
8 # PBKDF1 salt size
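# Worked example (illustrative, assuming the defaults above and Python 3
# true division):
#   HONEY_VAULT_STORAGE_SIZE          = 1000 + 1000      = 2000
#   HONEY_VAULT_MACHINE_PASS_SET_SIZE = ceil(2000 / 8)   = 250
#   HONEY_VAULT_ENCODING_SIZE         = 500 + 2000 * 100 = 200500
#   HONEY_VAULT_TOTAL_CIPHER_SIZE     = 200500 + ceil(250 / 4) + 8 = 200571
# (Under Python 2 integer division, 250 / 4 floors to 62, giving 200570.)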
SECURITY_PARAM = 16
SECURITY_PARAM_IN_BASE64 = (SECURITY_PARAM * 4) / 3 + 1
# Static domain mapping list
STATIC_DOMAIN_LIST = '{}/server/static_domain_map.txt'.format(thisdir)
STATIC_DOMAIN_HASH_LIST = '{}/static/static_domain_hashes.txt'.format(thisdir)
# Machine-generated password probability in a set of 1000
MACHINE_GENRATED_PASS_PROB = 10
# Required by honey_client
HONEY_SERVER_URL = "http://localhost:5000/"
VAULT_FILE = 'static/vault.db'
L33T = {
'3': 'e', '4': 'a', '@': 'a',
'$': 's', '0': 'o', '1': 'i',
'z': 's'
}
if DEBUG:
random.seed(123456)
else:
random.seed(os.urandom(4))
| []
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | python | 1 | 0 | |
chapters/appendix_c/tfx_template_example/kubeflow_dag_runner.py | # Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define KubeflowDagRunner to run the pipeline using Kubeflow."""
from __future__ import absolute_import, division, print_function
import os
from absl import logging
from pipeline import configs, pipeline
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.proto import trainer_pb2
from tfx.utils import telemetry_utils
# TFX pipeline produces many output files and metadata. All output data will be
# stored under this OUTPUT_DIR.
OUTPUT_DIR = os.path.join("gs://", configs.GCS_BUCKET_NAME)
# TFX produces two types of outputs, files and metadata.
# - Files will be created under PIPELINE_ROOT directory.
PIPELINE_ROOT = os.path.join(OUTPUT_DIR, "tfx_pipeline_output", configs.PIPELINE_NAME)
# The last component of the pipeline, "Pusher" will produce serving model under
# SERVING_MODEL_DIR.
SERVING_MODEL_DIR = os.path.join(PIPELINE_ROOT, "serving_model")
# Specifies data file directory. DATA_PATH should be a directory containing CSV
# files for CsvExampleGen in this example. By default, data files are in the
# `data` directory.
# NOTE: If you upload data files to GCS(which is recommended if you use
# Kubeflow), you can use a path starting "gs://YOUR_BUCKET_NAME/path" for
# DATA_PATH. For example,
# DATA_PATH = 'gs://bucket/chicago_taxi_trips/csv/'
DATA_PATH = "data"
def run():
"""Define a kubeflow pipeline."""
# Metadata config. The defaults work with the installation of
# KF Pipelines using Kubeflow. If installing KF Pipelines using the
# lightweight deployment option, you may need to override the defaults.
# If you use Kubeflow, metadata will be written to MySQL database inside
# Kubeflow cluster.
metadata_config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
# This pipeline automatically injects the Kubeflow TFX image if the
# environment variable 'KUBEFLOW_TFX_IMAGE' is defined. Currently, the tfx
# cli tool exports the environment variable to pass to the pipelines.
# TODO(b/157598477) Find a better way to pass parameters from CLI handler to
# pipeline DSL file, instead of using environment vars.
tfx_image = os.environ.get("KUBEFLOW_TFX_IMAGE", None)
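# Illustrative example (not part of the template): when launched via the TFX
# CLI, the image name typically arrives through the environment, e.g.
#   KUBEFLOW_TFX_IMAGE=gcr.io/<your-project>/<pipeline-image>:latest
# If the variable is unset, tfx_image stays None and the runner falls back to
# its default TFX image.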
runner_config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
kubeflow_metadata_config=metadata_config, tfx_image=tfx_image
)
pod_labels = kubeflow_dag_runner.get_default_pod_labels().update(
{telemetry_utils.LABEL_KFP_SDK_ENV: "tfx-template"}
)
kubeflow_dag_runner.KubeflowDagRunner(
config=runner_config, pod_labels_to_attach=pod_labels
).run(
pipeline.create_pipeline(
pipeline_name=configs.PIPELINE_NAME,
pipeline_root=PIPELINE_ROOT,
data_path=DATA_PATH,
preprocessing_fn=configs.PREPROCESSING_FN,
run_fn=configs.RUN_FN,
train_args=trainer_pb2.TrainArgs(num_steps=configs.TRAIN_NUM_STEPS),
eval_args=trainer_pb2.EvalArgs(num_steps=configs.EVAL_NUM_STEPS),
eval_accuracy_threshold=configs.EVAL_ACCURACY_THRESHOLD,
serving_model_dir=SERVING_MODEL_DIR,
beam_pipeline_args=configs.DATAFLOW_BEAM_PIPELINE_ARGS,
ai_platform_training_args=configs.GCP_AI_PLATFORM_TRAINING_ARGS,
# ai_platform_serving_args=configs.GCP_AI_PLATFORM_SERVING_ARGS,
)
)
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
run()
| []
| []
| [
"KUBEFLOW_TFX_IMAGE"
]
| [] | ["KUBEFLOW_TFX_IMAGE"] | python | 1 | 0 | |
git_config.py | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import contextlib
import errno
import json
import os
import re
import subprocess
import sys
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import time
from pyversion import is_python3
if is_python3():
import urllib.request
import urllib.error
else:
import urllib2
import imp
urllib = imp.new_module('urllib')
urllib.request = urllib2
urllib.error = urllib2
from signal import SIGTERM
from error import GitError, UploadError
from trace import Trace
if is_python3():
from http.client import HTTPException
else:
from httplib import HTTPException
from git_command import GitCommand
from git_command import ssh_sock
from git_command import terminate_ssh_clients
R_HEADS = 'refs/heads/'
R_TAGS = 'refs/tags/'
ID_RE = re.compile(r'^[0-9a-f]{40}$')
REVIEW_CACHE = dict()
def IsId(rev):
return ID_RE.match(rev)
def _key(name):
parts = name.split('.')
if len(parts) < 2:
return name.lower()
parts[ 0] = parts[ 0].lower()
parts[-1] = parts[-1].lower()
return '.'.join(parts)
class GitConfig(object):
_ForUser = None
@classmethod
def ForUser(cls):
if cls._ForUser is None:
cls._ForUser = cls(configfile = os.path.expanduser('~/.gitconfig'))
return cls._ForUser
@classmethod
def ForRepository(cls, gitdir, defaults=None):
return cls(configfile = os.path.join(gitdir, 'config'),
defaults = defaults)
def __init__(self, configfile, defaults=None, jsonFile=None):
self.file = configfile
self.defaults = defaults
self._cache_dict = None
self._section_dict = None
self._remotes = {}
self._branches = {}
self._json = jsonFile
if self._json is None:
self._json = os.path.join(
os.path.dirname(self.file),
'.repo_' + os.path.basename(self.file) + '.json')
def Has(self, name, include_defaults = True):
"""Return true if this configuration file has the key.
"""
if _key(name) in self._cache:
return True
if include_defaults and self.defaults:
return self.defaults.Has(name, include_defaults = True)
return False
def GetBoolean(self, name):
"""Returns a boolean from the configuration file.
None : The value was not defined, or is not a boolean.
True : The value was set to true or yes.
False: The value was set to false or no.
"""
v = self.GetString(name)
if v is None:
return None
v = v.lower()
if v in ('true', 'yes'):
return True
if v in ('false', 'no'):
return False
return None
def GetString(self, name, all_keys=False):
"""Get the first value for a key, or None if it is not defined.
This configuration file is used first, if the key is not
defined or all_keys = True then the defaults are also searched.
"""
try:
v = self._cache[_key(name)]
except KeyError:
if self.defaults:
return self.defaults.GetString(name, all_keys = all_keys)
v = []
if not all_keys:
if v:
return v[0]
return None
r = []
r.extend(v)
if self.defaults:
r.extend(self.defaults.GetString(name, all_keys = True))
return r
def SetString(self, name, value):
"""Set the value(s) for a key.
Only this configuration file is modified.
The supplied value should be either a string,
or a list of strings (to store multiple values).
"""
key = _key(name)
try:
old = self._cache[key]
except KeyError:
old = []
if value is None:
if old:
del self._cache[key]
self._do('--unset-all', name)
elif isinstance(value, list):
if len(value) == 0:
self.SetString(name, None)
elif len(value) == 1:
self.SetString(name, value[0])
elif old != value:
self._cache[key] = list(value)
self._do('--replace-all', name, value[0])
for i in range(1, len(value)):
self._do('--add', name, value[i])
elif len(old) != 1 or old[0] != value:
self._cache[key] = [value]
self._do('--replace-all', name, value)
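# Illustrative usage (not in the original source):
#   cfg.SetString('remote.origin.url', 'https://example.com/repo.git')    # single value
#   cfg.SetString('remote.origin.fetch', ['+refs/heads/*:refs/remotes/origin/*',
#                                          '+refs/tags/*:refs/tags/*'])   # multi-valued key
#   cfg.SetString('remote.origin.url', None)                              # unset the key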
def GetRemote(self, name):
"""Get the remote.$name.* configuration values as an object.
"""
try:
r = self._remotes[name]
except KeyError:
r = Remote(self, name)
self._remotes[r.name] = r
return r
def GetBranch(self, name):
"""Get the branch.$name.* configuration values as an object.
"""
try:
b = self._branches[name]
except KeyError:
b = Branch(self, name)
self._branches[b.name] = b
return b
def GetSubSections(self, section):
"""List all subsection names matching $section.*.*
"""
return self._sections.get(section, set())
def HasSection(self, section, subsection = ''):
"""Does at least one key in section.subsection exist?
"""
try:
return subsection in self._sections[section]
except KeyError:
return False
def UrlInsteadOf(self, url):
"""Resolve any url.*.insteadof references.
"""
for new_url in self.GetSubSections('url'):
for old_url in self.GetString('url.%s.insteadof' % new_url, True):
if old_url is not None and url.startswith(old_url):
return new_url + url[len(old_url):]
return url
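# Illustrative example (not in the original source): with a user gitconfig
# containing
#   [url "https://example.com/"]
#       insteadof = git://example.com/
# UrlInsteadOf('git://example.com/project.git') returns
# 'https://example.com/project.git'.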
@property
def _sections(self):
d = self._section_dict
if d is None:
d = {}
for name in self._cache.keys():
p = name.split('.')
if 2 == len(p):
section = p[0]
subsect = ''
else:
section = p[0]
subsect = '.'.join(p[1:-1])
if section not in d:
d[section] = set()
d[section].add(subsect)
self._section_dict = d
return d
@property
def _cache(self):
if self._cache_dict is None:
self._cache_dict = self._Read()
return self._cache_dict
def _Read(self):
d = self._ReadJson()
if d is None:
d = self._ReadGit()
self._SaveJson(d)
return d
def _ReadJson(self):
try:
if os.path.getmtime(self._json) \
<= os.path.getmtime(self.file):
os.remove(self._json)
return None
except OSError:
return None
try:
Trace(': parsing %s', self.file)
fd = open(self._json)
try:
return json.load(fd)
finally:
fd.close()
except (IOError, ValueError):
os.remove(self._json)
return None
def _SaveJson(self, cache):
try:
fd = open(self._json, 'w')
try:
json.dump(cache, fd, indent=2)
finally:
fd.close()
except (IOError, TypeError):
if os.path.exists(self._json):
os.remove(self._json)
def _ReadGit(self):
"""
Read configuration data from git.
This internal method populates the GitConfig cache.
"""
c = {}
d = self._do('--null', '--list')
if d is None:
return c
for line in d.decode('utf-8').rstrip('\0').split('\0'): # pylint: disable=W1401
# Backslash is not anomalous
if '\n' in line:
key, val = line.split('\n', 1)
else:
key = line
val = None
if key in c:
c[key].append(val)
else:
c[key] = [val]
return c
def _do(self, *args):
command = ['config', '--file', self.file]
command.extend(args)
p = GitCommand(None,
command,
capture_stdout = True,
capture_stderr = True)
if p.Wait() == 0:
return p.stdout
else:
GitError('git config %s: %s' % (str(args), p.stderr))
class RefSpec(object):
"""A Git refspec line, split into its components:
forced: True if the line starts with '+'
src: Left side of the line
dst: Right side of the line
"""
@classmethod
def FromString(cls, rs):
lhs, rhs = rs.split(':', 2)
if lhs.startswith('+'):
lhs = lhs[1:]
forced = True
else:
forced = False
return cls(forced, lhs, rhs)
def __init__(self, forced, lhs, rhs):
self.forced = forced
self.src = lhs
self.dst = rhs
def SourceMatches(self, rev):
if self.src:
if rev == self.src:
return True
if self.src.endswith('/*') and rev.startswith(self.src[:-1]):
return True
return False
def DestMatches(self, ref):
if self.dst:
if ref == self.dst:
return True
if self.dst.endswith('/*') and ref.startswith(self.dst[:-1]):
return True
return False
def MapSource(self, rev):
if self.src.endswith('/*'):
return self.dst[:-1] + rev[len(self.src) - 1:]
return self.dst
def __str__(self):
s = ''
if self.forced:
s += '+'
if self.src:
s += self.src
if self.dst:
s += ':'
s += self.dst
return s
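# Illustrative example (not in the original source) of how RefSpec maps refs:
#   spec = RefSpec.FromString('+refs/heads/*:refs/remotes/origin/*')
#   spec.forced                           -> True
#   spec.SourceMatches('refs/heads/main') -> True
#   spec.MapSource('refs/heads/main')     -> 'refs/remotes/origin/main'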
_master_processes = []
_master_keys = set()
_ssh_master = True
_master_keys_lock = None
def init_ssh():
"""Should be called once at the start of repo to init ssh master handling.
At the moment, all we do is to create our lock.
"""
global _master_keys_lock
assert _master_keys_lock is None, "Should only call init_ssh once"
_master_keys_lock = _threading.Lock()
def _open_ssh(host, port=None):
global _ssh_master
# Acquire the lock. This is needed to prevent opening multiple masters for
# the same host when we're running "repo sync -jN" (for N > 1) _and_ the
# manifest <remote fetch="ssh://xyz"> specifies a different host from the
# one that was passed to repo init.
_master_keys_lock.acquire()
try:
# Check to see whether we already think that the master is running; if we
# think it's already running, return right away.
if port is not None:
key = '%s:%s' % (host, port)
else:
key = host
if key in _master_keys:
return True
if not _ssh_master \
or 'GIT_SSH' in os.environ \
or sys.platform in ('win32', 'cygwin'):
# failed earlier, or cygwin ssh can't do this
#
return False
# We will make two calls to ssh; this is the common part of both calls.
command_base = ['ssh',
'-o','ControlPath %s' % ssh_sock(),
host]
if port is not None:
command_base[1:1] = ['-p', str(port)]
# Since the key wasn't in _master_keys, we think that master isn't running.
# ...but before actually starting a master, we'll double-check. This can
# be important because we can't tell that 'git@myhost.com' is the same
# as 'myhost.com' where "User git" is set up in the user's ~/.ssh/config file.
check_command = command_base + ['-O','check']
try:
Trace(': %s', ' '.join(check_command))
check_process = subprocess.Popen(check_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
check_process.communicate() # read output, but ignore it...
isnt_running = check_process.wait()
if not isnt_running:
# Our double-check found that the master _was_ infact running. Add to
# the list of keys.
_master_keys.add(key)
return True
except Exception:
# Ignore exceptions. We will fall back to the normal command and print
# to the log there.
pass
command = command_base[:1] + \
['-M', '-N'] + \
command_base[1:]
try:
Trace(': %s', ' '.join(command))
p = subprocess.Popen(command)
except Exception as e:
_ssh_master = False
print('\nwarn: cannot enable ssh control master for %s:%s\n%s'
% (host,port, str(e)), file=sys.stderr)
return False
_master_processes.append(p)
_master_keys.add(key)
time.sleep(1)
return True
finally:
_master_keys_lock.release()
def close_ssh():
global _master_keys_lock
terminate_ssh_clients()
for p in _master_processes:
try:
os.kill(p.pid, SIGTERM)
p.wait()
except OSError:
pass
del _master_processes[:]
_master_keys.clear()
d = ssh_sock(create=False)
if d:
try:
os.rmdir(os.path.dirname(d))
except OSError:
pass
# We're done with the lock, so we can delete it.
_master_keys_lock = None
URI_SCP = re.compile(r'^([^@:]*@?[^:/]{1,}):')
URI_ALL = re.compile(r'^([a-z][a-z+-]*)://([^@/]*@?[^/]*)/')
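# Illustrative examples (not in the original source):
#   URI_ALL matches scheme URLs such as 'ssh://user@review.example.com/'
#     (group 1 = 'ssh', group 2 = 'user@review.example.com')
#   URI_SCP matches scp-style paths such as 'user@review.example.com:path'
#     (group 1 = 'user@review.example.com')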
def GetSchemeFromUrl(url):
m = URI_ALL.match(url)
if m:
return m.group(1)
return None
@contextlib.contextmanager
def GetUrlCookieFile(url, quiet):
if url.startswith('persistent-'):
try:
p = subprocess.Popen(
['git-remote-persistent-https', '-print_config', url],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
cookieprefix = 'http.cookiefile='
proxyprefix = 'http.proxy='
cookiefile = None
proxy = None
for line in p.stdout:
line = line.strip()
if line.startswith(cookieprefix):
cookiefile = line[len(cookieprefix):]
if line.startswith(proxyprefix):
proxy = line[len(proxyprefix):]
# Leave subprocess open, as cookie file may be transient.
if cookiefile or proxy:
yield cookiefile, proxy
return
finally:
p.stdin.close()
if p.wait():
err_msg = p.stderr.read()
if ' -print_config' in err_msg:
pass # Persistent proxy doesn't support -print_config.
elif not quiet:
print(err_msg, file=sys.stderr)
except OSError as e:
if e.errno == errno.ENOENT:
pass # No persistent proxy.
raise
yield GitConfig.ForUser().GetString('http.cookiefile'), None
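# Illustrative usage (not in the original source):
#   with GetUrlCookieFile('https://example.com/repo.git', quiet=True) as (cookiefile, proxy):
#       ...  # pass cookiefile via http.cookiefile and proxy via http.proxy to git
# For non-persistent URLs this simply yields the user's configured
# http.cookiefile (or None) and no proxy.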
def _preconnect(url):
m = URI_ALL.match(url)
if m:
scheme = m.group(1)
host = m.group(2)
if ':' in host:
host, port = host.split(':')
else:
port = None
if scheme in ('ssh', 'git+ssh', 'ssh+git'):
return _open_ssh(host, port)
return False
m = URI_SCP.match(url)
if m:
host = m.group(1)
return _open_ssh(host)
return False
class Remote(object):
"""Configuration options related to a remote.
"""
def __init__(self, config, name):
self._config = config
self.name = name
self.url = self._Get('url')
self.review = self._Get('review')
self.projectname = self._Get('projectname')
self.fetch = list(map(RefSpec.FromString,
self._Get('fetch', all_keys=True)))
self._review_url = None
def _InsteadOf(self):
globCfg = GitConfig.ForUser()
urlList = globCfg.GetSubSections('url')
longest = ""
longestUrl = ""
for url in urlList:
key = "url." + url + ".insteadOf"
insteadOfList = globCfg.GetString(key, all_keys=True)
for insteadOf in insteadOfList:
if self.url.startswith(insteadOf) \
and len(insteadOf) > len(longest):
longest = insteadOf
longestUrl = url
if len(longest) == 0:
return self.url
return self.url.replace(longest, longestUrl, 1)
def PreConnectFetch(self):
connectionUrl = self._InsteadOf()
return _preconnect(connectionUrl)
def ReviewUrl(self, userEmail):
if self._review_url is None:
if self.review is None:
return None
u = self.review
if u.startswith('persistent-'):
u = u[len('persistent-'):]
if u.split(':')[0] not in ('http', 'https', 'sso'):
u = 'http://%s' % u
if u.endswith('/Gerrit'):
u = u[:len(u) - len('/Gerrit')]
if u.endswith('/ssh_info'):
u = u[:len(u) - len('/ssh_info')]
if not u.endswith('/'):
u += '/'
http_url = u
if u in REVIEW_CACHE:
self._review_url = REVIEW_CACHE[u]
elif 'REPO_HOST_PORT_INFO' in os.environ:
host, port = os.environ['REPO_HOST_PORT_INFO'].split()
self._review_url = self._SshReviewUrl(userEmail, host, port)
REVIEW_CACHE[u] = self._review_url
elif u.startswith('sso:'):
self._review_url = u # Assume it's right
REVIEW_CACHE[u] = self._review_url
else:
try:
info_url = u + 'ssh_info'
info = urllib.request.urlopen(info_url).read()
if info == 'NOT_AVAILABLE' or '<' in info:
# If `info` contains '<', we assume the server gave us some sort
# of HTML response back, like maybe a login page.
#
# Assume HTTP if SSH is not enabled or ssh_info doesn't look right.
self._review_url = http_url
else:
host, port = info.split()
self._review_url = self._SshReviewUrl(userEmail, host, port)
except urllib.error.HTTPError as e:
raise UploadError('%s: %s' % (self.review, str(e)))
except urllib.error.URLError as e:
raise UploadError('%s: %s' % (self.review, str(e)))
except HTTPException as e:
raise UploadError('%s: %s' % (self.review, e.__class__.__name__))
REVIEW_CACHE[u] = self._review_url
return self._review_url + self.projectname
def _SshReviewUrl(self, userEmail, host, port):
username = self._config.GetString('review.%s.username' % self.review)
if username is None:
username = userEmail.split('@')[0]
return 'ssh://%s@%s:%s/' % (username, host, port)
def ToLocal(self, rev):
"""Convert a remote revision string to something we have locally.
"""
if self.name == '.' or IsId(rev):
return rev
if not rev.startswith('refs/'):
rev = R_HEADS + rev
for spec in self.fetch:
if spec.SourceMatches(rev):
return spec.MapSource(rev)
if not rev.startswith(R_HEADS):
return rev
raise GitError('remote %s does not have %s' % (self.name, rev))
def WritesTo(self, ref):
"""True if the remote stores to the tracking ref.
"""
for spec in self.fetch:
if spec.DestMatches(ref):
return True
return False
def ResetFetch(self, mirror=False):
"""Set the fetch refspec to its default value.
"""
if mirror:
dst = 'refs/heads/*'
else:
dst = 'refs/remotes/%s/*' % self.name
self.fetch = [RefSpec(True, 'refs/heads/*', dst)]
def Save(self):
"""Save this remote to the configuration.
"""
self._Set('url', self.url)
self._Set('review', self.review)
self._Set('projectname', self.projectname)
self._Set('fetch', list(map(str, self.fetch)))
def _Set(self, key, value):
key = 'remote.%s.%s' % (self.name, key)
return self._config.SetString(key, value)
def _Get(self, key, all_keys=False):
key = 'remote.%s.%s' % (self.name, key)
return self._config.GetString(key, all_keys = all_keys)
class Branch(object):
"""Configuration options related to a single branch.
"""
def __init__(self, config, name):
self._config = config
self.name = name
self.merge = self._Get('merge')
r = self._Get('remote')
if r:
self.remote = self._config.GetRemote(r)
else:
self.remote = None
@property
def LocalMerge(self):
"""Convert the merge spec to a local name.
"""
if self.remote and self.merge:
return self.remote.ToLocal(self.merge)
return None
def Save(self):
"""Save this branch back into the configuration.
"""
if self._config.HasSection('branch', self.name):
if self.remote:
self._Set('remote', self.remote.name)
else:
self._Set('remote', None)
self._Set('merge', self.merge)
else:
fd = open(self._config.file, 'a')
try:
fd.write('[branch "%s"]\n' % self.name)
if self.remote:
fd.write('\tremote = %s\n' % self.remote.name)
if self.merge:
fd.write('\tmerge = %s\n' % self.merge)
finally:
fd.close()
def _Set(self, key, value):
key = 'branch.%s.%s' % (self.name, key)
return self._config.SetString(key, value)
def _Get(self, key, all_keys=False):
key = 'branch.%s.%s' % (self.name, key)
return self._config.GetString(key, all_keys = all_keys)
| []
| []
| [
"REPO_HOST_PORT_INFO"
]
| [] | ["REPO_HOST_PORT_INFO"] | python | 1 | 0 | |
Fujitsu/benchmarks/maskrcnn/implementations/implementation_closed/maskrcnn_benchmark/utils/mlperf_logger.py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
import subprocess
import torch
import numpy as np
from mlperf_logging import mllog
from mlperf_logging.mllog import constants
mllogger = mllog.get_mllogger()
def log_start(*args, **kwargs):
_log_print(mllogger.start, *args, **kwargs)
def log_end(*args, **kwargs):
_log_print(mllogger.end, *args, **kwargs)
def log_event(*args, **kwargs):
_log_print(mllogger.event, *args, **kwargs)
def _log_print(logger, *args, **kwargs):
"""
Wrapper for MLPerf compliance logging calls.
All arguments but 'log_all_ranks' are passed to
mlperf_logging.mllog.
If 'log_all_ranks' is set to True then all distributed workers will print
logging message, if set to False then only worker with rank=0 will print
the message.
"""
if 'stack_offset' not in kwargs:
kwargs['stack_offset'] = 3
if 'value' not in kwargs:
kwargs['value'] = None
if kwargs.pop('log_all_ranks', False):
log = True
else:
log = (get_rank() == 0)
if log:
logger(*args, **kwargs)
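# Illustrative usage (not part of the original file):
#   log_event(key=constants.GLOBAL_BATCH_SIZE, value=batch_size)
#       -> printed only on rank 0
#   log_event(key='train_samples', value=n, log_all_ranks=True)
#       -> printed by every worker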
def configure_logger(benchmark):
mllog.config(filename=os.path.join(os.path.dirname(os.path.abspath(__file__)), f'{benchmark}.log'))
mllogger = mllog.get_mllogger()
mllogger.logger.propagate = False
def mlperf_submission_log(benchmark):
required_dist_init = ['RANK', 'WORLD_SIZE', 'MASTER_ADDR', 'MASTER_PORT']
if all(var in os.environ for var in required_dist_init):
torch.distributed.init_process_group(backend='nccl', init_method='env://')
num_nodes = os.environ.get('SLURM_NNODES', 1)
configure_logger(benchmark)
log_event(
key=constants.SUBMISSION_BENCHMARK,
value=benchmark,
)
log_event(
key=constants.SUBMISSION_ORG,
value='Fujitsu')
log_event(
key=constants.SUBMISSION_DIVISION,
value='closed')
log_event(
key=constants.SUBMISSION_STATUS,
value='onprem')
log_event(
key=constants.SUBMISSION_PLATFORM,
value=f'1xGX2570M5')
def barrier():
"""
Works as a temporary distributed barrier; currently PyTorch
doesn't implement a barrier for the NCCL backend.
Calls all_reduce on a dummy tensor and synchronizes with the GPU.
"""
if torch.distributed.is_initialized():
torch.distributed.all_reduce(torch.cuda.FloatTensor(1))
torch.cuda.synchronize()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def generate_seeds(rng, size):
seeds = [rng.randint(0, 2**32 - 1) for _ in range(size)]
return seeds
def broadcast_seeds(seeds, device):
if torch.distributed.is_initialized():
seeds_tensor = torch.LongTensor(seeds).to(device)
torch.distributed.broadcast(seeds_tensor, 0)
seeds = seeds_tensor.tolist()
return seeds
def set_seeds(args):
if args.no_cuda:
device = torch.device('cpu')
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda')
# make sure that all workers have the same master seed
args.seed = broadcast_seeds(args.seed, device)
local_seed = (args.seed + get_rank()) % 2**32
print(get_rank(), "Using seed = {}".format(local_seed))
torch.manual_seed(local_seed)
np.random.seed(seed=local_seed)
return local_seed
| [] | [] | ["SLURM_NNODES"] | [] | ["SLURM_NNODES"] | python | 1 | 0 | |
vendor/github.com/influx6/gobuild/srcpath/srcpath.go | package srcpath
import (
"os"
"path/filepath"
)
var (
goPath = os.Getenv("GOPATH")
goSrcPath = filepath.Join(goPath, "src")
)
// SrcPath returns current go src path.
func SrcPath() string {
return goSrcPath
}
// FromSrcPath returns the giving path as absolute from the gosrc path.
func FromSrcPath(pr string) string {
return filepath.Join(goSrcPath, pr)
}
// RelativeToSrc returns a path that is relative to the go src path.
func RelativeToSrc(path string) (string, error) {
return filepath.Rel(goSrcPath, path)
}
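// Illustrative usage (a sketch; the import path below is a placeholder, not from this package):
//   abs := FromSrcPath("github.com/example/project")  // -> $GOPATH/src/github.com/example/project
//   rel, err := RelativeToSrc(abs)                    // -> "github.com/example/project", nil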
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 |
elyra/pipeline/airflow/processor_airflow.py | #
# Copyright 2018-2022 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from datetime import datetime
import json
import os
import re
import string
import tempfile
import time
from typing import Dict
from typing import List
from typing import Union
import autopep8
from jinja2 import Environment
from jinja2 import PackageLoader
from traitlets import CUnicode
from traitlets import List as ListTrait
from elyra._version import __version__
from elyra.airflow.operator import BootscriptBuilder
from elyra.metadata.schemaspaces import RuntimeImages
from elyra.metadata.schemaspaces import Runtimes
from elyra.pipeline.component_catalog import ComponentCache
from elyra.pipeline.pipeline import GenericOperation
from elyra.pipeline.pipeline import Operation
from elyra.pipeline.processor import PipelineProcessor
from elyra.pipeline.processor import PipelineProcessorResponse
from elyra.pipeline.processor import RuntimePipelineProcessor
from elyra.pipeline.runtime_type import RuntimeProcessorType
from elyra.util.github import GithubClient
try:
from elyra.util.gitlab import GitLabClient
except ImportError:
pass
from elyra.util.gitutil import SupportedGitTypes
from elyra.util.path import get_absolute_path
class AirflowPipelineProcessor(RuntimePipelineProcessor):
_type = RuntimeProcessorType.APACHE_AIRFLOW
_name = 'airflow'
# Provide users with the ability to identify a writable directory in the
# running container where the notebook | script is executed. The location
# must exist and be known before the container is started.
# Defaults to `/tmp`
WCD = os.getenv('ELYRA_WRITABLE_CONTAINER_DIR', '/tmp').strip().rstrip('/')
# This specifies the default airflow operators included with Elyra. Any Airflow-based
# custom connectors should create/extend the elyra configuration file to include
# those fully-qualified operator/class names.
available_airflow_operators = ListTrait(
CUnicode(),
["airflow.operators.slack_operator.SlackAPIPostOperator",
"airflow.operators.bash_operator.BashOperator",
"airflow.operators.email_operator.EmailOperator",
"airflow.operators.http_operator.SimpleHttpOperator",
"airflow.contrib.operators.spark_sql_operator.SparkSqlOperator",
"airflow.contrib.operators.spark_submit_operator.SparkSubmitOperator"],
help="""List of available Apache Airflow operator names.
Operators available for use within Apache Airflow pipelines. These operators must
be fully qualified (i.e., prefixed with their package names).
""",
).tag(config=True)
# Contains mappings from class to import statement for each available Airflow operator
class_import_map = {}
def __init__(self, root_dir, **kwargs):
super().__init__(root_dir, **kwargs)
if not self.class_import_map: # Only need to load once
for package in self.available_airflow_operators:
parts = package.rsplit(".", 1)
self.class_import_map[parts[1]] = f"from {parts[0]} import {parts[1]}"
self.log.debug(f"class_package_map = {self.class_import_map}")
def process(self, pipeline):
t0_all = time.time()
timestamp = datetime.now().strftime("%m%d%H%M%S")
pipeline_name = f'{pipeline.name}-{timestamp}'
runtime_configuration = self._get_metadata_configuration(schemaspace=Runtimes.RUNTIMES_SCHEMASPACE_ID,
name=pipeline.runtime_config)
api_endpoint = runtime_configuration.metadata.get('api_endpoint')
cos_endpoint = runtime_configuration.metadata.get('cos_endpoint')
cos_bucket = runtime_configuration.metadata.get('cos_bucket')
git_type = SupportedGitTypes.get_instance_by_name(
runtime_configuration.metadata.get('git_type',
SupportedGitTypes.GITHUB.name))
if git_type == SupportedGitTypes.GITLAB and SupportedGitTypes.is_enabled(SupportedGitTypes.GITLAB) is False:
raise ValueError(
"Python package `python-gitlab` is not installed. "
"Please install using `elyra[gitlab]` to use GitLab as DAG repository."
)
github_api_endpoint = runtime_configuration.metadata.get('github_api_endpoint')
github_repo_token = runtime_configuration.metadata.get('github_repo_token')
github_repo = runtime_configuration.metadata.get('github_repo')
github_branch = runtime_configuration.metadata.get('github_branch')
self.log_pipeline_info(pipeline_name, "Submitting pipeline")
with tempfile.TemporaryDirectory() as temp_dir:
pipeline_export_path = os.path.join(temp_dir, f'{pipeline_name}.py')
self.log.debug("Creating temp directory %s", temp_dir)
pipeline_filepath = self.create_pipeline_file(pipeline=pipeline,
pipeline_export_format="py",
pipeline_export_path=pipeline_export_path,
pipeline_name=pipeline_name)
self.log.debug("Uploading pipeline file: %s", pipeline_filepath)
try:
if git_type == SupportedGitTypes.GITHUB:
git_client = GithubClient(server_url=github_api_endpoint,
token=github_repo_token,
repo=github_repo,
branch=github_branch)
else:
git_client = GitLabClient(server_url=github_api_endpoint,
token=github_repo_token,
project=github_repo,
branch=github_branch)
except BaseException as be:
raise RuntimeError(f'Unable to create a connection to {github_api_endpoint}: {str(be)}') from be
git_client.upload_dag(pipeline_filepath, pipeline_name)
self.log.info('Waiting for Airflow Scheduler to process and start the pipeline')
download_url = git_client.get_git_url(api_url=github_api_endpoint,
repository_name=github_repo,
repository_branch=github_branch)
self.log_pipeline_info(pipeline_name,
f"pipeline pushed to git: {download_url}",
duration=(time.time() - t0_all))
return AirflowPipelineProcessorResponse(
git_url=f'{download_url}',
run_url=f'{api_endpoint}',
object_storage_url=f'{cos_endpoint}',
object_storage_path=f'/{cos_bucket}/{pipeline_name}',
)
def export(self, pipeline, pipeline_export_format, pipeline_export_path, overwrite):
# Verify that the AirflowPipelineProcessor supports the given export format
self._verify_export_format(pipeline_export_format)
timestamp = datetime.now().strftime("%m%d%H%M%S")
pipeline_name = f'{pipeline.name}-{timestamp}'
absolute_pipeline_export_path = get_absolute_path(self.root_dir, pipeline_export_path)
if os.path.exists(absolute_pipeline_export_path) and not overwrite:
raise ValueError("File " + absolute_pipeline_export_path + " already exists.")
self.log_pipeline_info(pipeline_name, f"exporting pipeline as a .{pipeline_export_format} file")
new_pipeline_file_path = self.create_pipeline_file(pipeline=pipeline,
pipeline_export_format="py",
pipeline_export_path=absolute_pipeline_export_path,
pipeline_name=pipeline_name)
return new_pipeline_file_path
def _cc_pipeline(self, pipeline, pipeline_name):
runtime_configuration = self._get_metadata_configuration(schemaspace=Runtimes.RUNTIMES_SCHEMASPACE_ID,
name=pipeline.runtime_config)
image_namespace = self._get_metadata_configuration(schemaspace=RuntimeImages.RUNTIME_IMAGES_SCHEMASPACE_ID)
cos_endpoint = runtime_configuration.metadata.get('cos_endpoint')
cos_username = runtime_configuration.metadata.get('cos_username')
cos_password = runtime_configuration.metadata.get('cos_password')
cos_secret = runtime_configuration.metadata.get('cos_secret')
cos_directory = pipeline_name
cos_bucket = runtime_configuration.metadata.get('cos_bucket')
# Create dictionary that maps component Id to its ContainerOp instance
target_ops = []
self.log_pipeline_info(pipeline_name,
f"processing pipeline dependencies to: {cos_endpoint} "
f"bucket: {cos_bucket} folder: {pipeline_name}")
t0_all = time.time()
# Sort operations based on dependency graph (topological order)
sorted_operations = PipelineProcessor._sort_operations(pipeline.operations)
# Determine whether access to cloud storage is required and check connectivity
for operation in sorted_operations:
if isinstance(operation, GenericOperation):
self._verify_cos_connectivity(runtime_configuration)
break
# All previous operation outputs should be propagated throughout the pipeline.
# In order to process this recursively, the current operation's inputs should be combined
# from its parent's inputs (which, themselves are derived from the outputs of their parent)
# and its parent's outputs.
PipelineProcessor._propagate_operation_inputs_outputs(pipeline, sorted_operations)
# Scrub all node labels of invalid characters
scrubbed_operations = self._scrub_invalid_characters_from_list(sorted_operations)
# Generate unique names for all operations
unique_operations = self._create_unique_node_names(scrubbed_operations)
for operation in unique_operations:
if isinstance(operation, GenericOperation):
operation_artifact_archive = self._get_dependency_archive_name(operation)
self.log.debug("Creating pipeline component:\n {op} archive : {archive}".format(
op=operation, archive=operation_artifact_archive))
# Collect env variables
pipeline_envs = self._collect_envs(operation,
cos_secret=cos_secret,
cos_username=cos_username,
cos_password=cos_password)
# Generate unique ELYRA_RUN_NAME value and expose it as an
# environment variable in the container.
# Notebook | script nodes are implemented using the kubernetes_pod_operator
# (https://airflow.apache.org/docs/apache-airflow/1.10.12/_api/airflow/contrib/operators/kubernetes_pod_operator/index.html)
# Environment variables that are passed to this operator are
# pre-processed by Airflow at runtime and placeholder values (expressed as '{{ xyz }}'
# - see https://airflow.apache.org/docs/apache-airflow/1.10.12/macros-ref#default-variables)
# replaced.
if pipeline_envs is None:
pipeline_envs = {}
pipeline_envs['ELYRA_RUN_NAME'] = f'{pipeline_name}-{{{{ ts_nodash }}}}'
image_pull_policy = None
runtime_image_pull_secret = None
for image_instance in image_namespace:
if image_instance.metadata['image_name'] == operation.runtime_image:
if image_instance.metadata.get('pull_policy'):
image_pull_policy = image_instance.metadata['pull_policy']
if image_instance.metadata.get('pull_secret'):
runtime_image_pull_secret = image_instance.metadata['pull_secret']
break
bootscript = BootscriptBuilder(filename=operation.filename,
cos_endpoint=cos_endpoint,
cos_bucket=cos_bucket,
cos_directory=cos_directory,
cos_dependencies_archive=operation_artifact_archive,
inputs=operation.inputs,
outputs=operation.outputs)
target_op = {'notebook': operation.name,
'id': operation.id,
'argument_list': bootscript.container_cmd,
'runtime_image': operation.runtime_image,
'pipeline_envs': pipeline_envs,
'parent_operation_ids': operation.parent_operation_ids,
'image_pull_policy': image_pull_policy,
'cpu_request': operation.cpu,
'mem_request': operation.memory,
'gpu_limit': operation.gpu,
'operator_source': operation.component_params['filename'],
'is_generic_operator': True,
'doc': operation.doc
}
if runtime_image_pull_secret is not None:
target_op['runtime_image_pull_secret'] = runtime_image_pull_secret
target_ops.append(target_op)
self.log_pipeline_info(pipeline_name,
f"processing operation dependencies for id: {operation.id}",
operation_name=operation.name)
self._upload_dependencies_to_object_store(runtime_configuration,
pipeline_name,
operation)
else:
# Retrieve component from cache
component = ComponentCache.instance().get_component(self._type, operation.classifier)
# Convert the user-entered value of certain properties according to their type
for component_property in component.properties:
# Skip properties for which no value was given
if component_property.ref not in operation.component_params.keys():
continue
# Get corresponding property's value from parsed pipeline
property_value_dict = operation.component_params.get(component_property.ref)
# The type and value of this property can vary depending on what the user chooses
# in the pipeline editor. So we get the current active parameter (e.g. StringControl)
# from the activeControl value
active_property_name = property_value_dict['activeControl']
# Once we have the active parameter name (e.g. StringControl) we can retrieve the value
# assigned to it
property_value = property_value_dict.get(active_property_name, None)
# If the value is not found, assign it the default value assigned in parser
if property_value is None:
property_value = component_property.value
self.log.debug(f"Active property name : {active_property_name}, value : {property_value}")
self.log.debug(f"Processing component parameter '{component_property.name}' "
f"of type '{component_property.data_type}'")
if property_value and str(property_value)[0] == '{' and str(property_value)[-1] == '}' and \
isinstance(json.loads(json.dumps(property_value)), dict) and \
set(json.loads(json.dumps(property_value)).keys()) == {'value', 'option'}:
parent_node_name = self._get_node_name(target_ops,
json.loads(json.dumps(property_value))['value'])
processed_value = "\"{{ ti.xcom_pull(task_ids='" + parent_node_name + "') }}\""
operation.component_params[component_property.ref] = processed_value
elif component_property.data_type == "boolean":
operation.component_params[component_property.ref] = property_value
elif component_property.data_type == "string":
# Add surrounding quotation marks to string value for correct rendering
# in jinja DAG template
operation.component_params[component_property.ref] = json.dumps(property_value)
elif component_property.data_type == 'dictionary':
processed_value = self._process_dictionary_value(property_value)
operation.component_params[component_property.ref] = processed_value
elif component_property.data_type == 'list':
processed_value = self._process_list_value(property_value)
operation.component_params[component_property.ref] = processed_value
# Remove inputs and outputs from params dict until support for data exchange is provided
operation.component_params_as_dict.pop("inputs")
operation.component_params_as_dict.pop("outputs")
# Locate the import statement. If not found raise...
import_stmts = []
# Check for import statement on Component object, otherwise get from class_import_map
import_stmt = component.import_statement or self.class_import_map.get(component.name)
if import_stmt:
import_stmts.append(import_stmt)
else:
# If we didn't find a mapping to the import statement, let's check if the component
# name includes a package prefix. If it does, log a warning, but proceed, otherwise
# raise an exception.
if len(component.name.split(".")) > 1: # We (presumably) have a package prefix
self.log.warning(f"Operator '{component.name}' of node '{operation.name}' is not configured "
f"in the list of available Airflow operators but appears to include a "
f"package prefix and processing will proceed.")
else:
raise ValueError(f"Operator '{component.name}' of node '{operation.name}' is not configured "
f"in the list of available operators. Please add the fully-qualified "
f"package name for '{component.name}' to the "
f"AirflowPipelineProcessor.available_airflow_operators configuration.")
target_op = {'notebook': operation.name,
'id': operation.id,
'imports': import_stmts,
'class_name': component.name,
'parent_operation_ids': operation.parent_operation_ids,
'component_params': operation.component_params_as_dict,
'operator_source': component.component_source,
'is_generic_operator': False,
'doc': operation.doc
}
target_ops.append(target_op)
ordered_target_ops = OrderedDict()
while target_ops:
for i in range(len(target_ops)):
target_op = target_ops.pop(0)
if not target_op['parent_operation_ids']:
ordered_target_ops[target_op['id']] = target_op
self.log.debug("Root Node added : %s", ordered_target_ops[target_op['id']])
elif all(deps in ordered_target_ops.keys() for deps in target_op['parent_operation_ids']):
ordered_target_ops[target_op['id']] = target_op
self.log.debug("Dependent Node added : %s", ordered_target_ops[target_op['id']])
else:
target_ops.append(target_op)
self.log_pipeline_info(pipeline_name, "pipeline dependencies processed", duration=(time.time() - t0_all))
return ordered_target_ops
def create_pipeline_file(self, pipeline, pipeline_export_format, pipeline_export_path, pipeline_name):
self.log.info('Creating pipeline definition as a .' + pipeline_export_format + ' file')
if pipeline_export_format == "json":
with open(pipeline_export_path, 'w', encoding='utf-8') as file:
json.dump(pipeline_export_path, file, ensure_ascii=False, indent=4)
else:
# Load template from installed elyra package
loader = PackageLoader('elyra', 'templates/airflow')
template_env = Environment(loader=loader)
template_env.filters['regex_replace'] = lambda string: self._scrub_invalid_characters(string)
template = template_env.get_template('airflow_template.jinja2')
target_ops = self._cc_pipeline(pipeline, pipeline_name)
runtime_configuration = self._get_metadata_configuration(schemaspace=Runtimes.RUNTIMES_SCHEMASPACE_ID,
name=pipeline.runtime_config)
user_namespace = runtime_configuration.metadata.get('user_namespace') or 'default'
cos_secret = runtime_configuration.metadata.get('cos_secret')
pipeline_description = pipeline.description
if pipeline_description is None:
pipeline_description = f"Created with Elyra {__version__} pipeline editor using `{pipeline.source}`."
python_output = template.render(operations_list=target_ops,
pipeline_name=pipeline_name,
namespace=user_namespace,
cos_secret=cos_secret,
kube_config_path=None,
is_paused_upon_creation='False',
in_cluster='True',
pipeline_description=pipeline_description)
# Write to python file and fix formatting
with open(pipeline_export_path, "w") as fh:
# Defer the import to postpone logger messages: https://github.com/psf/black/issues/2058
import black
autopep_output = autopep8.fix_code(python_output)
output_to_file = black.format_str(autopep_output, mode=black.FileMode())
fh.write(output_to_file)
return pipeline_export_path
def _create_unique_node_names(self, operation_list: List[Operation]) -> List[Operation]:
unique_names = {}
for operation in operation_list:
# Ensure operation name is unique
new_name = operation.name
while new_name in unique_names:
new_name = f"{operation.name}_{unique_names[operation.name]}"
unique_names[operation.name] += 1
operation.name = new_name
unique_names[operation.name] = 1
return operation_list
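# Example (sketch): if two nodes are both named "load_data", they come out as
# "load_data" and "load_data_1".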
def _scrub_invalid_characters_from_list(self, operation_list: List[Operation]) -> List[Operation]:
for operation in operation_list:
operation.name = self._scrub_invalid_characters(operation.name)
return operation_list
def _scrub_invalid_characters(self, name: str) -> str:
chars = re.escape(string.punctuation)
clean_name = re.sub(r'[' + chars + '\\s]', '_', name) # noqa E226
return clean_name
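# Example (sketch): _scrub_invalid_characters("train model: v1.0") returns "train_model__v1_0".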
def _process_dictionary_value(self, value: str) -> Union[Dict, str]:
"""
For component parameters of type dictionary, if a string value is returned from the superclass
method, it must be converted to include surrounding quotation marks for correct rendering
in jinja DAG template.
"""
converted_value = super()._process_dictionary_value(value)
if isinstance(converted_value, str):
converted_value = json.dumps(converted_value)
return converted_value
def _process_list_value(self, value: str) -> Union[List, str]:
"""
For component parameters of type list, if a string value is returned from the superclass
method, it must be converted to include surrounding quotation marks for correct rendering
in jinja DAG template.
"""
converted_value = super()._process_list_value(value)
if isinstance(converted_value, str):
converted_value = json.dumps(converted_value)
return converted_value
def _get_node_name(self, operations_list: list, node_id: str) -> str:
for operation in operations_list:
if operation['id'] == node_id:
return operation['notebook']
class AirflowPipelineProcessorResponse(PipelineProcessorResponse):
_type = RuntimeProcessorType.APACHE_AIRFLOW
_name = 'airflow'
def __init__(self, git_url, run_url, object_storage_url, object_storage_path):
super().__init__(run_url, object_storage_url, object_storage_path)
self.git_url = git_url
def to_json(self):
response = super().to_json()
response['git_url'] = self.git_url
return response
| [] | [] | ["ELYRA_WRITABLE_CONTAINER_DIR"] | [] | ["ELYRA_WRITABLE_CONTAINER_DIR"] | python | 1 | 0 | |
StartHere/src/main/java/com/lambdaschool/starthere/config/AuthorizationServerConfig.java | package com.lambdaschool.starthere.config;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.oauth2.config.annotation.configurers.ClientDetailsServiceConfigurer;
import org.springframework.security.oauth2.config.annotation.web.configuration.AuthorizationServerConfigurerAdapter;
import org.springframework.security.oauth2.config.annotation.web.configuration.EnableAuthorizationServer;
import org.springframework.security.oauth2.config.annotation.web.configurers.AuthorizationServerEndpointsConfigurer;
import org.springframework.security.oauth2.provider.token.TokenStore;
@Configuration
@EnableAuthorizationServer
public class AuthorizationServerConfig extends AuthorizationServerConfigurerAdapter
{
static final String CLIENT_ID = System.getenv("OAUTHCLIENTID"); // read from environment variable
static final String CLIENT_SECRET = System.getenv("OAUTHCLIENTSECRET"); // read from environment variable
// static final String CLIENT_ID = "lambda-client";
// static final String CLIENT_SECRET = "lambda-secret";
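// For local testing these could be exported before starting the application
// (values below are placeholders, not real credentials):
//   export OAUTHCLIENTID=lambda-client
//   export OAUTHCLIENTSECRET=lambda-secret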
static final String GRANT_TYPE_PASSWORD = "password";
static final String AUTHORIZATION_CODE = "authorization_code";
static final String IMPLICIT = "implicit";
static final String SCOPE_READ = "read";
static final String SCOPE_WRITE = "write";
static final String TRUST = "trust";
static final int ACCESS_TOKEN_VALIDITY_SECONDS = 1 * 60 * 60;
@Autowired
private TokenStore tokenStore;
@Autowired
private AuthenticationManager authenticationManager;
@Autowired
private PasswordEncoder encoder;
@Override
public void configure(ClientDetailsServiceConfigurer configurer) throws Exception
{
// .authorizedGrantTypes(GRANT_TYPE_PASSWORD, AUTHORIZATION_CODE, REFRESH_TOKEN, IMPLICIT)
configurer.inMemory()
.withClient(CLIENT_ID)
.secret(encoder.encode(CLIENT_SECRET))
.authorizedGrantTypes(GRANT_TYPE_PASSWORD,
AUTHORIZATION_CODE,
IMPLICIT)
.scopes(SCOPE_READ,
SCOPE_WRITE,
TRUST)
.accessTokenValiditySeconds(ACCESS_TOKEN_VALIDITY_SECONDS);
}
@Override
public void configure(AuthorizationServerEndpointsConfigurer endpoints) throws Exception
{
endpoints.tokenStore(tokenStore)
.authenticationManager(authenticationManager);
endpoints.pathMapping("/oauth/token",
"/login");
}
}
| ["\"OAUTHCLIENTID\"", "\"OAUTHCLIENTSECRET\""] | [] | ["OAUTHCLIENTID", "OAUTHCLIENTSECRET"] | [] | ["OAUTHCLIENTID", "OAUTHCLIENTSECRET"] | java | 2 | 0 |
src/batou/utils.py | from batou import output, DeploymentError
from collections import defaultdict
import contextlib
import fcntl
import hashlib
import itertools
import os
import pkg_resources
import socket
import subprocess
import sys
import time
def self_id():
template = 'batou/{version} ({python}, {system})'
system = os.uname()
system = ' '.join([system[0], system[2], system[4]])
version = pkg_resources.require("batou")[0].version
python = sys.implementation.name
python += ' {0}.{1}.{2}-{3}{4}'.format(*sys.version_info)
return template.format(**locals())
class MultiFile(object):
def __init__(self, files):
self.files = files
def write(self, value):
for file in self.files:
file.write(value)
def flush(self):
for file in self.files:
file.flush()
@contextlib.contextmanager
def locked(filename):
# XXX can we make this not leave files around?
with open(filename, 'a+') as lockfile:
try:
fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
print('Could not acquire lock {}'.format(filename),
file=sys.stderr)
raise RuntimeError(
'cannot create lock "%s": more than one instance running '
'concurrently?' % lockfile, lockfile)
# publishing the process id comes in handy for debugging
lockfile.seek(0)
lockfile.truncate()
print(os.getpid(), file=lockfile)
lockfile.flush()
yield
lockfile.seek(0)
lockfile.truncate()
def flatten(list_of_lists):
return list(itertools.chain.from_iterable(list_of_lists))
def notify_send(title, description):
subprocess.call(['notify-send', title, description])
def notify_macosx(title, description):
subprocess.call([
'osascript', '-e',
'display notification "{}" with title "{}"'.format(
description, title)])
def notify_none(title, description):
pass
try:
subprocess.check_output(
['which', 'osascript'], stderr=subprocess.STDOUT)
notify = notify_macosx
except (subprocess.CalledProcessError, OSError):
try:
subprocess.check_output(
['which', 'notify-send'], stderr=subprocess.STDOUT)
notify = notify_send
except (subprocess.CalledProcessError, OSError):
notify = notify_none
resolve_override = {}
resolve_v6_override = {}
def resolve(host, resolve_override=resolve_override):
address = resolve_override.get(host)
if not address:
try:
address = socket.gethostbyname(host)
except socket.gaierror:
return None
return address
def resolve_v6(host, port, resolve_override=resolve_v6_override):
address = resolve_override.get(host)
if not address:
try:
address = socket.getaddrinfo(
host, int(port),
socket.AF_INET6)[0][4][0]
except socket.gaierror:
address = None
if address and address.startswith('fe80:'):
# Don't hand out link-local addresses. This happens with
# vagrant, and does not help as services cannot bind those
# addresses without additional configuration, i.e. the
# interface.
address = None
return address
class Address(object):
"""An internet service address that can be listened and connected to.
The constructor address is expected to be the address that can be
connected to. The listen address will be computed automatically.
.. code-block:: pycon
>>> x = Address('localhost', 80)
>>> str(x.connect)
'localhost:80'
>>> str(x.listen)
'127.0.0.1:80'
"""
#: The connect address as it should be used when configuring clients.
#: This is a :py:class:`batou.utils.NetLoc` object.
connect = None
#: The listen (or bind) address as it should be used when configuring
#: servers. This is a :py:class:`batou.utils.NetLoc` object.
listen = None
#: The IPv6 listen (or bind) address as it should be used when configuring
#: servers. This is a :py:class:`batou.utils.NetLoc` object or None, if
#: there is no IPv6 address.
listen_v6 = None
def __init__(self, connect_address, port=None):
if ':' in connect_address:
connect, port = connect_address.split(':')
else:
connect = connect_address
if port is None:
raise ValueError('Need port for service address.')
self.connect = NetLoc(connect, str(port))
v4_address = resolve(connect)
if v4_address:
self.listen = NetLoc(v4_address, str(port))
v6_address = resolve_v6(connect, port)
if v6_address:
self.listen_v6 = NetLoc(v6_address, str(port))
if not self.listen and not self.listen_v6:
raise socket.gaierror("No v4 or v6 address for %r" % connect)
def __lt__(self, other):
if isinstance(other, Address):
return str(self) < str(other)
return NotImplemented
def __str__(self):
return str(self.connect)
class NetLoc(object):
"""A network location specified by host and port.
Network locations can automatically render an appropriate string
representation:
.. code-block:: pycon
>>> x = NetLoc('127.0.0.1')
>>> x.host
'127.0.0.1'
>>> x.port
None
>>> str(x)
'127.0.0.1'
>>> y = NetLoc('127.0.0.1', 80)
>>> str(y)
'127.0.0.1:80'
"""
#: The host part of this network location. Can be a hostname or IP address.
host = None
#: The port of this network location. Can be ``None`` or an integer.
port = None
def __init__(self, host, port=None):
self.host = host
self.port = port
def __str__(self):
if self.port:
if ':' in self.host: # ipv6
fmt = '[{self.host}]:{self.port}'
else:
fmt = '{self.host}:{self.port}'
else:
fmt = '{self.host}'
return fmt.format(self=self)
def revert_graph(graph):
graph = ensure_graph_data(graph)
reverse_graph = defaultdict(set)
for node, dependencies in list(graph.items()):
# Ensure all nodes will exist
reverse_graph[node]
for dependency in dependencies:
reverse_graph[dependency].add(node)
return reverse_graph
def ensure_graph_data(graph):
# Ensure that all nodes exist as keys even if they don't have outgoing
# relations.
for node, relations in list(graph.items()):
for relation in relations:
if relation not in graph:
graph[relation] = set()
return graph
class CycleError(ValueError):
def __str__(self):
message = []
components = list(self.args[0].items())
components.sort(key=lambda x: x[0].name)
for component, subs in components:
message.append(component.name + ' depends on')
for sub in subs:
message.append(' ' + sub.name)
return '\n'.join(message)
def remove_nodes_without_outgoing_edges(graph):
for node, dependencies in list(graph.items()):
if not dependencies:
del graph[node]
def topological_sort(graph):
# Take a directed graph and provide a topological sort of all nodes.
#
# The graph is given as
#
# {node: [dependency, dependency], ...}
#
# If the graph has cycles a CycleError will be raised.
graph = ensure_graph_data(graph)
sorted = []
reverse_graph = revert_graph(graph)
roots = [node for node, incoming in list(reverse_graph.items())
if not incoming]
while roots:
root = roots.pop()
sorted.append(root)
for node in list(graph[root]):
graph[root].remove(node)
reverse_graph[node].remove(root)
if not reverse_graph[node]:
roots.append(node)
if any(graph.values()):
# Simplify the graph a bit to make it easier to spot the cycle.
remove_nodes_without_outgoing_edges(graph)
raise CycleError(dict(graph))
return sorted
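# Example (sketch): topological_sort({'c': {'b'}, 'b': {'a'}, 'a': set()}) returns
# ['c', 'b', 'a'] -- each node is listed before the nodes it depends on.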
class CmdExecutionError(DeploymentError, RuntimeError):
def __init__(self, cmd, returncode, stdout, stderr):
self.cmd = cmd
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
self.args = (cmd, returncode, stdout, stderr)
def report(self):
output.error(self.cmd)
output.tabular("Return code", str(self.returncode), red=True)
output.line('STDOUT', red=True)
output.annotate(self.stdout)
output.line('STDERR', red=True)
output.annotate(self.stderr)
def cmd(cmd, silent=False, ignore_returncode=False, communicate=True,
env=None, acceptable_returncodes=[0], encoding='utf-8'):
if not isinstance(cmd, str):
# We use `shell=True`, so the command needs to be a single string and
# we need to pay attention to shell quoting.
quoted_args = []
for arg in cmd:
arg = arg.replace('\'', '\\\'')
if ' ' in arg:
arg = "'{}'".format(arg)
quoted_args.append(arg)
cmd = ' '.join(quoted_args)
if env is not None:
add_to_env = env
env = os.environ.copy()
env.update(add_to_env)
output.annotate('cmd: {}'.format(cmd), debug=True)
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=True,
env=env)
if not communicate:
# XXX See #12550
return process
stdout, stderr = process.communicate()
if encoding is not None:
stdout = stdout.decode(encoding, errors='replace')
stderr = stderr.decode(encoding, errors='replace')
if process.returncode not in acceptable_returncodes:
if not ignore_returncode:
raise CmdExecutionError(
cmd, process.returncode, stdout, stderr)
return stdout, stderr
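# Illustrative call (a sketch; the command is a placeholder):
#   stdout, stderr = cmd(['tar', 'czf', 'backup.tgz', 'data'])
# A non-zero exit status raises CmdExecutionError unless ignore_returncode is set
# or the code is listed in acceptable_returncodes.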
class Timer(object):
def __init__(self, note):
self.duration = 0
self.note = note
def __enter__(self):
self.started = time.time()
def __exit__(self, exc1, exc2, exc3):
self.duration = time.time() - self.started
output.annotate(self.note + ' took %fs' % self.duration, debug=True)
def hash(path, function='sha_512'):
h = getattr(hashlib, function)()
with open(path, 'rb') as f:
chunk = f.read(64*1024)
while chunk:
h.update(chunk)
chunk = f.read(64*1024)
return h.hexdigest()
| [] | [] | [] | [] | [] | python | 0 | 0 |
config/wsgi.py | """
WSGI config for meeting tool backend project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# meeting_tool_backend directory.
app_path = os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
)
sys.path.append(os.path.join(app_path, "meeting_tool_backend"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [] | [] | ["DJANGO_SETTINGS_MODULE"] | [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
lib/src/layers/RNN.py | from src.layers.LayerHelper import *
from settings import LayerSettings as layerSettings
import tensorflow as tf
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # select GPU 0; must be set before TensorFlow initializes CUDA
def LSTM(name_, inputTensor_, numberOfOutputs_, isTraining_, dropoutProb_=None):
with tf.name_scope(name_):
cell = tf.nn.rnn_cell.LSTMCell(num_units=numberOfOutputs_,
use_peepholes=True,
initializer=layerSettings.LSTM_INITIALIZER,
forget_bias=1.0,
state_is_tuple=True,
activation=tf.nn.tanh,
name=name_+"_cell")
if dropoutProb_ != None:
dropoutProbTensor = tf.cond(isTraining_, lambda: 0.5, lambda: 1.0)
cell = tf.nn.rnn_cell.DropoutWrapper(cell,
input_keep_prob=dropoutProbTensor,
output_keep_prob=dropoutProbTensor)
statePlaceHolder = tf.nn.rnn_cell.LSTMStateTuple( tf.placeholder(layerSettings.FLOAT_TYPE, [None, numberOfOutputs_]),
tf.placeholder(layerSettings.FLOAT_TYPE, [None, numberOfOutputs_]) )
outputTensor, stateTensor = tf.nn.dynamic_rnn( cell=cell,
initial_state=statePlaceHolder,
inputs=inputTensor_)
# Add Regularization Loss
for eachVariable in tf.trainable_variables():
if name_ in eachVariable.name:
if ('bias' not in eachVariable.name)and(layerSettings.REGULARIZER_WEIGHTS_DECAY != None):
regularizationLoss = L2_Regularizer(eachVariable)
tf.losses.add_loss(regularizationLoss, loss_collection=tf.GraphKeys.REGULARIZATION_LOSSES)
return outputTensor, stateTensor, statePlaceHolder
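# Illustrative call (a sketch; tensor and placeholder names are assumptions):
#   outputs, state, state_ph = LSTM('lstm1', input_tensor, numberOfOutputs_=128,
#                                   isTraining_=is_training_placeholder, dropoutProb_=0.5)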
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
cmd/e2e-test/main_test.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"testing"
"time"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/ingress-gce/pkg/e2e"
_ "k8s.io/ingress-gce/pkg/klog"
"k8s.io/ingress-gce/pkg/version"
"k8s.io/klog"
// Pull in the auth library for GCP.
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
var (
flags struct {
run bool
inCluster bool
kubeconfig string
project string
region string
network string
seed int64
destroySandboxes bool
handleSIGINT bool
gceEndpointOverride string
createILBSubnet bool
enableIstio bool
}
Framework *e2e.Framework
)
func init() {
home := os.Getenv("HOME")
if home != "" {
flag.StringVar(&flags.kubeconfig, "kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
flag.StringVar(&flags.kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
}
flag.BoolVar(&flags.run, "run", false, "set to true to run tests (suppresses test suite from 'go test ./...')")
flag.BoolVar(&flags.inCluster, "inCluster", false, "set to true if running in the cluster")
flag.StringVar(&flags.project, "project", "", "GCP project")
flag.StringVar(&flags.region, "region", "", "GCP Region (e.g. us-central1)")
flag.StringVar(&flags.network, "network", "", "GCP network name (e.g. default)")
flag.Int64Var(&flags.seed, "seed", -1, "random seed")
flag.BoolVar(&flags.destroySandboxes, "destroySandboxes", true, "set to false to leave sandboxed resources for debugging")
flag.BoolVar(&flags.handleSIGINT, "handleSIGINT", true, "catch SIGINT to perform clean")
flag.StringVar(&flags.gceEndpointOverride, "gce-endpoint-override", "", "If set, talks to a different GCE API Endpoint. By default it talks to https://www.googleapis.com/compute/v1/")
flag.BoolVar(&flags.createILBSubnet, "createILBSubnet", false, "If set, creates a proxy subnet for the L7 ILB")
flag.BoolVar(&flags.enableIstio, "enable-istio", false, "set to true if Istio is enabled.")
}
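// Example invocation of the compiled test binary (a sketch; the binary name and the
// project, region and network values are placeholders):
//   e2e-test -run -project=my-gcp-project -region=us-central1 -network=default \
//       -kubeconfig=$HOME/.kube/config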
// TestMain is the entrypoint for the end-to-end test suite. This is where
// global resource setup should be done.
func TestMain(m *testing.M) {
flag.Parse()
if !flags.inCluster && !flags.run {
fmt.Fprintln(os.Stderr, "Set -run to run the tests.")
// Return 0 here so 'go test ./...' will succeed.
os.Exit(0)
}
if flags.project == "" {
fmt.Fprintln(os.Stderr, "-project must be set to the Google Cloud test project")
os.Exit(1)
}
if flags.region == "" {
fmt.Println("-region must be set to the region of the cluster")
os.Exit(1)
}
if flags.network == "" {
fmt.Fprintln(os.Stderr, "-network must be set to the network of the cluster")
os.Exit(1)
}
fmt.Printf("Version: %q, Commit: %q\n", version.Version, version.GitCommit)
var err error
var kubeconfig *rest.Config
if flags.inCluster {
kubeconfig, err = rest.InClusterConfig()
if err != nil {
klog.Fatalf("Error creating InClusterConfig(): %v", err)
}
} else {
kubeconfig, err = clientcmd.BuildConfigFromFlags("", flags.kubeconfig)
if err != nil {
klog.Fatalf("Error creating kubernetes clientset from %q: %v", flags.kubeconfig, err)
}
}
if flags.seed == -1 {
flags.seed = time.Now().UnixNano()
}
klog.Infof("Using random seed = %d", flags.seed)
Framework = e2e.NewFramework(kubeconfig, e2e.Options{
Project: flags.project,
Region: flags.region,
Network: flags.network,
Seed: flags.seed,
DestroySandboxes: flags.destroySandboxes,
GceEndpointOverride: flags.gceEndpointOverride,
CreateILBSubnet: flags.createILBSubnet,
EnableIstio: flags.enableIstio,
})
if flags.handleSIGINT {
Framework.CatchSIGINT()
}
if err := Framework.SanityCheck(); err != nil {
klog.Fatalf("Framework sanity check failed: %v", err)
}
os.Exit(m.Run())
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 |
test_runner.py | # -*- coding: utf-8 -*-
import shlex, subprocess
import os
import tarfile
import sys
import platform
if sys.version_info[0] < 3:
import urllib
urlretrieve = urllib.urlretrieve
else:
import urllib.request
urlretrieve = urllib.request.urlretrieve
boost_dir = os.environ.get("BOOST_PATH")
if not boost_dir:
boost_dir = 'D:\\boost_1_60_0'
LUA_VERSIONS = ["lua-5.3.3", "lua-5.2.4", "lua-5.1.5", "luajit"]
MAIJOR_TEST_LUA_VERSIONS = ["lua-5.3.3"]
TEST_MSVC_VERS = [("msvc2015", "Visual Studio 14 2015", "", True),
("msvc2015win64", "Visual Studio 14 2015 Win64", "", True),
("msvc2013", "Visual Studio 12 2013", "", False),
("msvc2013win64", "Visual Studio 12 2013 Win64", "", False),
("msvc2015", "Visual Studio 14 2015", "", True)]
TEST_COMPILERS = [
('gcc-4.7', 'g++-4.7', '-DCMAKE_CXX_FLAGS=-std=c++11', False),
('gcc-4.8', 'g++-4.8', '-DCMAKE_CXX_FLAGS=-std=c++11', False),
('gcc-4.9', 'g++-4.9', '-DCMAKE_CXX_FLAGS=-std=c++11', False),
('gcc-5', 'g++-5', '-DCMAKE_CXX_FLAGS=-std=c++11', False),
('gcc-6', 'g++-6', '-DCMAKE_CXX_FLAGS=-std=c++03', False),
('gcc-6', 'g++-6', '-DCMAKE_CXX_FLAGS=-std=c++11', False),
('gcc-6', 'g++-6', '-DCMAKE_CXX_FLAGS=-std=c++14', False),
('gcc', 'g++', '-DCMAKE_CXX_FLAGS=-std=c++11', True),
('clang', 'clang++', '-DCMAKE_CXX_FLAGS=-std=c++11', True),
('clang-3.5', 'clang++-3.5', '-DCMAKE_CXX_FLAGS=-std=c++11', False),
('clang-3.6', 'clang++-3.6', '-DCMAKE_CXX_FLAGS=-std=c++11', False),
('clang-3.7', 'clang++-3.7', '-DCMAKE_CXX_FLAGS=-std=c++11', False),
('clang-3.8', 'clang++-3.8', '-DCMAKE_CXX_FLAGS=-std=c++11', False),
('clang-3.9', 'clang++-3.9', '-DCMAKE_CXX_FLAGS=-std=c++11', False),
('clang-4.0', 'clang++-4.0', '-DCMAKE_CXX_FLAGS=-std=c++11', False),
('clang-3.8', 'clang++-3.8', '-DCMAKE_CXX_FLAGS=-std=c++14', False),
('clang-3.9', 'clang++-3.9', '-DCMAKE_CXX_FLAGS=-std=c++14', False),
('clang-4.0', 'clang++-4.0', '-DCMAKE_CXX_FLAGS=-std=c++14', False)
]
def build_and_exec_test(compiler, lua_version, build_type, dir_opt):
ccompiler = compiler[0]
cxxcompiler = compiler[1]
addopt = compiler[2]
if os.system(cxxcompiler + ' -v 2> /dev/null') != 0: return
buildpath = "_build/" + compiler[
0] + "_" + lua_version + "_" + build_type + "_" + dir_opt
if not os.path.exists(buildpath):
os.makedirs(buildpath)
os.chdir(buildpath)
ret = os.system('CC=' + ccompiler + ' CXX=' + cxxcompiler +
' cmake ../../ ' + addopt + ' -DLOCAL_LUA_DIRECTORY=' +
"_build/" + lua_version + ' -DCMAKE_BUILD_TYPE=' +
build_type)
if ret != 0: #pass through cmake failed. e.g. not found lua
if lua_version in MAIJOR_TEST_LUA_VERSIONS:
raise Exception("cmake error at" + buildpath)
os.chdir("../../")
return
ret = os.system('make -j 2')
if ret != 0:
raise Exception("build error at" + buildpath)
testcommand = 'ctest --output-on-failure'
if platform.system() == 'Linux':
testcommand += ' -T memcheck'
ret = os.system(testcommand)
if ret != 0:
raise Exception("test error at" + buildpath)
os.chdir("../../")
def build_with_target_compiler(lua_version):
for i, compiler in enumerate(TEST_COMPILERS):
if not compiler[3] and lua_version not in MAIJOR_TEST_LUA_VERSIONS:
continue
build_and_exec_test(compiler, lua_version, "Debug", str(i))
if compiler[3]:
build_and_exec_test(compiler, lua_version, "Release", str(i))
def build_msvc_and_exec_test(msvcver, lua_version, build_type):
buildpath = '_build/' + msvcver[0] + '_' + lua_version
if not os.path.exists(buildpath):
os.makedirs(buildpath)
os.chdir(buildpath)
ret = os.system('cmake ../../ -DLOCAL_LUA_DIRECTORY=' + "_build/" +
lua_version + ' -G "' + msvcver[1] + '" ' + msvcver[2])
if ret != 0: #pass through cmake failed. e.g. not found lua
if lua_version in MAIJOR_TEST_LUA_VERSIONS:
raise Exception("cmake error at" + buildpath)
os.chdir("../../")
return
ret = os.system('cmake --build . --config ' + build_type)
if ret != 0:
raise Exception("build error at" + buildpath)
ret = os.system('ctest --output-on-failure -C ' + build_type)
if ret != 0:
raise Exception("test error at" + buildpath)
os.chdir("../../")
def build_with_msvc_ver(lua_version):
for msvcver in TEST_MSVC_VERS:
if not msvcver[3] and lua_version not in MAIJOR_TEST_LUA_VERSIONS:
continue
build_msvc_and_exec_test(msvcver, lua_version, 'Debug')
if msvcver[3]:
build_msvc_and_exec_test(msvcver, lua_version, 'Release')
if __name__ == '__main__':
for i, luaversion in enumerate(LUA_VERSIONS):
if not os.path.exists("_build/"):
os.makedirs("_build/")
if not os.path.exists("_build/" + luaversion) and luaversion != 'luajit':
if not os.path.exists("_build/" + luaversion + ".tar.gz"):  # check the same path the archive is downloaded to
lua_url = "https://www.lua.org/ftp/" + luaversion + ".tar.gz"
urlretrieve(lua_url, "_build/" + luaversion + ".tar.gz")
tf = tarfile.open("_build/" + luaversion + ".tar.gz", 'r')
tf.extractall("_build/")
if os.name == 'nt':
build_with_msvc_ver(luaversion)
else:
build_with_target_compiler(luaversion)
| [] | [] | ["BOOST_PATH"] | [] | ["BOOST_PATH"] | python | 1 | 0 | |
tensorflow/contrib/learn/python/learn/estimators/estimator.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.meta_graph_transform import meta_graph_transform
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary as core_summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if tensor_util.is_tensor(x) or y is not None and tensor_util.is_tensor(y):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
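# Example (sketch): for `def my_model_fn(features, labels, mode, params): ...`,
# _model_fn_args(my_model_fn) returns ('features', 'labels', 'mode', 'params').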
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
A dict mapping the friendly given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
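# Illustrative sketch (not part of the original source): the MetricSpec form
# that _make_metrics_ops prefers over bare functions or (key, fn) tuples. The
# metric name and prediction key below are hypothetical.
def _example_metric_specs():
  return {
      'accuracy': metric_spec.MetricSpec(
          metric_fn=metrics_lib.streaming_accuracy,
          prediction_key='classes'),
  }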
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
results = []
for k, v in sorted(dictionary.items()):
if isinstance(v, float) or isinstance(v, np.float32) or isinstance(
v, int) or isinstance(v, np.int64) or isinstance(v, np.int32):
results.append('%s = %s' % (k, v))
else:
results.append('Type of %s = %s' % (k, type(v)))
return ', '.join(results)
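# Illustrative sketch (not part of the original source): numeric values are
# rendered as 'key = value' by _dict_to_str; anything else only reports its
# type. The sample dict below is hypothetical.
def _example_dict_to_str():
  sample = {'loss': 0.25, 'global_step': np.int64(100), 'tag': 'eval'}
  return _dict_to_str(sample)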
def _write_dict_to_summary(output_dir, dictionary, current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = core_summary.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
elif isinstance(dictionary[key], six.string_types):
try:
summ = summary_pb2.Summary.FromString(dictionary[key])
for i, _ in enumerate(summ.value):
summ.value[i].tag = key
summary_proto.value.extend(summ.value)
except message.DecodeError:
logging.warn('Skipping summary for %s, cannot parse string to Summary.',
key)
continue
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int or a serialized string of Summary.', key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
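# Illustrative sketch (not part of the original source): only numeric values
# and serialized Summary strings survive into the event file; None values and
# the 'global_step' key are skipped by _write_dict_to_summary above.
def _example_write_eval_summary(output_dir):
  _write_dict_to_summary(
      output_dir=output_dir,
      dictionary={'loss': 0.25, 'accuracy': np.float32(0.9), 'global_step': 10},
      current_global_step=10)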
GraphRewriteSpec = collections.namedtuple('GraphRewriteSpec',
['tags', 'transforms'])
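# Illustrative sketch (not part of the original source): the first spec must
# use no transforms (it carries the variables); any further specs, like the
# hypothetical second entry here, produce additional transformed MetaGraphDefs.
_EXAMPLE_GRAPH_REWRITE_SPECS = (
    GraphRewriteSpec((tag_constants.SERVING,), ()),
    GraphRewriteSpec(('hypothetical_tag',), ('strip_unused_nodes',)),
)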
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use an
`Estimator`.
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model. If `None`, the model_dir
        in `config` will be used if set. If both are set, they must be the same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info(
              'Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This can implement
    either iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at once, or when the model takes a long time to
    converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics, name)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
      x: Matrix of shape [n_samples, n_features...]. Can be iterator that
        returns arrays of features. The input samples on which to run
        prediction. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
        the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
  @abc.abstractmethod
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
  @abc.abstractmethod
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps == 0:
        logging.warning('evaluation steps are 0. If `input_fn` does not raise '
                        '`OutOfRangeError`, the evaluation will never stop. '
                        'Use steps=None if intended.')
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
training_util.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend(hooks)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
core_summary.FileWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
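# Illustrative sketch (not part of the original source): a feature_engineering_fn
# has the same (features, labels) -> (features, labels) contract as the identity
# above; this hypothetical variant drops a feature column before model_fn sees it.
def _example_feature_engineering_fn(features, labels):
  features = dict(features)
  features.pop('debug_only_column', None)
  return features, labels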
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
            is passed to Estimator in the `params` parameter. This allows
            configuring Estimators from hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
        Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode, metrics=None):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
metrics: Dict of metrics.
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
model_fn_ops = model_fn_results
else:
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
model_fn_ops = model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
return model_fn_ops
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL, metrics)
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None,
graph_rewrite_specs=(GraphRewriteSpec((tag_constants.SERVING,), ()),)):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
graph_rewrite_specs: an iterable of `GraphRewriteSpec`. Each element will
produce a separate MetaGraphDef within the exported SavedModel, tagged
and rewritten as specified. Defaults to a single entry using the
default serving tag ("serve") and no rewriting.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# We'll write the SavedModel to a temporary directory and then atomically
# rename it at the end. This helps to avoid corrupt / incomplete outputs,
# which could otherwise occur if the job is preempted or otherwise fails
# in the middle of SavedModel creation.
temp_export_dir = saved_model_export_utils.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
# Build the base graph
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
lookup_ops.tables_initializer())
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
# Export the first MetaGraphDef with variables, assets etc.
with tf_session.Session('') as session:
# pylint: disable=protected-access
saveables = variables._all_saveable_objects()
# pylint: enable=protected-access
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
elif saveables:
saver_for_restore = saver.Saver(saveables, sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# Perform the export
if not graph_rewrite_specs or graph_rewrite_specs[0].transforms:
raise ValueError('The first element of graph_rewrite_specs '
'must specify no transforms.')
untransformed_tags = graph_rewrite_specs[0].tags
# TODO(soergel): switch to main_op or otherwise update when dust settles
builder.add_meta_graph_and_variables(
session, untransformed_tags,
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
# pylint: disable=protected-access
base_meta_graph_def = builder._saved_model.meta_graphs[0]
# pylint: enable=protected-access
if graph_rewrite_specs[1:]:
# Prepare the input_names and output_names needed for the
# meta_graph_transform call below.
input_names = [tensor.name
for input_dict in input_alternatives.values()
for tensor in input_dict.values()]
output_names = [tensor.name
for output_alternative in output_alternatives.values()
for tensor in output_alternative[1].values()]
# Write the additional MetaGraphDefs
for graph_rewrite_spec in graph_rewrite_specs[1:]:
# TODO(soergel) consider moving most of this to saved_model.builder_impl
# as e.g. builder.add_rewritten_meta_graph(rewritten_graph_def, tags)
transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
base_meta_graph_def, input_names, output_names,
graph_rewrite_spec.transforms, graph_rewrite_spec.tags)
# pylint: disable=protected-access
meta_graph_def = builder._saved_model.meta_graphs.add()
# pylint: enable=protected-access
meta_graph_def.CopyFrom(transformed_meta_graph_def)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
builder.save(as_text)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
# For the duration of the x,y deprecation in Estimator, allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
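# Illustrative sketch (not part of the original source): SKCompat gives an
# Estimator a scikit-learn style interface over in-memory numpy arrays. The
# estimator and arrays passed in here are hypothetical.
def _example_skcompat_usage(estimator, x_train, y_train, x_test):
  classifier = SKCompat(estimator)
  classifier.fit(x_train, y_train, batch_size=128, steps=200)
  return classifier.predict(x_test, batch_size=128)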
| []
| []
| []
| [] | [] | python | null | null | null |
lib/gitspindle/github.py | from gitspindle import *
from gitspindle.ansi import *
import datetime
import getpass
import github3
import github3.gists
import glob
import os
import re
import requests
import socket
import sys
import tempfile
import time
import webbrowser
class GitHub(GitSpindle):
prog = 'git hub'
what = 'GitHub'
spindle = 'github'
hosts = ['github.com', 'www.github.com', 'gist.github.com']
api = github3
# Support functions
def login(self):
host = self.config('host')
if host and host not in ('https://api.github.com', 'api.github.com'):
if not host.startswith(('http://', 'https://')):
try:
requests.get('https://' + host)
except:
err("%s is not reachable via https. Use http://%s to use the insecure http protocol" % (host, host))
host = 'https://' + host
self.gh = github3.GitHubEnterprise(url=host)
else:
self.gh = github3.GitHub()
user = self.config('user')
if not user:
user = raw_input("GitHub user: ").strip()
self.config('user', user)
token = self.config('token')
if not token:
password = getpass.getpass("GitHub password: ")
self.gh.login(user, password, two_factor_callback=lambda: prompt_for_2fa(user))
scopes = ['user', 'repo', 'gist', 'admin:public_key', 'admin:repo_hook', 'admin:org']
if user.startswith('git-spindle-test-'):
scopes.append('delete_repo')
name = "GitSpindle on %s" % socket.gethostname()
try:
auth = self.gh.authorize(user, password, scopes, name, "http://seveas.github.com/git-spindle")
except github3.GitHubError:
type, exc = sys.exc_info()[:2]
if not hasattr(exc, 'response'):
raise
response = exc.response
if response.status_code != 422:
raise
for error in response.json()['errors']:
if error['resource'] == 'OauthAccess' and error['code'] == 'already_exists':
if os.getenv('DEBUG') or self.question('An OAuth token for this host already exists. Shall I delete it?', default=False):
for auth in self.gh.authorizations():
if auth.app['name'] in (name, '%s (API)' % name):
auth.delete()
auth = self.gh.authorize(user, password, scopes, name, "http://seveas.github.com/git-spindle")
else:
err('Unable to create an OAuth token')
break
else:
raise
if auth is None:
err("Authentication failed")
token = auth.token
self.config('token', token)
self.config('auth-id', auth.id)
location = '%s - do not share this file' % self.config_file
if self.use_credential_helper:
location = 'git\'s credential helper'
print("A GitHub authentication token is now stored in %s" % location)
print("To revoke access, visit https://github.com/settings/applications")
if not user or not token:
err("No user or token specified")
self.gh.login(username=user, token=token)
try:
self.me = self.gh.me()
self.my_login = self.me.login
except github3.GitHubError:
# Token obsolete
self.config('token', None)
self.login()
def parse_url(self, url):
if url.hostname == 'gist.github.com':
return ['gist', url.path.split('/')[-1]]
else:
return ([self.my_login] + url.path.split('/'))[-2:]
def get_repo(self, remote, user, repo):
if user == 'gist':
# This is a gist, not a normal repo
repo_ = self.gh.gist(repo)
if not repo_:
err("Gist %s does not exist" % repo)
else:
repo_ = self.gh.repository(user, repo)
return repo_
def parent_repo(self, repo):
if repo.fork:
# In search results or lists parent info is not returned with a repository
return getattr(repo, 'parent', None) or self.gh.repository(repo.owner.login, repo.name).parent
def clone_url(self, repo, opts):
if opts['--ssh'] or repo.private:
return repo.ssh_url
if opts['--http']:
return repo.clone_url
if opts['--git']:
return repo.git_url
if self.my_login == repo.owner.login:
return repo.ssh_url
return repo.clone_url
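    # Illustrative sketch (not part of the original source): clone_url prefers
    # ssh for private repositories and repositories you own, and falls back to
    # the public clone URL otherwise; --ssh/--http/--git force a protocol.
    def _example_clone_urls(self, repo):
        urls = {}
        for proto in ('default', 'ssh', 'http', 'git'):
            opts = {'--ssh': False, '--http': False, '--git': False}
            if proto != 'default':
                opts['--' + proto] = True
            urls[proto] = self.clone_url(repo, opts)
        return urls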
def api_root(self):
if hasattr(self, 'gh'):
return self.gh.session.base_url
host = self.config('host')
if not host:
return 'https://api.github.com'
return host.rstrip('/') + '/api/v3'
def find_template(self, repo, template):
template = template.lower()
contents = None
for dir in ('/', '/.github/'):
try:
files = repo.directory_contents(dir)
except github3.exceptions.NotFoundError:
files = None
if not files or not hasattr(files, 'items'):
continue
files = dict([(k.lower(), v) for k, v in files])
if template in files:
contents = files[template]
else:
for file in files:
if file.startswith(template + '.'):
contents = files[file]
if contents:
contents = contents.name, self.gh.session.get(contents.download_url, stream=True).text
return contents
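    # Illustrative sketch (not part of the original source): find_template does
    # a case-insensitive lookup in the repository root and in .github/, also
    # matching names with an extension (e.g. issue_template.md). It returns a
    # (filename, text) pair or None.
    def _example_issue_template(self, repo):
        return self.find_template(repo, 'issue_template')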
# Commands
@command
def add_collaborator(self, opts):
"""<user>...
Add a user as collaborator"""
repo = self.repository(opts)
for user in opts['<user>']:
repo.add_collaborator(user)
@command
def add_deploy_key(self, opts):
"""[--read-only] <key>...
Add a deploy key"""
repo = self.repository(opts)
url = repo._build_url('keys', base_url=repo._api)
for arg in opts['<key>']:
with open(arg) as fd:
algo, key, title = fd.read().strip().split(None, 2)
key = "%s %s" % (algo, key)
print("Adding deploy key %s" % arg)
repo.create_key(title=title, key=key, read_only=opts['--read-only'])
@command
def add_hook(self, opts):
"""<name> [<setting>...]
Add a repository hook"""
repo = self.repository(opts)
for hook in repo.hooks():
if hook.name == opts['<name>']:
err("Hook %s already exists" % opts['<name>'])
settings = dict([x.split('=', 1) for x in opts['<setting>']])
for key in settings:
if settings[key].isdigit():
settings[key] = int(settings[key])
events = settings.pop('events', 'push').split(',')
repo.create_hook(opts['<name>'], settings, events)
@command
def apply_pr(self, opts):
"""[--ssh|--http|--git] <pr-number>
Applies a pull request as a series of cherry-picks"""
repo = self.repository(opts)
pr = repo.pull_request(opts['<pr-number>'])
if not pr:
err("Pull request %s does not exist" % opts['<pr-number>'])
print("Applying PR#%d from %s: %s" % (pr.number, pr.user.refresh().name or pr.user.login, pr.title))
# Warnings
warned = False
cbr = self.gitm('rev-parse', '--symbolic-full-name', 'HEAD').stdout.strip().replace('refs/heads/','')
if cbr != pr.base.ref:
print(wrap("Pull request was filed against %s, but you're on the %s branch" % (pr.base.ref, cbr), fgcolor.red))
warned = True
if pr.merged_at:
print(wrap("Pull request was already merged at %s by %s" % (pr.merged_at, pr.merged_by), fgcolor.red))
if not pr.mergeable or pr.mergeable_state != 'clean':
print(wrap("Pull request will not apply cleanly", fgcolor.red))
warned = True
if pr.state == 'closed':
print(wrap("Pull request has already been closed", fgcolor.red))
warned = True
if warned:
if not self.question("Continue?", default=False):
sys.exit(1)
# Fetch PR if needed
sha = self.git('rev-parse', '--verify', 'refs/pull/%d/head' % pr.number).stdout.strip()
if sha != pr.head.sha:
print("Fetching pull request")
url = self.clone_url(self.gh.repository(pr.repository[0].replace('repos/', ''), pr.repository[1]), opts)
self.gitm('fetch', url, 'refs/pull/%d/head:refs/pull/%d/head' % (pr.number, pr.number), redirect=False)
head_sha = self.gitm('rev-parse', 'HEAD').stdout.strip()
if self.git('merge-base', pr.head.sha, head_sha).stdout.strip() == head_sha:
print("Fast-forward merging %d commit(s): %s..refs/pull/%d/head" % (pr.commits_count, pr.base.ref, pr.number))
self.gitm('merge', '--ff-only', 'refs/pull/%d/head' % pr.number, redirect=False)
else:
print("Cherry-picking %d commit(s): %s..refs/pull/%d/head" % (pr.commits_count, pr.base.ref, pr.number))
self.gitm('cherry-pick', '%s..refs/pull/%d/head' % (pr.base.ref, pr.number), redirect=False)
@command
@wants_parent
def add_remote(self, opts):
"""[--ssh|--http|--git] <user> [<name>]
        Add user's fork as a named remote. The name defaults to the user's login name"""
for fork in self.repository(opts).forks():
if fork.owner.login in opts['<user>']:
url = self.clone_url(fork, opts)
name = opts['<name>'] or fork.owner.login
self.gitm('remote', 'add', '-f', name, url, redirect=False)
@command
def add_public_keys(self, opts):
"""[<key>...]
Adds keys to your public keys"""
if not opts['<key>']:
opts['<key>'] = glob.glob(os.path.join(os.path.expanduser('~'), '.ssh', 'id_*.pub'))
existing = [x.key for x in self.gh.keys()]
for arg in opts['<key>']:
with open(arg) as fd:
algo, key, title = fd.read().strip().split(None, 2)
key = "%s %s" % (algo, key)
if key in existing:
continue
print("Adding %s" % arg)
self.gh.create_key(title=title, key=key)
@command
def browse(self, opts):
"""[--parent] [<repo>] [<section>]
Open the GitHub page for a repository in a browser"""
sections = ['issues', 'pulls', 'wiki', 'branches', 'releases', 'contributors', 'graphs', 'settings']
if opts['<repo>'] in sections and not opts['<section>']:
opts['<repo>'], opts['<section>'] = None, opts['<repo>']
repo = self.repository(opts)
url = repo.html_url
if opts['<section>']:
url += '/' + opts['<section>']
webbrowser.open_new(url)
@command
def calendar(self, opts):
"""[<user>]
Show a timeline of a user's activity"""
user = (opts['<user>'] or [self.my_login])[0]
months = []
rows = [[],[],[],[],[],[],[]]
commits = []
data = requests.get('https://github.com/users/%s/contributions' % user).text
# Sorry, zalgo!
data = re.findall(r'data-count="(.*?)" data-date="(.*?)"', data)
y, m, d = [int(x) for x in data[0][1].split('-')]
wd = (datetime.date(y,m,d).weekday()+1) % 7
for i in range(wd):
rows[i].append((None,None))
if wd:
months.append(m)
for (count, date) in data:
count = int(count)
y, m, d = [int(x) for x in date.split('-')]
wd = (datetime.date(y,m,d).weekday()+1) % 7
rows[wd].append((d, count))
if not wd:
months.append(m)
if count:
commits.append(count)
# Print months
sys.stdout.write(" ")
last = -1
skip = months[2] != months[0]
monthtext = ('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
for month in months:
if month != last:
sys.stdout.write(monthtext[month] + ' ')
skip = True
last = month
elif not skip:
sys.stdout.write(' ')
else:
skip = False
print("")
# Print commits
days = 'SMTWTFS'
commits.sort()
if len(commits) < 2:
p5 = p15 = p35 = 0
else:
p5 = commits[min(int(round(len(commits) * 0.95)), len(commits)-1)]
p15 = commits[min(int(round(len(commits) * 0.85)), len(commits)-1)]
p35 = commits[min(int(round(len(commits) * 0.65)), len(commits)-1)]
blob1 = b'\xe2\x96\xa0'.decode('utf-8')
blob2 = b'\xe2\x97\xbc'.decode('utf-8')
for rnum, row in enumerate(rows):
if rnum % 2:
sys.stdout.write(days[rnum] + " ")
else:
sys.stdout.write(" ")
for (day, count) in row:
if count is None:
color = attr.conceal
elif count > p5:
color = fgcolor.xterm(22)
elif count > p15:
color = fgcolor.xterm(28)
elif count > p35:
color = fgcolor.xterm(64)
elif count:
color = fgcolor.xterm(65)
else:
color = fgcolor.xterm(237)
if day == 1:
msg = wrap(blob2, attr.underline, color)
if not PY3:
msg = msg.encode('utf-8')
sys.stdout.write(msg)
else:
msg = wrap(blob1, color)
if not PY3:
msg = msg.encode('utf-8')
sys.stdout.write(msg)
sys.stdout.write(' ')
print("")
@command
def cat(self, opts):
"""<file>...
Display the contents of a file on GitHub"""
for arg in opts['<file>']:
repo, ref, file = ([None, None] + arg.split(':',2))[-3:]
user = None
if repo:
user, repo = ([None] + repo.split('/'))[-2:]
repo = self.gh.repository(user or self.my_login, repo)
else:
repo = self.repository(opts)
file = self.rel2root(file)
content = repo.file_contents(file, ref=ref or repo.default_branch)
if not content:
err("No such file: %s" % arg)
if content.type != 'file':
err("Not a regular file: %s" % arg)
resp = self.gh.session.get(content.download_url, stream=True)
for chunk in resp.iter_content(4096):
os.write(sys.stdout.fileno(), chunk)
@command
def check_pages(self, opts):
"""[<repo>] [--parent]
        Check the GitHub Pages configuration and content of your repo"""
repo = self.repository(opts)
if opts['<repo>']:
self.clone(opts)
os.chdir(repo.name)
def warning(msg, url=None):
print(wrap(msg, fgcolor.yellow))
if url:
print(wrap(url, attr.faint))
def error(msg, url=None):
print(wrap(msg, fgcolor.red))
if url:
print(wrap(url, attr.faint))
# Old-style $user.github.com repos
if repo.name.lower() == repo.owner.login.lower() + '.github.com':
warning("Your repository is named %s.github.com, but should be named %s.github.io" % (repo.owner.login, repo.owner.login),
"https://help.github.com/articles/user-organization-and-project-pages/#user--organization-pages")
# if repo.name.lower() == repo.owner.login.lower() + '.github.io' and repo.name != repo.name.lower():
# error("You should not have capital letters in your repository name, please rename it from %s to %s" % (repo.name, repo.name.lower()))
# Which branch do we check?
if repo.name.lower() in (repo.owner.login.lower() + '.github.com', repo.owner.login.lower() + '.github.io'):
branchname = 'master'
else:
branchname = 'gh-pages'
# Do we have local changes?
if self.git('rev-parse', '--symbolic-full-name', 'HEAD').stdout.strip() == 'refs/heads/%s' % branchname and self.git('status', '--porcelain').stdout.strip():
warning("You have uncommitted changes. This tool checks the latest commit, not the working tree")
# Do we have a pages branch?
local = remote_tracking = remote = None
output = self.git('ls-remote', repo.remote or 'origin', 'refs/heads/%s' % branchname).stdout
for line in output.splitlines():
remote = line.split()[0]
if not remote:
error("You have no %s branch on GitHub" % branchname,
"https://help.github.com/articles/user-organization-and-project-pages/")
output = self.git('for-each-ref', '--format=%(refname) %(objectname) %(upstream:trackshort)',
'refs/remotes/%s/%s' % (repo.remote or 'origin', branchname),
'refs/heads/%s' % branchname,).stdout
for line in output.splitlines():
if line.startswith('refs/heads'):
ref, sha, ahead = line.split()
local = sha
if ahead == '<':
warning("Your local %s branch is behind the one on GitHub" % branchname)
elif ahead == '>':
warning("Your local %s branch is ahead of the one on GitHub" % branchname)
elif ahead == '<>':
warning("Your local %s branch has diverged from the one on GitHub" % branchname)
elif line.startswith('refs/remotes'):
ref, sha = line.split()
remote_tracking = sha
if remote != remote_tracking:
warning("You need to fetch %s from GitHub to get its latest revision" % branchname)
if not local or not remote_tracking:
warning("You have no %s branch locally" % branchname,
"https://help.github.com/articles/user-organization-and-project-pages/")
if local:
ref = 'refs/heads/%s' % branchname
elif remote_tracking:
ref = 'refs/remotes/%s/%s' % (repo.remote, branchname)
files = self.git('ls-tree', '-r', '--name-only', ref).stdout.splitlines()
# Do we have an index.html
if 'index.html' not in files:
warning("You have no index.html")
# Do we need .nojekyll (dirs starting with underscores)
if '.nojekyll' not in files and '_config.yml' not in files:
for file in files:
if file.startswith('_'):
warning("You have filenames starting with underscores, but no .nojekyll file",
"https://help.github.com/articles/using-jekyll-with-pages/#turning-jekyll-off")
break
# Do we have unverified emails
if repo.owner.login == self.me.login:
for mail in self.gh.emails():
if not mail.verified:
error("Unverified %s email address: %s" % (mail.primary and 'primary' or 'secondary', mail.email))
# Do we have a custom CNAME. Check DNS (Use meta api for A records)
for file in files:
if file.lower() == 'cname':
if file != 'CNAME':
error("The CNAME file must be named in all caps",
"https://help.github.com/articles/adding-a-cname-file-to-your-repository/")
cname = self.git('--no-pager', 'show', '%s:%s' % (ref, file)).stdout.strip()
pages_ips = self.gh.meta()['pages']
try:
import publicsuffix
except ImportError:
import gitspindle.public_suffix as publicsuffix
expect_cname = publicsuffix.PublicSuffixList(publicsuffix.fetch()).get_public_suffix(cname) != cname
try:
import dns
import dns.resolver
resolver = dns.resolver.Resolver()
answer = resolver.query(cname)
for rrset in answer.response.answer:
name = rrset.name.to_text().rstrip('.')
if name == cname:
for rr in rrset:
if rr.rdtype == dns.rdatatype.A and expect_cname:
warning("You should use a CNAME record for non-apex domains",
"https://help.github.com/articles/tips-for-configuring-a-cname-record-with-your-dns-provider/")
if rr.rdtype == dns.rdatatype.A and rr.address not in pages_ips:
error("IP address %s is incorreect for a pages site, use only %s" % (rr.address, ', '.join(pages_ips)),
"https://help.github.com/articles/tips-for-configuring-a-cname-record-with-your-dns-provider/")
if rr.rdtype == dns.rdatatype.CNAME and rr.target != '%s.github.io.' % repo.owner.login:
error("CNAME %s -> %s is incorrect, should be %s -> %s" % (name, rr.target, name, '%s.github.io.' % repo.owner.login),
"https://help.github.com/articles/tips-for-configuring-an-a-record-with-your-dns-provider/")
except ImportError:
if hasattr(self.shell, 'dig'):
lines = self.shell.dig('+nocomment', '+nocmd', '+nostats', '+noquestion', cname).stdout.splitlines()
for line in lines:
rname, ttl, _, rtype, value = line.split(None, 4)
if rname.rstrip('.') == cname:
if rtype == 'A' and expect_cname:
warning("You should use a CNAME record for non-apex domains",
"https://help.github.com/articles/tips-for-configuring-a-cname-record-with-your-dns-provider/")
if rtype == 'A' and value not in pages_ips:
error("IP address %s is incorreect for a pages site, use only %s" % (value, ', '.join(pages_ips)),
"https://help.github.com/articles/tips-for-configuring-a-cname-record-with-your-dns-provider/")
if rtype == 'CNAME' and value != '%s.github.io.' % repo.owner.login:
error("CNAME %s -> %s is incorrect, should be %s -> %s" % (rname, value, rname, '%s.github.io.' % repo.owner.login),
"https://help.github.com/articles/tips-for-configuring-an-a-record-with-your-dns-provider/")
else:
error("Cannot check DNS settings. Please install dnspython or dig")
break
@command
def clone(self, opts, repo=None):
"""[--ssh|--http|--git] [--triangular [--upstream-branch=<branch>]] [--parent] [git-clone-options] <repo> [<dir>]
Clone a repository by name"""
if not repo:
repo = self.repository(opts)
url = self.clone_url(repo, opts)
args = opts['extra-opts']
args.append(url)
dir = opts['<dir>'] and opts['<dir>'][0] or repo.name
if '--bare' in args:
dir += '.git'
args.append(dir)
self.gitm('clone', *args, redirect=False).returncode
if repo.fork:
os.chdir(dir)
self.set_origin(opts, repo=repo)
@command
def collaborators(self, opts):
"""[<repo>]
List collaborators of a repository"""
repo = self.repository(opts)
users = list(repo.collaborators())
users.sort(key = lambda user: user.login)
for user in users:
print(user.login)
@command
def create(self, opts):
"""[--private] [--org=<org>] [--description=<description>]
Create a repository on github to push to"""
root = self.gitm('rev-parse', '--show-toplevel').stdout.strip()
name = os.path.basename(root)
if opts['--org']:
dest = self.gh.organization(opts['--org'])
ns = opts['--org']
else:
dest = self.gh
ns = self.my_login
if name in [x.name for x in dest.repositories() if x.owner.login == ns]:
err("Repository already exists")
repo = dest.create_repository(name=name, description=opts['--description'] or "", private=opts['--private'])
if 'origin' in self.remotes():
print("Remote 'origin' already exists, adding the GitHub repository as 'github'")
self.set_origin(opts, repo=repo, remote='github')
else:
self.set_origin(opts, repo=repo)
@command
def create_token(self, opts):
"""[--store]
Create a personal access token that can be used for git operations"""
password = getpass.getpass("GitHub password: ")
scopes = ['repo']
name = "Git on %s" % socket.gethostname()
host = self.config('host')
if host and host not in ('https://api.github.com', 'api.github.com'):
if not host.startswith(('http://', 'https://')):
host = 'https://' + host
gh = github3.GitHubEnterprise(url=host)
else:
gh = github3.GitHub()
gh.login(self.my_login, password, two_factor_callback=lambda: prompt_for_2fa(self.my_login))
try:
auth = gh.authorize(self.my_login, password, scopes, name, "http://git-scm.com")
except github3.GitHubError:
type, exc = sys.exc_info()[:2]
dont_raise = False
if hasattr(exc, 'response') and exc.response.status_code == 422:
for error in exc.response.json()['errors']:
if error['resource'] == 'OauthAccess' and error['code'] == 'already_exists':
if os.getenv('DEBUG'):
for auth in gh.authorizations():
if auth.app['name'] in (name, '%s (API)' % name):
auth.delete()
auth = gh.authorize(self.my_login, password, scopes, name, "http://git-scm.com")
dont_raise=True
else:
err('An OAuth token for git on this host already exists. Please delete it on your settings page')
if not dont_raise:
raise
if auth is None:
err("Authentication failed")
token = auth.token
print("Your personal access token is: %s" % token)
if opts['--store']:
host = self.config('host') or 'github.com'
Credential(protocol='https', host=host, username=self.my_login, password=token).approve()
print("Your personal access token has been stored in the git credential helper")
@command
def deploy_keys(self, opts):
"""[<repo>]
Lists all keys for a repo"""
repo = self.repository(opts)
for key in repo.keys():
ro = 'ro' if key.read_only else 'rw'
print("%s %s (id: %s, %s)" % (key.key, key.title or '', key.id, ro))
@command
def edit_hook(self, opts):
"""<name> [<setting>...]
Edit a hook"""
for hook in self.repository(opts).hooks():
if hook.name == opts['<name>']:
break
else:
raise ValueError("Hook %s does not exist" % opts['<name>'])
settings = dict([x.split('=', 1) for x in opts['<setting>']])
for key in settings:
if settings[key].isdigit():
settings[key] = int(settings[key])
events = settings.pop('events', ','.join(hook.events)).split(',')
config = hook.config
config.update(settings)
hook.edit(config, events)
@command
def fetch(self, opts):
"""[--ssh|--http|--git] <user> [<refspec>]
Fetch refs from a user's fork"""
for fork in self.repository(opts).forks():
if fork.owner.login in opts['<user>']:
url = self.clone_url(fork, opts)
refspec = opts['<refspec>'] or 'refs/heads/*'
if ':' not in refspec:
if not refspec.startswith('refs/'):
refspec += ':' + 'refs/remotes/%s/' % fork.owner.login + refspec
else:
refspec += ':' + refspec.replace('refs/heads/', 'refs/remotes/%s/' % fork.owner.login)
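# Worked examples of the rewrite above (assuming the fork owner is 'alice'):
#   'feature-x'            -> 'feature-x:refs/remotes/alice/feature-x'
#   'refs/heads/feature-x' -> 'refs/heads/feature-x:refs/remotes/alice/feature-x'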
self.gitm('fetch', url, refspec, redirect=False)
@command
def fork(self, opts):
"""[--ssh|--http|--git] [--triangular [--upstream-branch=<branch>]] [<repo>]
Fork a repo and clone it"""
do_clone = bool(opts['<repo>'])
repo = self.repository(opts)
if repo.owner.login == self.my_login:
err("You cannot fork your own repos")
if isinstance(repo, github3.gists.Gist):
for fork in repo.forks():
if fork.owner.login == self.my_login:
err("You already forked this gist as %s" % fork.html_url)
else:
if repo.name in [x.name for x in self.gh.repositories() if x.owner.login == self.my_login]:
err("Repository already exists")
my_clone = repo.create_fork()
self.wait_for_repo(my_clone.owner.login, my_clone.name, opts)
if do_clone:
self.clone(opts, repo=my_clone)
else:
self.set_origin(opts, repo=my_clone)
@command
@wants_parent
def forks(self, opts):
"""[<repo>]
List all forks of this repository"""
repo = self.repository(opts)
print("[%s] %s" % (wrap(repo.owner.login, attr.bright), repo.html_url))
for fork in repo.forks():
print("[%s] %s" % (fork.owner.login, fork.html_url))
@command
def gist(self, opts):
"""[--description=<description>] <file>...
Create a new gist from files or stdin"""
files = {}
description = opts['--description'] or ''
for f in opts['<file>']:
if f == '-':
files['stdout'] = {'content': sys.stdin.read()}
else:
if not os.path.exists(f):
err("No such file: %s" % f)
with open(f) as fd:
files[os.path.basename(f)] = {'content': fd.read()}
gist = self.gh.create_gist(description=description, files=files)
print("Gist created at %s" % gist.html_url)
@command
def gists(self, opts):
"""[<user>]
Show all gists for a user"""
user = (opts['<user>'] or [self.gh.me().login])[0]
for gist in self.gh.gists_by(user):
print("%s - %s" % (gist.html_url, gist.description))
@command
def hooks(self, opts):
"""\nShow hooks that have been enabled"""
for hook in self.repository(opts).hooks():
print(wrap("%s (%s)" % (hook.name, ', '.join(hook.events)), attr.bright))
for key, val in sorted(hook.config.items()):
if val in (None, ''):
continue
print(" %s: %s" % (key, val))
@command
def ignore(self, opts):
"""[<language>...]
Show gitignore patterns for one or more languages"""
lang = opts['<language>']
if not lang:
langs = sorted(self.gh.gitignore_templates(), key = lambda x: x.lower())
print("Languages for which a gitignore template is available:\n * " + "\n * ".join(langs))
else:
langs = {}
for l in lang:
try:
t = self.gh.gitignore_template(langs.get(l.lower(), l))
except github3.exceptions.NotFoundError:
if not langs:
langs = {x.lower(): x for x in self.gh.gitignore_templates()}
if l.lower() in langs:
t = self.gh.gitignore_template(langs[l.lower()])
else:
err("No gitignore template found for %s" % l)
print("# Ignore patterns for " + l)
print(t.strip())
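# Usage sketch (git-spindle commands are typically invoked as 'git hub <command>'):
#   git hub ignore              -> list languages that have a template
#   git hub ignore Python C++   -> print the combined ignore patterns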
@command
def ip_addresses(self, opts):
"""[--git] [--hooks] [--importer] [--pages]
Show the IP addresses for github.com services in CIDR format"""
ip_addresses = self.gh.meta()
for what in ('git', 'hooks', 'importer', 'pages'):
if opts['--' + what]:
print("\n".join(ip_addresses[what]))
@command
def issue(self, opts):
"""[<repo>] [--parent] [<issue>...]
Show issue details or report an issue"""
if opts['<repo>'] and opts['<repo>'].isdigit():
# Let's assume it's an issue
opts['<issue>'].insert(0, opts['<repo>'])
opts['<repo>'] = None
repo = self.repository(opts)
for issue_no in opts['<issue>']:
issue = repo.issue(issue_no)
if issue:
pr = issue.pull_request()
print(wrap(issue.title.encode(sys.stdout.encoding, errors='backslashreplace').decode(sys.stdout.encoding), attr.bright, attr.underline))
print(issue.body.encode(sys.stdout.encoding, errors='backslashreplace').decode(sys.stdout.encoding))
print(pr.html_url if pr else issue.html_url)
else:
print('No issue with id %s found in repository %s' % (issue_no, repo.full_name))
if not opts['<issue>']:
ext = ''
template = self.find_template(repo, 'ISSUE_TEMPLATE')
if template:
if '.' in template[0]:
ext = template[0][template[0].rfind('.'):]
body = template[1]
extra = None
else:
body = ""
extra = """Reporting an issue on %s/%s
Please describe the issue as clearly as possible. Lines starting with '#' will
be ignored, the first line will be used as title for the issue.""" % (repo.owner.login, repo.name)
title, body = self.edit_msg(None, body, extra, 'ISSUE_EDITMSG' + ext)
if not body:
err("Empty issue message")
try:
issue = repo.create_issue(title=title, body=body)
print("Issue %d created %s" % (issue.number, issue.html_url))
except:
filename = self.backup_message(title, body, 'issue-message-')
err("Failed to create an issue, the issue text has been saved in %s" % filename)
@command
def issues(self, opts):
"""[<repo>] [--parent] [<filter>...]
List issues in a repository"""
if opts['<repo>'] and '=' in opts['<repo>']:
opts['<filter>'].insert(0, opts['<repo>'])
opts['<repo>'] = None
if (not opts['<repo>'] and not self.in_repo) or opts['<repo>'] == '--':
repos = list(self.gh.repositories(type='all'))
else:
repos = [self.repository(opts)]
for repo in repos:
repo = (opts['--parent'] and self.parent_repo(repo)) or repo
filters = dict([x.split('=', 1) for x in opts['<filter>']])
try:
issues = list(repo.issues(**filters))
except github3.GitHubError:
_, err, _ = sys.exc_info()
if err.code == 410:
if len(repos) == 1:
print(err.message)
continue
else:
raise
if not issues:
continue
print(wrap("Issues for %s/%s" % (repo.owner.login, repo.name), attr.bright))
for issue in issues:
pr = issue.pull_request()
url = pr.html_url if pr else issue.html_url
print("[%d] %s %s" % (issue.number, issue.title.encode(sys.stdout.encoding, errors='backslashreplace').decode(sys.stdout.encoding), url))
@command
def log(self, opts):
"""[--type=<type>] [--count=<count>] [--verbose] [<what>]
Display github log for yourself or other users. Or for an organisation or a repo"""
logtype = 'user'
count = int(opts['--count'] or 30)
verbose = opts['--verbose']
if not opts['<what>']:
what = self.me
else:
if '/' in opts['<what>']:
logtype = 'repo'
user, repo = opts['<what>'].split('/', 1)
if user == 'gist':
what = self.gh.gist(repo)
if not what:
err("Gist %s does not exist" % repo)
else:
what = self.gh.repository(user, repo)
if not what:
err("Repository %s/%s does not exist" % (user, repo))
else:
what = self.gh.user(opts['<what>'])
if not what:
err("User %s does not exist" % opts['<what>'])
if what.type == 'Organization':
logtype = 'org'
if not opts['--type']:
events = [x for x in what.events(number=count)]
else:
events = []
etype = opts['--type'].lower() + 'event'
for event in what.events(number=-1):
if event.type.lower() == etype:
events.append(event)
if len(events) == count:
break
now = datetime.datetime.now()
for event in reversed(events):
ts = event.created_at
if ts.year == now.year:
if (ts.month, ts.day) == (now.month, now.day):
ts = wrap(ts.strftime("%H:%M"), attr.faint)
tss = ' '
else:
ts = wrap(ts.strftime("%m/%d %H:%M"), attr.faint)
tss = ' '
else:
ts = wrap(ts.strftime("%Y/%m/%d %H:%M"), attr.faint)
tss = ' '
repo = event.repo['name']
repo_ = ' (%s)' % repo
if logtype != 'user':
repo_ = ''
ts += ' %s' % event.actor.login
if event.type == 'CommitCommentEvent':
print("%s commented on commit %s%s" % (ts, event.payload['comment'].commit_id[:7], repo_))
elif event.type == 'CreateEvent':
if event.payload['ref_type'] == 'repository':
print("%s created %s %s" % (ts, event.payload['ref_type'], repo))
else:
print("%s created %s %s%s" % (ts, event.payload['ref_type'], event.payload['ref'], repo_))
elif event.type == 'DeleteEvent':
print("%s deleted %s %s%s" % (ts, event.payload['ref_type'], event.payload['ref'], repo_))
elif event.type == 'DownloadEvent':
print("%s created download %s (%s)" % (ts, event.payload['name'], event.payload['description']))
elif event.type == 'FollowEvent':
print("%s started following %s" % (ts, event.payload['target'].login))
elif event.type == 'ForkEvent':
print("%s forked %s to %s/%s" % (ts, repo, event.payload['forkee'].owner.login, event.payload['forkee'].name))
elif event.type == 'ForkApplyEvent':
print("%s applied %s to %s%s" % (ts, event.payload['after'][:7], event.payload['head'], repo_))
elif event.type == 'GistEvent':
print("%s %sd gist #%s" % (ts, event.payload['action'], event.payload['gist'].html_url))
elif event.type == 'GollumEvent':
pages = len(event.payload['pages'])
print("%s updated %d wikipage%s%s" % (ts, pages, {1:''}.get(pages, 's'), repo_))
elif event.type == 'IssueCommentEvent':
print("%s commented on issue #%s%s" % (ts, event.payload['issue'].number, repo_))
if verbose:
print("%s %s %s" % (tss, event.payload['issue'].title, event.payload['comment'].html_url))
elif event.type == 'IssuesEvent':
print("%s %s issue #%s%s" % (ts, event.payload['action'], event.payload['issue'].number, repo_))
if verbose:
print("%s %s %s" % (tss, event.payload['issue'].title, event.payload['issue'].html_url))
elif event.type == 'MemberEvent':
print("%s %s %s to %s" % (ts, event.payload['action'], event.payload['member'].login, repo))
elif event.type == 'PublicEvent':
print("%s made %s open source" % repo)
elif event.type == 'PullRequestReviewCommentEvent':
print("%s commented on a pull request for commit %s%s" % (ts, event.payload['comment'].commit_id[:7], repo_))
elif event.type == 'PullRequestEvent':
print("%s %s pull_request #%s%s" % (ts, event.payload['action'], event.payload['pull_request'].number, repo_))
if verbose:
print("%s %s %s" % (tss, event.payload['pull_request'].title, event.payload['pull_request'].html_url))
elif event.type == 'PushEvent':
# Old push events have shas and not commits
if 'commits' in event.payload:
commits = len(event.payload['commits'])
else:
commits = len(event.payload['shas'])
print("%s pushed %d commits to %s%s" % (ts, commits, event.payload['ref'][11:], repo_))
if verbose:
shas = '%s...%s' % (event.payload['before'][:8], event.payload['head'][:8])
print("%s %s/%s/compare/%s" % (tss, self.me.html_url, event.repo[1], shas))
elif event.type == 'ReleaseEvent':
print("%s released %s" % (ts, event.payload['name']))
elif event.type == 'StatusEvent':
print("%s commit %s changed to %s" % (ts, event.payload['sha'][:7], event.payload['state']))
elif event.type == 'TeamAddEvent':
if 'user' in event.payload:
what = 'user'
name = isinstance(event.payload['user'], dict) and event.payload['user']['name'] or event.payload['user'].name
else:
what = 'repository'
name = isinstance(event.payload['repository'], dict) and event.payload['repository']['name'] or event.payload['repository'].name
print("%s %s %s was added to team %s" % (ts, what, name, event.payload['team'].name))
elif event.type == 'WatchEvent':
print("%s %s watching %s" % (ts, event.payload['action'], repo))
elif event.type == 'GistHistoryEvent':
print("%s committed %s additions, %s deletions" % (ts, event.additions, event.deletions))
else:
print(wrap("Cannot display %s. Please file a bug at github.com/seveas/git-spindle\nincluding the following output:" % event.type, attr.bright))
pprint(event.payload)
@command
def ls(self, opts):
"""[<dir>...]
Display the contents of a directory on GitHub"""
for arg in opts['<dir>'] or ['']:
repo, ref, file = ([None, None] + arg.split(':',2))[-3:]
user = None
if repo:
user, repo = ([None] + repo.split('/'))[-2:]
repo = self.gh.repository(user or self.my_login, repo)
else:
repo = self.repository(opts)
file = self.rel2root(file)
content = repo.directory_contents(file, ref=ref or repo.default_branch)
if not content:
err("No such directory: %s" % arg)
content = sorted([v for k, v in content], key=lambda file: file.name)
mt = max([len(file.type) for file in content])
ms = max([len(str(file.size)) for file in content])
fmt = "%%(type)-%ds %%(size)-%ds %%(sha).7s %%(path)s" % (mt, ms)
for file in content:
print(fmt % file.__dict__)
@command
def mirror(self, opts):
"""[--ssh|--http|--git] [--goblet] [<repo>]
Mirror a repository, or all repositories for a user"""
if opts['<repo>'] and opts['<repo>'].endswith('/*'):
user = opts['<repo>'].rsplit('/', 2)[-2]
for repo in self.gh.repositories(type='all') if user == self.my_login else self.gh.repositories_by(user, type='all'):
if repo.owner.login != self.my_login:
continue
opts['<repo>'] = '%s/%s' % (user, repo)
self.mirror(opts)
for repo in self.gh.gists_by(user):
opts['<repo>'] = 'gist/%s' % repo.name
self.mirror(opts)
return
repo = self.repository(opts)
git_dir = repo.name + '.git'
cur_dir = os.path.basename(os.path.abspath(os.getcwd()))
if cur_dir != git_dir and not os.path.exists(git_dir):
url = self.clone_url(repo, opts)
self.gitm('clone', '--mirror', url, redirect=False)
else:
if git_dir == cur_dir:
git_dir = '.'
# Update the current, mirrored repo
if self.git('--git-dir', git_dir, 'config', 'core.bare').stdout.strip() != 'true' or \
self.git('--git-dir', git_dir, 'config', 'remote.origin.mirror').stdout.strip() != 'true':
err("This is not a mirrored repository")
self.gitm('--git-dir', git_dir, 'fetch', '-q', '--prune', 'origin', redirect=False)
with open(os.path.join(git_dir, 'description'), 'w') as fd:
if PY3:
fd.write(repo.description or "")
else:
fd.write((repo.description or "").encode('utf-8'))
if opts['--goblet']:
cwd = os.getcwd()
os.chdir(git_dir)
self.setup_goblet(opts)
os.chdir(cwd)
@command
def network(self, opts):
"""[<level>]
Create a graphviz graph of followers and forks"""
from collections import defaultdict
class P:
def __init__(self, user):
self.user = user
self.done = False
self.rel_to = defaultdict(list)
def __repr__(self):
return dict.__repr__(self.rel_to)
level = 1
if opts['<level>']:
try:
level = int(opts['<level>'])
except ValueError:
err("Integer argument required")
people = {self.my_login: P(self.me)}
for i in range(level):
for login, person in list(people.items()):
if person.done:
continue
sys.stderr.write("Looking at user %s\n" % login)
# Followers
for other in person.user.followers():
if other.login not in people:
people[other.login] = P(other)
people[other.login].rel_to[login].append('follows')
for other in person.user.following():
if other.login not in people:
people[other.login] = P(other)
person.rel_to[other.login].append('follows')
# Forks
for repo in self.gh.repositories_by(login, type='owner'):
sys.stderr.write("Looking at repo %s\n" % repo.name)
if repo.fork:
if repo.owner.login not in people:
people[repo.owner.login] = P(repo.owner)
parent = self.parent_repo(repo)
person.rel_to[parent.owner.login].append('forked %s' % parent.name)
else:
for fork in repo.forks():
if fork.owner.login == login:
continue
if fork.owner.login not in people:
people[fork.owner.login] = P(fork.owner)
people[fork.owner.login].rel_to[login].append('forked %s' % repo.name)
person.done = True
# Now we create a graph
graph = ["digraph network {"]
for person in people:
graph.append(' "%s"' % person)
for login, person in people.items():
for other, types in person.rel_to.items():
graph.append(' "%s" -> "%s" [label="%s"]' % (login, other, "\\n".join(types)))
graph.append("}")
print("\n".join(graph))
@command
def protect(self, opts):
"""[--enforcement=<level>] [--status-checks=<contexts>] <branch>
Protect a branch against deletions, force-pushes and failed status checks"""
repo = self.repository(opts)
repo.branch(opts['<branch>']).protect(enforcement=opts['--enforcement'],
status_checks=(opts['--status-checks'] or '').split(','))
@command
def protected(self, opts):
"""\nList active branch protections"""
repo = self.repository(opts)
for branch in repo.branches(protected=True):
data = branch.protection
msg = branch.name
if data['required_status_checks']['contexts'] and data['required_status_checks']['enforcement_level'] != 'off':
msg += ' (%s must pass for %s)' % (','.join(data['required_status_checks']['contexts']), data['required_status_checks']['enforcement_level'])
print(msg)
@command
def public_keys(self, opts):
"""[<user>]
Lists all keys for a user"""
user = opts['<user>'] and opts['<user>'][0] or self.my_login
if self.my_login == user:
keys = self.gh.keys()
else:
keys = self.gh.user(user).keys()
for key in keys:
print("%s %s" % (key.key, getattr(key, 'title', '')))
@command
def pull_request(self, opts):
"""[--issue=<issue>] [--yes] [<yours:theirs>]
Opens a pull request to merge your branch to an upstream branch"""
repo = self.repository(opts)
parent = self.parent_repo(repo) or repo
# Which branch?
src = opts['<yours:theirs>'] or ''
dst = None
if ':' in src:
src, dst = src.split(':', 1)
if not src:
src = self.gitm('rev-parse', '--abbrev-ref', 'HEAD').stdout.strip()
if not dst:
dst = parent.default_branch
tracking_branch = self.git('rev-parse', '--symbolic-full-name', '%s@{u}' % src).stdout.strip()
if tracking_branch.startswith('refs/remotes/'):
tracking_remote, tracking_branch = tracking_branch.split('/', 3)[-2:]
if tracking_branch != src or repo.remote != tracking_remote:
# Interesting. We're not just tracking a branch in our clone!
dst = tracking_branch
if src == dst and parent == repo:
err("Cannot file a pull request on the same branch")
# Try to get the local commit
commit = self.gitm('show-ref', 'refs/heads/%s' % src).stdout.split()[0]
# Do they exist on github?
try:
srcb = repo.branch(src)
except github3.exceptions.NotFoundError:
srcb = None
if not srcb:
if self.question("Branch %s does not exist in your GitHub repo, shall I push?" % src):
self.gitm('push', '-u', repo.remote, src, redirect=False)
else:
err("Aborting")
elif srcb and srcb.commit.sha != commit:
# Have we diverged? Then there are commits that are reachable from the github branch but not local
diverged = self.gitm('rev-list', srcb.commit.sha, '^' + commit)
if diverged.stderr or diverged.stdout:
if self.question("Branch %s has diverged from GitHub, shall I push and overwrite?" % src, default=False):
self.gitm('push', '--force', repo.remote, src, redirect=False)
else:
err("Aborting")
else:
if self.question("Branch %s not up to date on github, but can be fast forwarded, shall I push?" % src):
self.gitm('push', repo.remote, src, redirect=False)
else:
err("Aborting")
dstb = parent.branch(dst)
if not dstb:
err("Branch %s does not exist in %s/%s" % (dst, parent.owner.login, parent.name))
# Do we have the dst locally?
for remote in self.gitm('remote').stdout.strip().split("\n"):
url = self.gitm('config', 'remote.%s.url' % remote).stdout.strip()
if url in [parent.git_url, parent.ssh_url, parent.clone_url]:
if parent.private and url != parent.ssh_url:
err("You should configure %s/%s to fetch via ssh, it is a private repo" % (parent.owner.login, parent.name))
self.gitm('fetch', remote, redirect=False)
break
else:
err("You don't have %s/%s configured as a remote repository" % (parent.owner.login, parent.name))
# How many commits?
accept_empty_body = False
commits = try_decode(self.gitm('log', '--pretty=%H', '%s/%s..%s' % (remote, dst, src)).stdout).strip().split()
commits.reverse()
if not commits:
err("Your branch has no commits yet")
# Are we turning an issue into a commit?
if opts['--issue']:
pull = parent.create_pull_from_issue(base=dst, head='%s:%s' % (repo.owner.login, src), issue=int(opts['--issue']))
print("Pull request %d created %s" % (pull.number, pull.html_url))
return
# 1 commit: title/body from commit
if len(commits) == 1:
title, body = self.gitm('log', '--pretty=%s\n%b', '%s^..%s' % (commits[0], commits[0])).stdout.split('\n', 1)
title = title.strip()
body = body.strip()
accept_empty_body = not bool(body)
# More commits: title from branchname (titlecased, s/-/ /g), body comments from shortlog
else:
title = src
if '/' in title:
title = title[title.rfind('/') + 1:]
title = title.title().replace('-', ' ')
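# e.g. branch 'feature/fix-null-deref' becomes the title 'Fix Null Deref'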
body = ""
ext = ''
template = self.find_template(repo, 'PULL_REQUEST_TEMPLATE')
if template:
if '.' in template[0]:
ext = template[0][template[0].rfind('.'):]
body = template[1].rstrip() + '\n\n' + body
extra = """Requesting a pull from %s/%s into %s/%s
Please enter a message to accompany your pull request. Lines starting
with '#' will be ignored, and an empty message aborts the request.""" % (repo.owner.login, src, parent.owner.login, dst)
extra += "\n\n " + try_decode(self.gitm('shortlog', '%s/%s..%s' % (remote, dst, src)).stdout).strip()
extra += "\n\n " + try_decode(self.gitm('diff', '--stat', '%s^..%s' % (commits[0], commits[-1])).stdout).strip()
title, body = self.edit_msg(title, body, extra, 'PULL_REQUEST_EDIT_MSG' + ext)
if not body and not accept_empty_body:
err("No pull request message specified")
try:
pull = parent.create_pull(base=dst, head='%s:%s' % (repo.owner.login, src), title=title, body=body)
print("Pull request %d created %s" % (pull.number, pull.html_url))
except:
filename = self.backup_message(title, body, 'pull-request-message-')
err("Failed to create a pull request, the pull request text has been saved in %s" % filename)
@command
def readme(self, opts):
"""[<repo>]
Get the README for a repository"""
repo = self.repository(opts)
readme = repo.readme()
if readme:
os.write(sys.stdout.fileno(), readme.decoded)
else:
err("No readme found")
@command
def release(self, opts):
"""[--draft] [--prerelease] <tag> [<releasename>]
Create a release"""
repo = self.repository(opts)
tag = opts['<tag>']
if tag.startswith('refs/tags/'):
tag = tag[10:]
name = opts['<releasename>'] or tag
ref = 'refs/tags/' + tag
ret = self.git('rev-parse', '--quiet', '--verify', ref + '^0')
if not ret:
err("Tag %s does not exist yet" % tag)
sha = ret.stdout.strip()
if not self.git('ls-remote', repo.remote, ref).stdout.strip():
if self.question("Tag %s does not exist in your GitHub repo, shall I push?" % tag):
self.gitm('push', repo.remote, '%s:%s' % (ref, ref), redirect=False)
body = ''
if self.git('cat-file', '-t', ref).stdout.strip() == 'tag':
body = self.git('--no-pager', 'log', '-1', '--format=%B', ref).stdout
extra = """Creating release %s based on tag %s
Please enter a text to accompany your release. Lines starting with '#'
will be ignored""" % (name, tag)
body = self.edit_msg(None, body, extra, 'RELEASE_TEXT', split_title=False)
release = repo.create_release(tag, target_commitish=sha, name=name, body=body, draft=opts['--draft'], prerelease=opts['--prerelease'])
print("Release '%s' created %s" % (release.name, release.html_url))
@command
def releases(self, opts):
"""[<repo>]
List all releases"""
repo = self.repository(opts)
for release in repo.releases():
status = []
if release.draft:
status.append('draft')
if release.prerelease:
status.append('prerelease')
status = status and ' ' + wrap(','.join(status), attr.faint) or ''
print("%s (%s)%s %s" % (release.name, release.tag_name, status, release.html_url))
@command
def remove_collaborator(self, opts):
"""<user>...
Remove a user as collaborator """
repo = self.repository(opts)
for user in opts['<user>']:
repo.remove_collaborator(user)
@command
def remove_deploy_key(self, opts):
"""<key>...
Remove deploy key by id"""
repo = self.repository(opts)
for key in opts['<key>']:
if not key.isdigit():
err("You must specify a numeric id")
try:
repo.key(key).delete()
except github3.exceptions.NotFoundError:
err("No key with id %s found" % key)
@command
def remove_hook(self, opts):
"""<name>
Remove a hook"""
for hook in self.repository(opts).hooks():
if hook.name == opts['<name>']:
hook.delete()
@command
def render(self, opts):
"""[--save=<outfile>] <file>
Render a markdown document"""
template = """<!DOCTYPE html>
<html>
<head>
<link type="text/css" rel="stylesheet" media="all" href="http://necolas.github.io/normalize.css/latest/normalize.css"></link>
<link type="text/css" rel="stylesheet" media="all" href="http://seveas.github.io/git-spindle/_static/github.css"></link>
<link type="text/css" rel="stylesheet" media="all" href="https://cdnjs.cloudflare.com/ajax/libs/octicons/2.0.2/octicons.css"></link>
</head>
<body>
<div class="container">
<div id="readme" class="boxed-group">
<h3><span class="octicon octicon-book"></span> %s</h3>
<article class="markdown-body">
%s
</article>
</div>
</div>
</body>
</html>"""
with open(opts['<file>'][0]) as fd:
data = fd.read()
rendered = github3.markdown(data)
if isinstance(rendered, bytes):
rendered = rendered.decode('utf-8')
rendered = rendered.replace('user-content-', '')
html = template % (os.path.basename(opts['<file>'][0]), rendered)
if opts['--save']:
with open(opts['--save'], 'w') as fd:
fd.write(html)
else:
with tempfile.NamedTemporaryFile(suffix='.html', delete=False) as fd:
fd.write(html.encode('utf-8'))
fd.close()
webbrowser.open('file://' + fd.name)
time.sleep(1)
os.remove(fd.name)
@command
def repos(self, opts):
"""[--no-forks] [<user>]
List all repos of a user, by default yours"""
if opts['<user>']:
repos = list(self.gh.repositories_by(opts['<user>'][0]))
else:
repos = list(self.gh.repositories(type='all'))
opts['<user>'] = [self.my_login]
if not repos:
return
maxlen = max([len(x.name) for x in repos])
maxstar = len(str(max([x.stargazers_count for x in repos])))
maxfork = len(str(max([x.forks_count for x in repos])))
maxwatch = len(str(max([x.watchers for x in repos])))
# XXX github support request filed: watchers is actually stars
#fmt = u"%%-%ds \u2605 %%-%ds \u25c9 %%-%ds \u2919 %%-%ds %%s" % (maxlen, maxstar, maxwatch, maxfork)
fmt = u"%%-%ds \u2605 %%-%ds \u2919 %%-%ds %%s" % (maxlen, maxstar, maxfork)
for repo in repos:
color = [attr.normal]
if repo.private:
color.append(fgcolor.red)
if repo.fork:
if opts['--no-forks']:
continue
color.append(attr.faint)
name = repo.name
if opts['<user>'][0] != repo.owner.login:
name = '%s/%s' % (repo.owner.login, name)
msg = wrap(fmt % (name, repo.stargazers_count, repo.forks_count, repo.description), *color)
if not PY3:
msg = msg.encode('utf-8')
print(msg)
@command
def say(self, opts):
"""[<msg>]
Let the octocat speak to you"""
msg = github3.octocat(opts['<msg>'] or None)
if isinstance(msg, bytes):
msg = msg.decode('utf-8')
print(msg)
@command
@wants_parent
def setup_goblet(self, opts):
"""\nSet up goblet config based on GitHub config"""
repo = self.repository(opts)
repo = self.parent_repo(repo) or repo
owner = self.gh.user(repo.owner.login)
self.gitm('config', 'goblet.owner-name', (owner.name or owner.login).encode('utf-8'))
if owner.email:
self.gitm('config', 'goblet.owner-mail', owner.email.encode('utf-8'))
self.gitm('config', 'goblet.git-url', repo.git_url)
self.gitm('config', 'goblet.http-url', repo.clone_url)
goblet_dir = os.path.join(self.gitm('rev-parse', '--git-dir').stdout.strip(), 'goblet')
if not os.path.exists(goblet_dir):
os.mkdir(goblet_dir, 0o777)
os.chmod(goblet_dir, 0o777)
@command
def set_origin(self, opts, repo=None, remote='origin'):
"""[--ssh|--http|--git] [--triangular [--upstream-branch=<branch>]]
Set the remote 'origin' to github.
If this is a fork, set the remote 'upstream' to the parent"""
if not repo:
repo = self.repository(opts)
# Is this mine? No? Do I have a clone?
if repo.owner.login != self.my_login:
my_repo = self.gh.repository(self.me, repo.name)
if my_repo:
repo = my_repo
url = self.clone_url(repo, opts)
if self.git('config', 'remote.%s.url' % remote).stdout.strip() != url:
print("Pointing %s to %s" % (remote, url))
self.gitm('config', 'remote.%s.url' % remote, url)
self.gitm('config', '--replace-all', 'remote.%s.fetch' % remote, '+refs/heads/*:refs/remotes/%s/*' % remote)
if repo.fork:
parent = self.parent_repo(repo)
url = self.clone_url(parent, opts)
if self.git('config', 'remote.upstream.url').stdout.strip() != url:
print("Pointing upstream to %s" % url)
self.gitm('config', 'remote.upstream.url', url)
self.gitm('config', 'remote.upstream.fetch', '+refs/heads/*:refs/remotes/upstream/*')
else:
# If issues are enabled, fetch pull requests
try:
list(repo.issues(number=1))
except github3.GitHubError:
pass
else:
self.gitm('config', '--add', 'remote.%s.fetch' % remote, '+refs/pull/*/head:refs/pull/*/head')
if self.git('ls-remote', remote).stdout.strip():
self.gitm('fetch', remote, redirect=False)
if repo.fork:
self.gitm('fetch', 'upstream', redirect=False)
if remote != 'origin':
return
self.set_tracking_branches(remote, upstream="upstream", triangular=opts['--triangular'], upstream_branch=opts['--upstream-branch'])
@command
def status(self, opts):
"""\nDisplay current and historical GitHub service status"""
api = github3.GitHubStatus()
messages = api.messages()
if not messages:
messages = [api.last_message()]
status = api.status()
status.update({
'body': 'Current status: %s' % status['status'],
'created_on': status['last_updated'],
})
messages.insert(0, status)
for message in reversed(messages):
ts = time.strptime(message['created_on'], '%Y-%m-%dT%H:%M:%SZ')
offset = time.timezone
if time.daylight:
offset = time.altzone
color = {'good': fgcolor.green, 'minor': fgcolor.yellow, 'major': fgcolor.red}[message['status']]
ts = datetime.datetime(ts.tm_year, ts.tm_mon, ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec) - datetime.timedelta(0,offset)
print('%s %s %s' % (wrap(ts.strftime('%Y-%m-%d %H:%M'), attr.faint), wrap("%-5s" % message['status'], color), message['body']))
@command
def unprotect(self, opts):
""" <branch>
Remove branch protections from a branch"""
repo = self.repository(opts)
repo.branch(opts['<branch>']).unprotect()
@command
def whoami(self, opts):
"""\nDisplay GitHub user info"""
opts['<user>'] = [self.my_login]
self.whois(opts)
@command
def whois(self, opts):
"""<user>...
Display GitHub user info"""
for user_ in opts['<user>']:
user = self.gh.user(user_)
if not user:
print("No such user: %s" % user_)
continue
emails = {}
if user.login == self.my_login:
for email in self.gh.emails():
emails[email.email] = email
print(wrap(user.name or user.login, attr.bright, attr.underline))
print('Profile %s' % user.html_url)
if user.email:
unverified = ''
if user.email in emails and not emails[user.email].verified:
unverified = ' ' + wrap('(not verified)', fgcolor.red, attr.bright)
print('Email %s%s' % (user.email, unverified))
for email in emails:
if email == user.email:
continue
unverified = ''
if not emails[email].verified:
unverified = ' ' + wrap('(not verified)', fgcolor.red, attr.bright)
print(' %s%s' % (email, unverified))
if user.blog:
print('Blog %s' % user.blog)
if user.location:
print('Location %s' % user.location)
if user.company:
print('Company %s' % user.company)
print('Repos %d' % user.public_repos_count)
print('Gists %d' % user.public_gists_count)
if user.login == self.my_login:
keys = self.gh.keys()
else:
keys = user.keys()
for pkey in keys:
algo, key = pkey.key.split()[:2]
algo = algo[4:].upper()
if getattr(pkey, 'title', None):
print("%s key%s...%s (%s)" % (algo, ' ' * (6 - len(algo)), key[-10:], pkey.title))
else:
print("%s key%s...%s" % (algo, ' ' * (6 - len(algo)), key[-10:]))
if user.login == self.my_login:
orgs = self.gh.organizations()
else:
orgs = list(user.organizations())
if orgs:
print("Member of %s" % ', '.join([x.login for x in orgs]))
if user.type == 'Organization':
print('Members:')
for member in self.gh.organization(user.login).members():
print(" - %s" % member.login)
def prompt_for_2fa(user, cache={}):
"""Callback for github3.py's 2FA support."""
if cache.get(user, (0,))[0] < time.time() - 30:
code = raw_input("Two-Factor Authentication Code: ").strip()
cache[user] = (time.time(), code)
return cache[user][1]
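# Note: github3.py calls this callback whenever the API demands a one-time
# password; the 30-second cache above avoids re-prompting for each request in a
# burst of 2FA-protected calls.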
| []
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | python | 1 | 0 | |
setup.py | import asyncio
import os
import mongoengine
from dotenv import find_dotenv, load_dotenv
from data.model.guild import Guild
load_dotenv(find_dotenv())
async def setup():
print("STARTING SETUP...")
guild = Guild()
# you should have this set up in the .env file beforehand
guild._id = int(os.environ.get("MAIN_GUILD_ID"))
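# example .env entry (the value is a placeholder, use your own server's ID):
#   MAIN_GUILD_ID=123456789012345678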
# If you're re-running this script to update a value, set case_id
# to the last unused case ID or else it will start over from 1!
guild.case_id = 1
# required for permissions framework!
guild.role_administrator = 123 # put in the role IDs for your server here
guild.role_moderator = 123 # put in the role IDs for your server here
guild.role_birthday = 123 # put in the role IDs for your server here
guild.role_sub_mod = 123 # put in the role IDs for your server here
guild.role_genius = 123 # put in the role IDs for your server here
guild.role_dev = 123 # put in the role IDs for your server here
guild.role_memberone = 123 # put in the role IDs for your server here
guild.role_memberedition = 123 # put in the role IDs for your server here
guild.role_memberpro = 123 # put in the role IDs for your server here
guild.role_memberplus = 123 # put in the role IDs for your server here
# not required if you don't want the /subnews command
guild.role_sub_news = 123 # put in the role IDs for your server here
guild.channel_reports = 123 # put in the channel IDs for your server here
# channel where reactions will be logged
guild.channel_emoji_log = 123 # put in the channel IDs for your server here
# channel for private mod logs
guild.channel_private = 123 # put in the channel IDs for your server here
# channel where self-assignable roles will be posted
guild.channel_reaction_roles = 123 # put in the channel IDs for your server here
# rules-and-info channel
guild.channel_rules = 123 # put in the channel IDs for your server here
# not required
guild.channel_applenews = 123 # put in the channel IDs for your server here
# channel for public mod logs
guild.channel_public = 123 # put in the channel IDs for your server here
# optional, used for the /subnews command
guild.channel_subnews = 123 # put in the channel IDs for your server here
# optional; only needed for the /issue command
guild.channel_common_issues = 123 # put in the channel IDs for your server here
# #general, required for permissions
guild.channel_general = 123 # put in the channel IDs for your server here
# required for filter
guild.channel_development = 123 # put in the channel IDs for your server here
# required, #bot-commands channel
guild.channel_botspam = 123 # put in the channel IDs for your server here
# optional, needed for booster #emote-suggestions channel
guild.channel_booster_emoji = 123 # put in the channel IDs for your server here
# you can fill these in with IDs if you want, or you can use commands later
guild.logging_excluded_channels = [] # put in a channel if you want (ignored in logging)
guild.filter_excluded_channels = [] # put in a channel if you want (ignored in filter)
guild.filter_excluded_guilds = [] # put guild ID to whitelist in invite filter if you want
guild.nsa_guild_id = 123 # you can leave this as is if you don't want Blootooth (message mirroring system)
guild.save()
print("DONE")
if __name__ == "__main__":
if os.environ.get("DB_CONNECTION_STRING") is None:
mongoengine.register_connection(
host=os.environ.get("DB_HOST"), port=int(os.environ.get("DB_PORT")), alias="default", name="botty")
else:
mongoengine.register_connection(
host=os.environ.get("DB_CONNECTION_STRING"), alias="default", name="botty")
res = asyncio.get_event_loop().run_until_complete( setup() )
| []
| []
| [
"DB_PORT",
"MAIN_GUILD_ID",
"DB_HOST",
"DB_CONNECTION_STRING"
]
| [] | ["DB_PORT", "MAIN_GUILD_ID", "DB_HOST", "DB_CONNECTION_STRING"] | python | 4 | 0 | |
k8sclient/k8s_client_limit_range.go | package k8sclient
import (
"os"
"strconv"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"log"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
)
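// CreateLimitRange installs a default container LimitRange in the given
// namespace. It returns early when the namespace is empty or when
// ENABLE_LIMITRANGES is not set to a true value; the request/limit defaults
// come from DEFAULT_CPU_REQ, DEFAULT_CPU_LIM, DEFAULT_MEM_REQ and
// DEFAULT_MEM_LIM, with built-in fallbacks when those are unset or invalid.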
func CreateLimitRange(namespace string) {
if namespace == "" {
return
}
enableLimitRanges, err := strconv.ParseBool(os.Getenv("ENABLE_LIMITRANGES"))
if err != nil || !enableLimitRanges {
return
}
defaultCpuReq, err := strconv.Atoi(os.Getenv("DEFAULT_CPU_REQ"))
if err != nil || defaultCpuReq <= 0 {
defaultCpuReq = 20
}
defaultCpuReqQty := resource.NewQuantity(int64(defaultCpuReq), resource.BinarySI)
defaultCpuLimit, err := strconv.Atoi(os.Getenv("DEFAULT_CPU_LIM"))
if err != nil || defaultCpuLimit <= 0 {
defaultCpuLimit = 150
}
defaultCpuLimitQty := resource.NewQuantity(int64(defaultCpuLimit), resource.BinarySI)
defaultMemReq, err := strconv.Atoi(os.Getenv("DEFAULT_MEM_REQ"))
if err != nil || defaultMemReq <= 0 {
defaultMemReq = 25
}
defaultMemReqQty := resource.NewQuantity(int64(defaultMemReq), resource.BinarySI)
defaultMemLimit, err := strconv.Atoi(os.Getenv("DEFAULT_MEM_LIM"))
if err != nil || defaultMemLimit <= 0 {
defaultMemLimit = 120
}
defaultMemLimitQty := resource.NewQuantity(int64(defaultMemLimit), resource.BinarySI)
// build LimitRange
lR := &v1.LimitRange{Spec: v1.LimitRangeSpec{
Limits:[]v1.LimitRangeItem{
{Type: "Container",
DefaultRequest: v1.ResourceList{v1.ResourceMemory: *defaultMemReqQty, v1.ResourceCPU: *defaultCpuReqQty},
Default: v1.ResourceList{v1.ResourceMemory: *defaultMemLimitQty, v1.ResourceCPU: *defaultCpuLimitQty}},
}}}
// write to Cluster
client := getK8sClient()
_, err = client.CoreV1().LimitRanges(namespace).Create(lR)
if err != nil && !k8serrors.IsAlreadyExists(err) {
log.Fatalf("Error creating LimitRange for Namespace %s. Error was: %s", namespace, err.Error())
}
} | [
"\"ENABLE_LIMITRANGES\"",
"\"DEFAULT_CPU_REQ\"",
"\"DEFAULT_CPU_LIM\"",
"\"DEFAULT_MEM_REQ\"",
"\"DEFAULT_MEM_LIM\""
]
| []
| [
"ENABLE_LIMITRANGES",
"DEFAULT_CPU_REQ",
"DEFAULT_MEM_REQ",
"DEFAULT_CPU_LIM",
"DEFAULT_MEM_LIM"
]
| [] | ["ENABLE_LIMITRANGES", "DEFAULT_CPU_REQ", "DEFAULT_MEM_REQ", "DEFAULT_CPU_LIM", "DEFAULT_MEM_LIM"] | go | 5 | 0 | |
selfdrive/manager.py | #!/usr/bin/env python3.5
# manager will start all required processes (Python, C, C++)
# for testing see also https://medium.com/@comma_ai/open-sourcing-openpilot-development-tools-a5bc427867b6
import os
import sys
import fcntl
import errno
import signal
import subprocess
#import logging
#instead of setting the PYTHONPATH, it is better to set it here:
sys.path.append("/home/pi/openpilot")
from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode("utf-8"))
except (OSError, IOError):
pass
os._exit(os.wait()[1])
# update NEOS routine
# not required on Rpi
if __name__ == "__main__":
neos_update_required = os.path.isfile("/init.qcom.rc") \
and (not os.path.isfile("/VERSION") or int(open("/VERSION").read()) < 8)
if neos_update_required:
# update continue.sh before updating NEOS
if os.path.isfile(os.path.join(BASEDIR, "scripts", "continue.sh")):
from shutil import copyfile
copyfile(os.path.join(BASEDIR, "scripts", "continue.sh"), "/data/data/com.termux/files/continue.sh")
# run the updater
print("Starting NEOS updater")
subprocess.check_call(["git", "clean", "-xdf"], cwd=BASEDIR)
os.system(os.path.join(BASEDIR, "installer", "updater", "updater"))
raise Exception("NEOS outdated")
elif os.path.isdir("/data/neoupdate"):
from shutil import rmtree
rmtree("/data/neoupdate")
unblock_stdout()
import glob
import shutil
import hashlib
import importlib
import subprocess
import traceback
from multiprocessing import Process
import zmq
from setproctitle import setproctitle #pylint: disable=no-name-in-module
from common.params import Params
import cereal
ThermalStatus = cereal.log.ThermalData.ThermalStatus
from selfdrive.services import service_list
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.registration import register
from selfdrive.version import version, dirty
import selfdrive.crash as crash
from selfdrive.loggerd.config import ROOT
cloudlog.info('Cloudlog info level is activated')
# comment out anything you don't want to run
# compilation in orb is an issue because it uses screen functions and an include file that is not available in headless env
# loggerd is compiled stuff so don't run it, it's only logging anyway...
# gpsd replaced by gps.py
# and geofence added
managed_processes = {
"thermald": "selfdrive.thermald",
# "uploader": "selfdrive.loggerd.uploader",
"controlsd": "selfdrive.controls.controlsd",
"radard": "selfdrive.controls.radard",
# "ubloxd": "selfdrive.locationd.ubloxd",
# "mapd": "selfdrive.mapd.mapd",
# "loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
# "tombstoned": "selfdrive.tombstoned",
# "logcatd": ("selfdrive/logcatd", ["./logcatd"]),
# "proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": "selfdrive.boardd.boardd", # use python version
# "boardd": ("selfdrive/boardd", ["./boardd"]), # use python version
"pandad": "selfdrive.pandad",
# "ui": ("selfdrive/ui", ["./start.sh"]),
"calibrationd": "selfdrive.locationd.calibrationd",
# "visiond": ("selfdrive/visiond", ["./visiond"]),
# "sensord": ("selfdrive/sensord", ["./sensord"]),
# "gpsd": ("selfdrive/sensord", ["./gpsd"]),
"gpsd": "selfdrive.sensord.gps",
"geofence": "selfdrive.sensord.geofence",
# "orbd": ("selfdrive/orbd", ["./orbd_wrapper.sh"]),
# "updated": "selfdrive.updated",
}
android_packages = ("ai.comma.plus.offroad", "ai.comma.plus.frame")
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = ['visiond']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
persistent_processes = [
'thermald',
'logmessaged',
'logcatd',
'tombstoned',
'uploader',
'ui',
'gpsd',
'geofence',
'updated',
]
car_started_processes = [
'controlsd',
'loggerd',
'sensord',
'radard',
'calibrationd',
'visiond',
'proclogd',
'ubloxd',
'orbd',
'mapd',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
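# Hypothetical registration sketch (the process name and module are made up):
#   register_managed_process("exampled", "selfdrive.example", car_started=True)
# car_started processes only run while the car is started; the others are kept
# alive persistently by manager_thread().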
# ****************** process management functions ******************
def launcher(proc, gctx):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# exec the process
mod.main(gctx)
except KeyboardInterrupt:
cloudlog.warning("child %s got SIGINT" % proc)
except Exception:
# can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
# native processes may fail
try:
os.execvp(pargs[0], pargs)
except OSError:
cloudlog.info("Warning: native process not started: " + pargs[0] + " in directory " + cwd)
def start_managed_process(name):
if name in running or name not in managed_processes:
# cloudlog.info("name not in managed processes: %s" % name)
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc, gctx))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
else:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
# strange, why try again ?? comment out
# subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
else:
running[name].terminate()
# give it 5 seconds to die
running[name].join(5.0)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
running[name].join(15.0)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def pm_apply_packages(cmd):
for p in android_packages:
system("pm %s %s" % (cmd, p))
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("Killing " + name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
global gctx
if should_register:
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
dongle_id_str = dongle_id.decode()
cloudlog.info("dongle id is " + dongle_id_str)
os.environ['DONGLE_ID'] = dongle_id_str
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id_str, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id_str)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# set gctx
gctx = {}
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output[-1024:],
returncode=e.returncode)
#--- manager thread --------------------------------------------
def manager_thread():
# now loop
context = zmq.Context()
thermal_sock = messaging.sub_sock(context, service_list['thermal'].port)
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
# this is closed software so it cannot run on a RPi
logger_dead = False
try:
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
except OSError:
# no worries, it is only logging
logger_dead = True
for p in persistent_processes:
start_managed_process(p)
# start frame
pm_apply_packages('enable')
system("am start -n ai.comma.plus.frame/.MainActivity")
if os.getenv("NOBOARD") is None:
cloudlog.info("start pandad and boardd")
start_managed_process("pandad")
params = Params()
while 1:
# read thermal msg to check cpu temperature and free space
msg = messaging.recv_sock(thermal_sock, wait=True)
# uploader is gated based on the phone temperature
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
kill_managed_process("uploader")
else:
start_managed_process("uploader")
if msg.thermal.freeSpace < 0.05:
logger_dead = True
# if thermal msg is available, start all car_started processes
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in car_started_processes:
kill_managed_process(p)
# check if pandad has finished and boardd is not running yet
# pandad is updating the panda module and needs to be finished before we can start boardd
# a process gives exit code 0 if it ended correctly
# exit code == None is process is still running
if 'pandad' in running and 'boardd' not in running:
if running['pandad'].exitcode == 0:
start_managed_process('boardd')
# check the status of all processes, did any of them die?
# and minimize the number of log messages
cloudMsg = "Running: "
for p in running:
cloudMsg = cloudMsg + " %s %s, " % (p, running[p])
# cloudlog.debug(" Running %s %s" % (p, running[p]))
cloudlog.info(cloudMsg)
# is this still needed?
if params.get("DoUninstall") == "1":
break
def get_installed_apks():
# use pm command to list all available packages, not required on Rpi
try:
dat = subprocess.check_output(["pm", "list", "packages", "-f"]).strip().split("\n")
except FileNotFoundError:
# make empty list
dat = []
ret = {}
for x in dat:
if x.startswith("package:"):
v,k = x.split("package:")[1].split("=")
ret[k] = v
return ret
def install_apk(path):
# can only install from world readable path
install_path = "/sdcard/%s" % os.path.basename(path)
shutil.copyfile(path, install_path)
ret = subprocess.call(["pm", "install", "-r", install_path])
os.remove(install_path)
return ret == 0
def update_apks():
# install apks
installed = get_installed_apks()
install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
for apk in install_apks:
app = os.path.basename(apk)[:-4]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed.keys():
apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
if not os.path.exists(apk_path):
continue
# hash the raw bytes so sha1() also works under Python 3
h1 = hashlib.sha1(open(apk_path, 'rb').read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app], 'rb').read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
success = install_apk(apk_path)
if not success:
cloudlog.info("needing to uninstall %s" % app)
system("pm uninstall %s" % app)
success = install_apk(apk_path)
assert success
def manager_update():
if os.path.exists(os.path.join(BASEDIR, "vpn")):
cloudlog.info("installing vpn")
os.system(os.path.join(BASEDIR, "vpn", "install.sh"))
update_apks()
def manager_prepare():
# build cereal first
# cereal is capnp stuff for rpc calls to c++ and java
# subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, "cereal"))
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
for p in managed_processes:
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
os.system("service call power 16 i32 0 s16 recovery i32 1")
def xstr(s):
return '' if s is None else str(s)
def main():
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
cloudlog.info('NOLOG=' + xstr(os.getenv("NOLOG")))
cloudlog.info('NOUPLOAD=' + xstr(os.getenv("NOUPLOAD")))
cloudlog.info('NOVISION=' + xstr(os.getenv("NOVISION")))
cloudlog.info('LEAN=' + xstr(os.getenv("LEAN")))
cloudlog.info('NOCONTROL=' + xstr(os.getenv("NOCONTROL")))
cloudlog.info('PASSIVE=' + xstr(os.getenv("PASSIVE")))
cloudlog.info('PREPAREONLY=' + xstr(os.getenv("PREPAREONLY")))
cloudlog.info('BASEDIR=' + xstr(os.getenv("BASEDIR")))
if os.getenv("NOLOG") is not None:
del managed_processes['loggerd']
del managed_processes['tombstoned']
if os.getenv("NOUPLOAD") is not None:
del managed_processes['uploader']
if os.getenv("NOVISION") is not None:
del managed_processes['visiond']
if os.getenv("LEAN") is not None:
del managed_processes['uploader']
del managed_processes['loggerd']
del managed_processes['logmessaged']
del managed_processes['logcatd']
del managed_processes['tombstoned']
del managed_processes['proclogd']
if os.getenv("NOCONTROL") is not None:
del managed_processes['controlsd']
del managed_processes['radard']
# support additional internal only extensions
try:
import selfdrive.manager_extensions
selfdrive.manager_extensions.register(register_managed_process)
except ImportError:
pass
params = Params()
params.manager_start()
# set unset params
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("RecordFront") is None:
params.put("RecordFront", "0")
if params.get("IsFcwEnabled") is None:
params.put("IsFcwEnabled", "1")
if params.get("HasAcceptedTerms") is None:
params.put("HasAcceptedTerms", "0")
if params.get("IsUploadVideoOverCellularEnabled") is None:
params.put("IsUploadVideoOverCellularEnabled", "1")
if params.get("IsDriverMonitoringEnabled") is None:
params.put("IsDriverMonitoringEnabled", "1")
if params.get("IsGeofenceEnabled") is None:
params.put("IsGeofenceEnabled", "-1")
if params.get("SpeedLimitOffset") is None:
params.put("SpeedLimitOffset", "0")
if params.get("LongitudinalControl") is None:
params.put("LongitudinalControl", "0")
if params.get("LimitSetSpeed") is None:
params.put("LimitSetSpeed", "0")
if params.get("GeoFence") is None:
params.put("GeoFence", "")
if params.get("UploadWebsite") is None:
params.put("UploadWebsite", "")
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# put something on screen while we set things up
if os.getenv("PREPAREONLY") is not None:
spinner_proc = None
else:
spinner_text = "chffrplus" if params.get("Passive")=="1" else "openpilot"
cloudlog.info('Try to start C executable Spinner=' + spinner_text)
# TODO: add try/
try:
spinner_proc = subprocess.Popen(["./spinner", "loading %s"%spinner_text],
cwd=os.path.join(BASEDIR, "selfdrive", "ui", "spinner"),
close_fds=True)
except OSError:
      cloudlog.info('C executable Spinner failed with OSError')
spinner_proc = False
try:
manager_update()
manager_init()
manager_prepare()
finally:
if spinner_proc:
spinner_proc.terminate()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall") == "1":
uninstall()
if __name__ == "__main__":
cloudlog.info('Start main()')
main()
# manual exit because we are forked
sys.exit(0)
| []
| []
| [
"BASEDIR",
"CLEAN",
"NOBOARD",
"NOUPLOAD",
"PASSIVE",
"PREPAREONLY",
"LEAN",
"NOVISION",
"DONGLE_ID",
"NOLOG",
"NOCONTROL"
]
| [] | ["BASEDIR", "CLEAN", "NOBOARD", "NOUPLOAD", "PASSIVE", "PREPAREONLY", "LEAN", "NOVISION", "DONGLE_ID", "NOLOG", "NOCONTROL"] | python | 11 | 0 | |
pipenv/patched/piptools/repositories/pypi.py | # coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import copy
import hashlib
import os
from contextlib import contextmanager
from functools import partial
from shutil import rmtree
from packaging.requirements import Requirement
from packaging.specifiers import Specifier, SpecifierSet
from .._compat import (
FAVORITE_HASH,
PIP_VERSION,
InstallationError,
InstallRequirement,
Link,
PyPI,
RequirementSet,
RequirementTracker,
Resolver as PipResolver,
SafeFileCache,
TemporaryDirectory,
VcsSupport,
Wheel,
WheelCache,
contextlib,
is_dir_url,
is_file_url,
is_vcs_url,
path_to_url,
pip_version,
url_to_path,
)
from ..cache import CACHE_DIR
from ..click import progressbar
from ..exceptions import NoCandidateFound
from ..logging import log
from ..utils import (
dedup,
clean_requires_python,
create_install_command,
fs_str,
is_pinned_requirement,
is_url_requirement,
lookup_table,
make_install_requirement,
)
from .base import BaseRepository
os.environ["PIP_SHIMS_BASE_MODULE"] = str("pipenv.patched.notpip")
FILE_CHUNK_SIZE = 4096
FileStream = collections.namedtuple("FileStream", "stream size")
class HashCache(SafeFileCache):
"""Caches hashes of PyPI artifacts so we do not need to re-download them
Hashes are only cached when the URL appears to contain a hash in it and the cache key includes
the hash value returned from the server). This ought to avoid ssues where the location on the
server changes."""
def __init__(self, *args, **kwargs):
session = kwargs.pop('session')
self.session = session
kwargs.setdefault('directory', os.path.join(CACHE_DIR, 'hash-cache'))
super(HashCache, self).__init__(*args, **kwargs)
def get_hash(self, location):
        # if there is no location hash (i.e., md5 / sha256 / etc) we don't want to store it
hash_value = None
vcs = VcsSupport()
orig_scheme = location.scheme
new_location = copy.deepcopy(location)
if orig_scheme in vcs.all_schemes:
new_location.url = new_location.url.split("+", 1)[-1]
can_hash = new_location.hash
if can_hash:
# hash url WITH fragment
hash_value = self.get(new_location.url)
if not hash_value:
hash_value = self._get_file_hash(new_location) if not new_location.url.startswith("ssh") else None
hash_value = hash_value.encode('utf8') if hash_value else None
if can_hash:
self.set(new_location.url, hash_value)
return hash_value.decode('utf8') if hash_value else None
def _get_file_hash(self, location):
h = hashlib.new(FAVORITE_HASH)
with open_local_or_remote_file(location, self.session) as (fp, size):
for chunk in iter(lambda: fp.read(8096), b""):
h.update(chunk)
return ":".join([FAVORITE_HASH, h.hexdigest()])
class PyPIRepository(BaseRepository):
DEFAULT_INDEX_URL = PyPI.simple_url
"""
The PyPIRepository will use the provided Finder instance to lookup
packages. Typically, it looks up packages on PyPI (the default implicit
config), but any other PyPI mirror can be used if index_urls is
changed/configured on the Finder.
"""
def __init__(self, pip_args, session=None, build_isolation=False, use_json=False):
self.build_isolation = build_isolation
self.use_json = use_json
# Use pip's parser for pip.conf management and defaults.
# General options (find_links, index_url, extra_index_url, trusted_host,
# and pre) are deferred to pip.
command = create_install_command()
self.options, _ = command.parse_args(pip_args)
if session is None:
session = command._build_session(self.options)
self.session = session
self.finder = command._build_package_finder(
options=self.options, session=self.session
)
# Caches
# stores project_name => InstallationCandidate mappings for all
# versions reported by PyPI, so we only have to ask once for each
# project
self._available_candidates_cache = {}
# stores InstallRequirement => list(InstallRequirement) mappings
# of all secondary dependencies for the given requirement, so we
# only have to go to disk once for each requirement
self._dependencies_cache = {}
self._json_dep_cache = {}
# stores *full* path + fragment => sha256
self._hash_cache = HashCache(session=session)
# Setup file paths
self.freshen_build_caches()
self._download_dir = fs_str(os.path.join(CACHE_DIR, "pkgs"))
self._wheel_download_dir = fs_str(os.path.join(CACHE_DIR, "wheels"))
def freshen_build_caches(self):
"""
Start with fresh build/source caches. Will remove any old build
caches from disk automatically.
"""
self._build_dir = TemporaryDirectory(fs_str("build"))
self._source_dir = TemporaryDirectory(fs_str("source"))
@property
def build_dir(self):
return self._build_dir.name
@property
def source_dir(self):
return self._source_dir.name
def clear_caches(self):
rmtree(self._download_dir, ignore_errors=True)
rmtree(self._wheel_download_dir, ignore_errors=True)
def find_all_candidates(self, req_name):
if req_name not in self._available_candidates_cache:
candidates = self.finder.find_all_candidates(req_name)
self._available_candidates_cache[req_name] = candidates
return self._available_candidates_cache[req_name]
def find_best_match(self, ireq, prereleases=None):
"""
Returns a Version object that indicates the best match for the given
InstallRequirement according to the external repository.
"""
if ireq.editable or is_url_requirement(ireq):
return ireq # return itself as the best match
all_candidates = clean_requires_python(self.find_all_candidates(ireq.name))
candidates_by_version = lookup_table(
all_candidates, key=lambda c: c.version, unique=True
)
try:
matching_versions = ireq.specifier.filter((candidate.version for candidate in all_candidates),
prereleases=prereleases)
except TypeError:
matching_versions = [candidate.version for candidate in all_candidates]
# Reuses pip's internal candidate sort key to sort
matching_candidates = [candidates_by_version[ver] for ver in matching_versions]
if not matching_candidates:
raise NoCandidateFound(ireq, all_candidates, self.finder)
if PIP_VERSION < (19, 1):
best_candidate = max(
matching_candidates, key=self.finder._candidate_sort_key
)
elif PIP_VERSION < (19, 2):
evaluator = self.finder.candidate_evaluator
best_candidate = evaluator.get_best_candidate(matching_candidates)
elif PIP_VERSION < (19, 3):
evaluator = self.finder.make_candidate_evaluator(ireq.name)
best_candidate = evaluator.get_best_candidate(matching_candidates)
else:
evaluator = self.finder.make_candidate_evaluator(ireq.name)
best_candidate_result = evaluator.compute_best_candidate(
matching_candidates
)
best_candidate = best_candidate_result.best_candidate
# Turn the candidate into a pinned InstallRequirement
return make_install_requirement(
best_candidate.project,
best_candidate.version,
ireq.extras,
ireq.markers,
constraint=ireq.constraint,
)
def get_dependencies(self, ireq):
json_results = set()
if self.use_json:
try:
json_results = self.get_json_dependencies(ireq)
except TypeError:
json_results = set()
legacy_results = self.get_legacy_dependencies(ireq)
json_results.update(legacy_results)
return json_results
def get_json_dependencies(self, ireq):
if not (is_pinned_requirement(ireq)):
raise TypeError('Expected pinned InstallRequirement, got {}'.format(ireq))
def gen(ireq):
if self.DEFAULT_INDEX_URL not in self.finder.index_urls:
return
url = 'https://pypi.org/pypi/{0}/json'.format(ireq.req.name)
releases = self.session.get(url).json()['releases']
matches = [
r for r in releases
if '=={0}'.format(r) == str(ireq.req.specifier)
]
if not matches:
return
release_requires = self.session.get(
'https://pypi.org/pypi/{0}/{1}/json'.format(
ireq.req.name, matches[0],
),
).json()
try:
requires_dist = release_requires['info']['requires_dist']
except KeyError:
return
for requires in requires_dist:
i = InstallRequirement.from_line(requires)
if 'extra' not in repr(i.markers):
yield i
try:
if ireq not in self._json_dep_cache:
self._json_dep_cache[ireq] = [g for g in gen(ireq)]
return set(self._json_dep_cache[ireq])
except Exception:
return set()
def resolve_reqs(self, download_dir, ireq, wheel_cache):
results = None
ireq.isolated = self.build_isolation
ireq._wheel_cache = wheel_cache
if ireq and not ireq.link:
ireq.populate_link(self.finder, False, False)
if ireq.link and not ireq.link.is_wheel:
ireq.ensure_has_source_dir(self.source_dir)
if PIP_VERSION < (10,):
reqset = RequirementSet(
self.build_dir,
self.source_dir,
download_dir=download_dir,
wheel_download_dir=self._wheel_download_dir,
session=self.session,
ignore_installed=True,
ignore_compatibility=False,
wheel_cache=wheel_cache,
)
results = reqset._prepare_file(self.finder, ireq, ignore_requires_python=True)
else:
from pip_shims.shims import RequirementPreparer
preparer_kwargs = {
"build_dir": self.build_dir,
"src_dir": self.source_dir,
"download_dir": download_dir,
"wheel_download_dir": self._wheel_download_dir,
"progress_bar": "off",
"build_isolation": self.build_isolation,
}
resolver_kwargs = {
"finder": self.finder,
"session": self.session,
"upgrade_strategy": "to-satisfy-only",
"force_reinstall": False,
"ignore_dependencies": False,
"ignore_requires_python": True,
"ignore_installed": True,
"use_user_site": False,
"ignore_compatibility": False,
"use_pep517": True,
}
make_install_req_kwargs = {"isolated": False, "wheel_cache": wheel_cache}
if PIP_VERSION < (19, 3):
resolver_kwargs.update(**make_install_req_kwargs)
else:
from pipenv.vendor.pip_shims.shims import install_req_from_req_string
make_install_req = partial(
install_req_from_req_string, **make_install_req_kwargs
)
resolver_kwargs["make_install_req"] = make_install_req
del resolver_kwargs["use_pep517"]
if PIP_VERSION >= (20,):
preparer_kwargs["session"] = self.session
del resolver_kwargs["session"]
resolver = None
preparer = None
reqset = None
with RequirementTracker() as req_tracker:
# Pip 18 uses a requirement tracker to prevent fork bombs
if req_tracker:
preparer_kwargs["req_tracker"] = req_tracker
preparer = RequirementPreparer(**preparer_kwargs)
resolver_kwargs["preparer"] = preparer
reqset = RequirementSet()
ireq.is_direct = True
resolver = PipResolver(**resolver_kwargs)
require_hashes = False
if PIP_VERSION < (20,):
resolver.require_hashes = require_hashes
results = resolver._resolve_one(reqset, ireq)
else:
results = resolver._resolve_one(reqset, ireq, require_hashes)
try:
reqset.cleanup_files()
except (AttributeError, OSError):
pass
results = set(results) if results else set()
return results, ireq
def get_legacy_dependencies(self, ireq):
"""
Given a pinned, URL, or editable InstallRequirement, returns a set of
dependencies (also InstallRequirements, but not necessarily pinned).
They indicate the secondary dependencies for the given requirement.
"""
if not (
ireq.editable or is_url_requirement(ireq) or is_pinned_requirement(ireq)
):
raise TypeError(
"Expected url, pinned or editable InstallRequirement, got {}".format(
ireq
)
)
if ireq not in self._dependencies_cache:
if ireq.editable and (ireq.source_dir and os.path.exists(ireq.source_dir)):
# No download_dir for locally available editable requirements.
                # If a download_dir is passed, pip will unnecessarily
# archive the entire source directory
download_dir = None
elif ireq.link and is_vcs_url(ireq.link):
# No download_dir for VCS sources. This also works around pip
# using git-checkout-index, which gets rid of the .git dir.
download_dir = None
else:
download_dir = self._download_dir
if not os.path.isdir(download_dir):
os.makedirs(download_dir)
if not os.path.isdir(self._wheel_download_dir):
os.makedirs(self._wheel_download_dir)
wheel_cache = WheelCache(CACHE_DIR, self.options.format_control)
prev_tracker = os.environ.get("PIP_REQ_TRACKER")
try:
results, ireq = self.resolve_reqs(download_dir, ireq, wheel_cache)
self._dependencies_cache[ireq] = results
finally:
if "PIP_REQ_TRACKER" in os.environ:
if prev_tracker:
os.environ["PIP_REQ_TRACKER"] = prev_tracker
else:
del os.environ["PIP_REQ_TRACKER"]
# WheelCache.cleanup() introduced in pip==10.0.0
if PIP_VERSION >= (10,):
wheel_cache.cleanup()
return self._dependencies_cache[ireq]
def get_hashes(self, ireq):
"""
Given an InstallRequirement, return a set of hashes that represent all
of the files for a given requirement. Unhashable requirements return an
empty set. Unpinned requirements raise a TypeError.
"""
if ireq.link:
link = ireq.link
if is_vcs_url(link) or (is_file_url(link) and is_dir_url(link)):
# Return empty set for unhashable requirements.
# Unhashable logic modeled on pip's
# RequirementPreparer.prepare_linked_requirement
return set()
if is_url_requirement(ireq):
# Directly hash URL requirements.
# URL requirements may have been previously downloaded and cached
# locally by self.resolve_reqs()
cached_path = os.path.join(self._download_dir, link.filename)
if os.path.exists(cached_path):
cached_link = Link(path_to_url(cached_path))
else:
cached_link = link
return {self._hash_cache._get_file_hash(cached_link)}
if not is_pinned_requirement(ireq):
raise TypeError("Expected pinned requirement, got {}".format(ireq))
# We need to get all of the candidates that match our current version
# pin, these will represent all of the files that could possibly
# satisfy this constraint.
matching_candidates = (
c for c in clean_requires_python(self.find_all_candidates(ireq.name))
if c.version in ireq.specifier
)
log.debug(" {}".format(ireq.name))
def get_candidate_link(candidate):
if PIP_VERSION < (19, 2):
return candidate.location
return candidate.link
return {
h for h in
map(lambda c: self._hash_cache.get_hash(get_candidate_link(c)), matching_candidates)
if h is not None
}
@contextmanager
def allow_all_wheels(self):
"""
Monkey patches pip.Wheel to allow wheels from all platforms and Python versions.
        This also saves the candidate cache and sets a new one, or else the results from
the previous non-patched calls will interfere.
"""
def _wheel_supported(self, tags=None):
# Ignore current platform. Support everything.
return True
def _wheel_support_index_min(self, tags=None):
# All wheels are equal priority for sorting.
return 0
original_wheel_supported = Wheel.supported
original_support_index_min = Wheel.support_index_min
original_cache = self._available_candidates_cache
Wheel.supported = _wheel_supported
Wheel.support_index_min = _wheel_support_index_min
self._available_candidates_cache = {}
try:
yield
finally:
Wheel.supported = original_wheel_supported
Wheel.support_index_min = original_support_index_min
self._available_candidates_cache = original_cache
@contextmanager
def open_local_or_remote_file(link, session):
"""
Open local or remote file for reading.
:type link: pip.index.Link
:type session: requests.Session
:raises ValueError: If link points to a local directory.
:return: a context manager to a FileStream with the opened file-like object
"""
url = link.url_without_fragment
if is_file_url(link):
# Local URL
local_path = url_to_path(url)
if os.path.isdir(local_path):
raise ValueError("Cannot open directory for read: {}".format(url))
else:
st = os.stat(local_path)
with open(local_path, "rb") as local_file:
yield FileStream(stream=local_file, size=st.st_size)
else:
# Remote URL
headers = {"Accept-Encoding": "identity"}
response = session.get(url, headers=headers, stream=True)
# Content length must be int or None
try:
content_length = int(response.headers["content-length"])
except (ValueError, KeyError, TypeError):
content_length = None
try:
yield FileStream(stream=response.raw, size=content_length)
finally:
response.close()
| []
| []
| [
"PIP_SHIMS_BASE_MODULE",
"PIP_REQ_TRACKER"
]
| [] | ["PIP_SHIMS_BASE_MODULE", "PIP_REQ_TRACKER"] | python | 2 | 0 | |
example/google_im2txt/102flower/train.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train the model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import configuration
import show_and_tell_model
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '3' # only /gpu:gpu_id is visible
FLAGS = tf.app.flags.FLAGS
tf.flags.DEFINE_string("input_file_pattern", "",
"File pattern of sharded TFRecord input files.")
tf.flags.DEFINE_string("inception_checkpoint_file", "",
"Path to a pretrained inception_v3 model.")
tf.flags.DEFINE_string("train_dir", "",
"Directory for saving and loading model checkpoints.")
tf.flags.DEFINE_boolean("train_inception", False,
"Whether to train inception submodel variables.")
tf.flags.DEFINE_integer("number_of_steps", 1000, "Number of training steps.")
tf.flags.DEFINE_integer("log_every_n_steps", 1,
"Frequency at which loss and global step are logged.")
tf.logging.set_verbosity(tf.logging.INFO)
def main(unused_argv):
assert FLAGS.input_file_pattern, "--input_file_pattern is required"
assert FLAGS.train_dir, "--train_dir is required"
model_config = configuration.ModelConfig()
model_config.input_file_pattern = FLAGS.input_file_pattern
model_config.inception_checkpoint_file = FLAGS.inception_checkpoint_file
training_config = configuration.TrainingConfig()
# Create training directory.
train_dir = FLAGS.train_dir
if not tf.gfile.IsDirectory(train_dir):
tf.logging.info("Creating training directory: %s", train_dir)
tf.gfile.MakeDirs(train_dir)
# Build the TensorFlow graph.
g = tf.Graph()
with g.as_default():
# Build the model.
model = show_and_tell_model.ShowAndTellModel(
model_config, mode="train", train_inception=FLAGS.train_inception)
model.build()
# Set up the learning rate.
learning_rate_decay_fn = None
if FLAGS.train_inception:
learning_rate = tf.constant(training_config.train_inception_learning_rate)
else:
learning_rate = tf.constant(training_config.initial_learning_rate)
if training_config.learning_rate_decay_factor > 0:
num_batches_per_epoch = (training_config.num_examples_per_epoch /
model_config.batch_size)
decay_steps = int(num_batches_per_epoch *
training_config.num_epochs_per_decay)
def _learning_rate_decay_fn(learning_rate, global_step):
return tf.train.exponential_decay(
learning_rate,
global_step,
decay_steps=decay_steps,
decay_rate=training_config.learning_rate_decay_factor,
staircase=True)
learning_rate_decay_fn = _learning_rate_decay_fn
# Set up the training ops.
train_op = tf.contrib.layers.optimize_loss(
loss=model.total_loss,
global_step=model.global_step,
learning_rate=learning_rate,
optimizer=training_config.optimizer,
clip_gradients=training_config.clip_gradients,
learning_rate_decay_fn=learning_rate_decay_fn)
# Set up the Saver for saving and restoring model checkpoints.
saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep)
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
# Run training.
tf.contrib.slim.learning.train(
train_op,
train_dir,
log_every_n_steps=FLAGS.log_every_n_steps,
graph=g,
global_step=model.global_step,
number_of_steps=FLAGS.number_of_steps,
init_fn=model.init_fn,
saver=saver,
session_config=session_config)
if __name__ == "__main__":
tf.app.run()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
packaging/scripts/build_matlab_local.py | import sys
import shutil
import subprocess
import re
from pathlib import Path
import os
import glob
import argparse
def main():
parser = argparse.ArgumentParser(description="Build MATLAB locally")
parser.add_argument("--matlab-version", type=str, default="R2020a", help="MATLAB version to use")
parser.add_argument("--matlab-root", type=str, default=None, help="MATLAB root location (default is searched)")
parser.add_argument("--semver-full", type=str, default=None, help="Full semver of release")
parser.add_argument("--no-vcpkg", action='store_true')
parser.add_argument("--no-clean", action='store_true')
args = parser.parse_args()
config_header = Path("RobotRaconteurCore/include/RobotRaconteur/RobotRaconteurConfig.h").absolute()
assert config_header.is_file()
if args.semver_full is not None:
ver_str = args.semver_full
semver_tag_regex = r"^(((?:0|[1-9]\d*)\.(?:0|[1-9]\d*)\.(?:0|[1-9]\d*))(-(?:alpha|beta|rc)\d+)?)"
m = re.match(semver_tag_regex,ver_str)
assert m, f"Invalid semver-full {ver_str}"
else:
with open(config_header) as f:
f1 = f.read()
ver_str_m = re.search("ROBOTRACONTEUR_VERSION_TEXT \"(\\d+\\.\\d+\\.\\d+)\"", f1)
ver_str = ver_str_m.group(1)
print("version " + ver_str)
build_path = Path("build").absolute()
if not args.no_clean:
if build_path.is_dir():
shutil.rmtree(str(build_path))
if sys.platform == "win32":
vcpkg_triplet = "x64-windows-static-md"
elif sys.platform == "darwin":
vcpkg_triplet = "x64-osx"
else:
vcpkg_triplet = "x64-linux"
vcpkg_path = Path("vcpkg").absolute()
if not args.no_vcpkg:
vcpkg_libs = "boost-algorithm " \
"boost-array boost-asio "\
"boost-assign boost-atomic boost-bind boost-config boost-container boost-date-time " \
"boost-smart-ptr boost-filesystem boost-foreach boost-format boost-function boost-interprocess " \
"boost-intrusive boost-lexical-cast boost-locale boost-random boost-range boost-regex " \
"boost-scope-exit boost-signals2 boost-thread boost-tuple boost-unordered " \
"boost-utility boost-uuid boost-program-options"
if vcpkg_path.is_dir():
assert vcpkg_path.joinpath(".git").is_dir()
subprocess.check_call("git pull", shell=True, cwd=vcpkg_path)
else:
subprocess.check_call("git clone --depth=1 https://github.com/microsoft/vcpkg.git", shell=True)
if sys.platform == "win32":
subprocess.check_call("bootstrap-vcpkg.bat", shell=True, cwd=vcpkg_path)
subprocess.check_call(f"vcpkg install --triplet {vcpkg_triplet} {vcpkg_libs}" , shell=True, cwd=vcpkg_path)
else:
subprocess.check_call("./bootstrap-vcpkg.sh", shell=True, cwd=vcpkg_path)
subprocess.check_call(f"./vcpkg install --triplet {vcpkg_triplet} {vcpkg_libs}" , shell=True, cwd=vcpkg_path)
matlab_root = None
if args.matlab_root is not None:
matlab_root = Path(args.matlab_root)
else:
matlab_ver = args.matlab_version
assert re.match(r"^R\d{4}[a-z]$", matlab_ver)
if sys.platform == "win32":
import winreg
mw_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, f"SOFTWARE\\MathWorks\\{matlab_ver}\\MATLAB")
matlab_path1 = winreg.QueryValue(mw_key,None)
mw_key.Close()
matlab_path_m = re.match(r"^(.*)\\bin\\.*$", matlab_path1)
assert matlab_path_m
matlab_path = Path(matlab_path_m.group(1))
assert matlab_path.joinpath("bin/matlab.exe").exists()
elif sys.platform == "darwin":
matlab_path = Path(f"/Applications/MATLAB_{matlab_ver}.app")
assert matlab_path.joinpath("bin/matlab").exists()
else:
            matlab_path = Path(os.environ["HOME"]).joinpath(f"MATLAB/{matlab_ver}")
assert matlab_path.joinpath("bin/matlab").exists()
vcpkg_toolchain_file = vcpkg_path.joinpath("scripts/buildsystems/vcpkg.cmake").absolute()
assert vcpkg_toolchain_file.exists()
subprocess.check_call("cmake -G \"Ninja\" -DBUILD_GEN=ON -DBUILD_TEST=ON -DBoost_USE_STATIC_LIBS=ON " \
"-DCMAKE_BUILD_TYPE=Release -DBUILD_MATLAB_MEX=ON Boost_NO_SYSTEM_PATHS=ON " \
f"-DVCPKG_TARGET_TRIPLET={vcpkg_triplet} " \
f"-DCMAKE_TOOLCHAIN_FILE={vcpkg_toolchain_file} " \
f"-DROBOTRACONTEUR_VERSION_SEMVER=\"{ver_str}\" "\
"-S . -B build", shell=True)
subprocess.check_call("cmake --build . --config Release", shell=True, cwd=build_path)
if sys.platform == "linux":
machine="glnxa64"
elif sys.platform == "darwin":
machine="maci64"
elif sys.platform=="win32":
machine="win64"
else:
machine=f"UNKNOWN-{sys.platform}"
    try:
        os.unlink(glob.glob("build/out/Matlab/*.lib")[0])
        os.unlink(glob.glob("build/out/Matlab/*.exp")[0])
        os.unlink(glob.glob("build/out/Matlab/*.pdb")[0])
    except (IndexError, OSError):
        pass
shutil.move(glob.glob("build/out/Matlab")[0],f"build/out/RobotRaconteur-{ver_str}-MATLAB-{machine}")
shutil.copy("LICENSE.txt", f"build/out/RobotRaconteur-{ver_str}-MATLAB-{machine}")
if sys.platform == "win32":
subprocess.check_call(["zip", f"RobotRaconteur-{ver_str}-MATLAB-{machine}.zip", "-FSr", f"RobotRaconteur-{ver_str}-MATLAB-{machine}"], cwd="build/out")
else:
subprocess.check_call(["tar" ,"cvzf", f"RobotRaconteur-{ver_str}-MATLAB-{machine}.tar.gz", f"RobotRaconteur-{ver_str}-MATLAB-{machine}"], cwd="build/out")
if __name__ == "__main__":
main() | []
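# Example invocations (version strings and paths are placeholders):
#
#   python build_matlab_local.py --matlab-version R2020a --semver-full 1.0.0
#   python build_matlab_local.py --matlab-root "C:\Program Files\MATLAB\R2020a" --no-vcpkg --no-clean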
| []
| []
| [] | [] | python | 0 | 0 | |
fiubar/config/wsgi.py | """
WSGI config for fiubar project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"fiubar.config.settings.production")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
webdriver_manager/utils.py | import datetime
import os
import platform
import re
import sys
import requests
from webdriver_manager.archive import Archive
from webdriver_manager.logger import log
class File(object):
def __init__(self, stream):
self.content = stream.content
self.__stream = stream
self.__temp_name = "driver"
@property
def filename(self) -> str:
try:
filename = re.findall("filename=(.+)", self.__stream.headers["content-disposition"])[0]
except KeyError:
filename = f"{self.__temp_name}.zip"
except IndexError:
filename = f"{self.__temp_name}.exe"
if '"' in filename:
filename = filename.replace('"', "")
return filename
def save_file(file: File, directory: str):
os.makedirs(directory, exist_ok=True)
archive_path = f"{directory}{os.sep}{file.filename}"
with open(archive_path, "wb") as code:
code.write(file.content)
return Archive(archive_path)
class OSType(object):
LINUX = "linux"
MAC = "mac"
WIN = "win"
class ChromeType(object):
GOOGLE = 'google-chrome'
CHROMIUM = 'chromium'
MSEDGE = 'edge'
def os_name():
pl = sys.platform
if pl == "linux" or pl == "linux2":
return OSType.LINUX
elif pl == "darwin":
return OSType.MAC
elif pl == "win32":
return OSType.WIN
def os_architecture():
if platform.machine().endswith('64'):
return 64
else:
return 32
def os_type():
return os_name() + str(os_architecture())
def validate_response(resp):
if resp.status_code == 404:
raise ValueError("There is no such driver by url {}".format(resp.url))
elif resp.status_code != 200:
raise ValueError(resp.json())
def write_file(content, path):
with open(path, "wb") as code:
code.write(content)
return path
def download_file(url: str) -> File:
log(f"Trying to download new driver from {url}")
response = requests.get(url, stream=True)
validate_response(response)
return File(response)
def get_date_diff(date1, date2, date_format):
a = datetime.datetime.strptime(date1, date_format)
b = datetime.datetime.strptime(str(date2.strftime(date_format)), date_format)
return (b - a).days
def get_filename_from_response(response, name):
try:
filename = re.findall("filename=(.+)", response.headers["content-disposition"])[0]
except KeyError:
filename = "{}.zip".format(name)
except IndexError:
filename = name + ".exe"
if '"' in filename:
filename = filename.replace('"', "")
return filename
def linux_browser_apps_to_cmd(*apps: str) -> str:
"""Create chrome version command from browser app names.
Result command example:
chromium --version || chromium-browser --version
"""
ignore_errors_cmd_part = ' 2>/dev/null' if os.getenv('WDM_LOG_LEVEL') == '0' else ''
return ' || '.join(list(map(lambda i: f'{i} --version{ignore_errors_cmd_part}', apps)))
def chrome_version(browser_type=ChromeType.GOOGLE):
pattern = r'\d+\.\d+\.\d+'
cmd_mapping = {
ChromeType.GOOGLE: {
OSType.LINUX: linux_browser_apps_to_cmd('google-chrome', 'google-chrome-stable'),
OSType.MAC: r'/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --version',
OSType.WIN: r'reg query "HKEY_CURRENT_USER\Software\Google\Chrome\BLBeacon" /v version'
},
ChromeType.CHROMIUM: {
OSType.LINUX: linux_browser_apps_to_cmd('chromium', 'chromium-browser'),
OSType.MAC: r'/Applications/Chromium.app/Contents/MacOS/Chromium --version',
OSType.WIN: r'reg query "HKLM\SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall\Google Chrome" /v version'
},
ChromeType.MSEDGE: {
OSType.MAC: r'/Applications/Microsoft\ Edge.app/Contents/MacOS/Microsoft\ Edge --version',
OSType.WIN: r'reg query "HKEY_CURRENT_USER\SOFTWARE\Microsoft\Edge\BLBeacon" /v version',
}
}
cmd = cmd_mapping[browser_type][os_name()]
version = None
with os.popen(cmd) as stream:
stdout = stream.read()
version = re.search(pattern, stdout)
if not version:
raise ValueError(f'Could not get version for Chrome with this command: {cmd}')
current_version = version.group(0)
return current_version
def firefox_version():
pattern = r'\d+.*'
cmd_mapping = {
OSType.LINUX: 'firefox --version',
OSType.MAC: r'/Applications/Firefox.app/Contents/MacOS/firefox --version',
OSType.WIN: r"Powershell (Get-Item (Get-ItemProperty 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\firefox.exe').'(Default)').VersionInfo.ProductVersion",
}
cmd = cmd_mapping[os_name()]
version = None
with os.popen(cmd) as stream:
stdout = stream.read()
version = re.search(pattern, stdout)
if not version:
raise ValueError(f'Could not get version for Firefox with this command: {cmd}')
current_version = version.group(0)
return current_version
| []
| []
| [
"WDM_LOG_LEVEL"
]
| [] | ["WDM_LOG_LEVEL"] | python | 1 | 0 | |
src/main/java/org/fusfoundation/kranion/GUIControl.java | /*
* The MIT License
*
* Copyright 2016 Focused Ultrasound Foundation.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.fusfoundation.kranion;
//import com.sun.scenario.effect.impl.BufferUtil;
import java.awt.Color;
import java.awt.Font;
import java.awt.FontMetrics;
import java.awt.Graphics2D;
import java.awt.RenderingHints;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.font.FontRenderContext;
import java.awt.geom.Rectangle2D;
import java.awt.image.BufferedImage;
import java.beans.PropertyChangeEvent;
import java.lang.reflect.Array;
import java.nio.ByteBuffer;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Observable;
import java.util.Observer;
import java.util.concurrent.CopyOnWriteArrayList;
import static org.fusfoundation.kranion.TextBox.getScreenCoords;
import static org.fusfoundation.kranion.TextBox.getWorldCoords;
import org.lwjgl.BufferUtils;
import static org.lwjgl.opengl.ARBClearTexture.glClearTexImage;
import static org.lwjgl.opengl.ARBClearTexture.glClearTexSubImage;
import org.lwjgl.opengl.Display;
import static org.lwjgl.opengl.GL11.*;
import static org.lwjgl.opengl.GL12.GL_CLAMP_TO_EDGE;
import org.lwjgl.util.glu.GLU;
import org.lwjgl.util.vector.Matrix4f;
import org.lwjgl.util.vector.Vector4f;
import org.lwjgl.util.vector.Vector2f;
import org.lwjgl.util.vector.Vector3f;
/**
*
* @author John Snell
*/
// Some shared bookkeeping for GUI controls
public abstract class GUIControl extends Renderable implements org.fusfoundation.kranion.MouseListener, Observer {
private String command = new String();
private String title = new String();
private String propertyPrefix = new String();
protected boolean isVisible = true;
protected boolean isEnabled = true;
protected boolean mouseInside = false;
protected Rectangle bounds = new Rectangle();
protected ArrayList<ActionListener> listeners = new ArrayList<>();
protected GUIControl parent = null;
protected GUIControl grabbedChild = null; // child control that has grabbed the mouse
protected float xgrab, ygrab;
protected List<Renderable> children = new CopyOnWriteArrayList<>();
protected boolean isTextEditable = false;
protected Thread myThread = Thread.currentThread();
protected UpdateEventQueue updateEventQueue = new UpdateEventQueue();
protected static Font stdfont = null;
public GUIControl() {
if (stdfont == null) {
String os = System.getenv("OS");
if (os != null && os.startsWith("Windows")) {
stdfont = new Font("Helvetica", Font.PLAIN | Font.TRUETYPE_FONT , 16);
}
else {
stdfont = new Font("Helvetica", Font.PLAIN | Font.TRUETYPE_FONT , 15);
}
}
}
public void addChild(Renderable child) {
if (child instanceof GUIControl) {
((GUIControl)child).parent = this;
}
children.add(child);
}
public void removeChild(Renderable child) {
children.remove(child);
}
@Override
public boolean getIsDirty() {
updateEventQueue.handleEvents(this);
advanceChildren(); // all animators will set dirty as needed
if (isDirty) {
// System.out.println("I am dirty: " + this);
return true;
}
Iterator<Renderable> i = children.iterator();
while (i.hasNext()) {
Renderable child = i.next();
if (child.getIsDirty()) {
// System.out.println(" Dirty child: " + child);
return true;
}
}
return false;
}
@Override
public Renderable setIsDirty(boolean dirty) {
isDirty = dirty;
// if (dirty == false) {
Iterator<Renderable> i = children.iterator();
while (i.hasNext()) {
Renderable child = i.next();
child.setIsDirty(dirty);
}
// }
return this;
}
public void setIsEnabled(boolean enabled) {
if (this.isEnabled != enabled) {
setIsDirty(true);
}
this.isEnabled = enabled;
}
public boolean getIsEnabled() {
return this.isEnabled;
}
// Some children who are animators may need to be checked to
// see if they are "dirty" before the render cycle, otherwise
// animation changes might not get rendered.
public void advanceChildren() {
Iterator<Renderable> i = children.iterator();
while (i.hasNext()) {
Renderable child = i.next();
if (child instanceof Animator) {
((Animator) child).advanceFrame();
}
}
}
public void renderChildren() {
glMatrixMode(GL_MODELVIEW);
Main.glPushMatrix();
glTranslatef(bounds.x, bounds.y, 0.1f);
Iterator<Renderable> i = children.iterator();
while (i.hasNext()) {
Renderable child = i.next();
if (child instanceof Animator) {
((Animator) child).advanceFrame();
}
// int startStackDepth = glGetInteger(GL_ATTRIB_STACK_DEPTH);
child.render();
// int endStackDepth = glGetInteger(GL_ATTRIB_STACK_DEPTH);
// if (startStackDepth != endStackDepth) {
// System.out.println("ATTRIB stack leak: " + child + "[" + startStackDepth + "->" + endStackDepth + "]");
// }
//System.out.println(child);
//if (Main.checkForGLError() != GL_NO_ERROR) {
// System.out.println(child);
// System.out.println("MODELVIEW stack depth: " + glGetInteger(GL_MODELVIEW_STACK_DEPTH));
// System.out.println("MODELVIEW max stack depth: " + glGetInteger(GL_MAX_MODELVIEW_STACK_DEPTH));
// System.out.println("PROJECTIONVIEW stack depth: " + glGetInteger(GL_PROJECTION_STACK_DEPTH));
// System.out.println("PROJECTIONVIEW max stack depth: " + glGetInteger(GL_MAX_PROJECTION_STACK_DEPTH));
//}
}
Main.glPopMatrix();
}
public void bringToTop() {
// If we have a parent, move us to the last to be drawn and first to get mouse events
if (parent != null) {
if (parent.children.remove(this)) {
parent.children.add(this);
}
}
}
public void sendToBottom() {
        // If we have a parent, move us to the first to be drawn and last to get mouse events
if (parent != null) {
if (parent.children.remove(this)) {
parent.children.add(0, this);
}
}
}
public void renderPickableChildren() {
Iterator<Renderable> i = children.iterator();
while (i.hasNext()) {
Renderable child = i.next();
if (child instanceof Pickable) {
((Pickable) child).renderPickable();
}
else if (child instanceof Trackball) { // TODO: kind of a hack, if Trackball doesn't render then orientation isn't correct
child.render();
}
}
}
public void setTextEditable(boolean isEditable) {
this.isTextEditable = isEditable;
}
public boolean getTextEditable() { return isTextEditable; }
@Override
public void release() {
Iterator<Renderable> i = children.iterator();
while (i.hasNext()) {
Renderable child = i.next();
child.release();
}
this.removeFromSet();
listeners.clear();
}
protected void grabMouse(float x, float y) {
if (parent != null) {
if (parent.grabbedChild == null) {
parent.grabbedChild = this;
xgrab = x;
ygrab = y;
}
}
}
protected void ungrabMouse() {
if (parent != null) {
if (parent.grabbedChild == this) {
parent.grabbedChild = null;
xgrab = ygrab = -1;
}
}
}
protected boolean hasGrabbed() {
if (parent!=null && parent.grabbedChild == this) {
return true;
}
else {
return false;
}
}
public GUIControl setCommand(String cmd) {
this.command = new String(cmd);
return this;
}
public String getCommand() { return command; }
public GUIControl setTitle(String cmd) {
this.title = new String(cmd);
setIsDirty(true);
return this;
}
public String getTitle() { return title; }
@Override
public boolean getVisible() { return isVisible; }
@Override
public Renderable setVisible(boolean visible) {
if (isVisible != visible) {
setIsDirty(true);
}
isVisible = visible;
return this;
}
public void setBounds(float x, float y, float width, float height) {
if (x != bounds.x || y != bounds.y || width != bounds.width || height != bounds.height) {
setIsDirty(true);
bounds.setBounds(x, y, width, height);
}
}
public void setBounds(Rectangle r) {
if (r.x != bounds.x || r.y != bounds.y || r.width != bounds.width || r.height != bounds.height) {
setIsDirty(true);
bounds.setBounds(r);
}
}
public Rectangle getBounds() { return bounds; }
public boolean contains(float x, float y) {
return bounds.contains(x, y);
}
public void addActionListener(ActionListener listener) {
if (listener != null) {
listeners.add(listener);
}
}
public void removeActionListener(ActionListener listener) {listeners.remove(listener);}
public void fireActionEvent() {
ActionEvent event = new ActionEvent(this, ActionEvent.ACTION_PERFORMED, command);
fireActionEvent(event);
}
public void fireActionEvent(ActionEvent event) {
Iterator<ActionListener> i = listeners.iterator();
while (i.hasNext()) {
i.next().actionPerformed(event);
}
}
public void fireActionEvent(ActionListener dontNotifyMe) {
ActionEvent event = new ActionEvent(this, ActionEvent.ACTION_PERFORMED, command);
fireActionEvent(event, dontNotifyMe);
}
public void fireActionEvent(ActionEvent event, ActionListener dontNotifyMe) {
Iterator<ActionListener> i = listeners.iterator();
while (i.hasNext()) {
ActionListener al = i.next();
if (al != dontNotifyMe) {
al.actionPerformed(event);
}
}
}
public void fireActionEvent(String altcmd) {
ActionEvent event = new ActionEvent(this, ActionEvent.ACTION_PERFORMED, altcmd);
Iterator<ActionListener> i = listeners.iterator();
while (i.hasNext()) {
i.next().actionPerformed(event);
}
}
@Override
public boolean OnMouse(float x, float y, boolean button1down, boolean button2down, int dwheel) {
boolean currentInside = MouseIsInside(x, y);
if (mouseInside != currentInside) {
mouseInside = currentInside;
setIsDirty(true);
}
// if (currentInside && !mouseInside) {
// mouseInside = true;
// setIsDirty(true);
// }
// else if (!currentInside && mouseInside) {
// mouseInside = false;
// setIsDirty(true);
// }
if (getVisible()) {
if (this.grabbedChild != null) {
return grabbedChild.OnMouse(x - bounds.getIntX(), y - bounds.getIntY(), button1down, button2down, dwheel);
}
else {
// things are drawn in list order, so mouse order is reversed (things drawn last, get mouse first)
ListIterator<Renderable> i = children.listIterator(children.size());
while(i.hasPrevious()) {
Renderable child = i.previous();
if (child.getVisible() && child instanceof MouseListener) {
if ( ((MouseListener)child).OnMouse(x - bounds.getIntX(), y - bounds.getIntY(), button1down, button2down, dwheel) ) {
return true;
}
}
}
}
}
return false;
}
@Override
public boolean MouseIsInside(float x, float y) {
return contains(x, y);
}
    // This one gets called when the property name == command name
// This is a shortcut for simple, one property controls
public void update(Object newValue) {
// Override to support property change notifications
}
// If property name != command name, this one gets called
// For objects that have multiple bound properties
public void update(String propertyName, Object newValue) {
// Override to support property change notifications
}
@Override
public void update(Observable o, Object arg) {
if (arg != null && arg instanceof PropertyChangeEvent) {
// If the update is coming from other than the main (OpenGL) thread
// then queue it for later when it can be handled on main thread
if (myThread != Thread.currentThread()) {
this.updateEventQueue.push(o, arg);
return;
}
PropertyChangeEvent propEvt = (PropertyChangeEvent) arg;
String propName = this.getFilteredPropertyName(propEvt);
if (propName.length() > 0) {
if (propName.equals(this.getCommand())) {
update(propEvt.getNewValue());
}
// else {
update(propName, propEvt.getNewValue()); // TODO: Always pass?
// }
}
}
}
public GUIControl setPropertyPrefix(String name) {
this.propertyPrefix = name;
return this;
}
protected String getFilteredPropertyName(PropertyChangeEvent arg) {
String propName = "";
String nameString = arg.getPropertyName();
if (nameString.startsWith(propertyPrefix + "[")) {
int last = nameString.indexOf("]", propertyPrefix.length()+1);
propName = nameString.substring(propertyPrefix.length()+1, last);
}
return propName;
}
public float getGuiScale() {
// FloatBuffer modelMatrix = BufferUtil.newFloatBuffer(16);
// glGetFloat(GL_MODELVIEW_MATRIX, modelMatrix);
//
// Matrix4f modelview = new Matrix4f();
// modelview.load(modelMatrix);
//
// Vector4f scale = new Vector4f(1f, 0f, 0f, 0f);
// Matrix4f.transform(modelview, scale, scale);
//
// return scale.x;
//Disabling for now, needs to be rethought
return 1f;
}
public static enum VPosFormat {
VPOSITION_TOP,
VPOSITION_CENTER,
VPOSITION_BOTTOM
}
public static enum HPosFormat {
HPOSITION_LEFT,
HPOSITION_CENTER,
HPOSITION_RIGHT
}
public void renderText(String str, Rectangle rect, Font font, Color color, boolean shadowed, VPosFormat vpos, HPosFormat hpos) {
renderText(str, rect, font, color, shadowed, vpos, hpos, false, -1, -1);
}
public int calculateCaretPos(String str, Rectangle rect, Font font, float mouseX, float mouseY, VPosFormat vpos, HPosFormat hpos, int currentCursorPosition) {
if (str != null && str.length() != 0) {
} else {
return 0;
}
BufferedImage img = new BufferedImage(rect.getIntWidth(), rect.getIntHeight(), BufferedImage.TYPE_4BYTE_ABGR);
Graphics2D gc = (Graphics2D)img.getGraphics();
gc.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
gc.setRenderingHint(RenderingHints.KEY_FRACTIONALMETRICS,
RenderingHints.VALUE_FRACTIONALMETRICS_ON);
if (font == null) font = stdfont;
FontMetrics metrics = gc.getFontMetrics(font);
float cursorXPos = -1f;
try {
cursorXPos = metrics.stringWidth(str.substring(0, currentCursorPosition));
}
catch(StringIndexOutOfBoundsException e) {
cursorXPos = -1f;
}
// float cursorXPos = (float)metrics.getStringBounds(str, 0, currentCursorPosition, gc).getMaxX();
float hScroll = 0f;
float textHPos = 0f;
switch(hpos) {
case HPOSITION_LEFT:
textHPos = 1;
if (currentCursorPosition > -1) {
hScroll = Math.max(0f, cursorXPos - (rect.width));
}
break;
case HPOSITION_RIGHT:
// textHPos = rect.width - newWidth;
if (currentCursorPosition > -1) {
//// hScroll = Math.min(0, Math.round(cursorXPos) + (rect.getIntWidth() - 1 - (int)newWidth));
}
break;
case HPOSITION_CENTER:
// textHPos = rect.width/2 - newWidth/2;
break;
}
for (int i=0; i<=str.length(); i++) {
Rectangle2D r1 = metrics.getStringBounds(str, 0, Math.max(i-1, 0), gc);
Rectangle2D r2 = metrics.getStringBounds(str, 0, i, gc);
float index = (float)((r2.getMaxX() - r1.getMaxX())/2f + r1.getMaxX());
System.out.println("pos " + i + " = " + index + " ? " + (mouseX-(rect.x-hScroll)) + " hscroll " + hScroll);
if (index >= (mouseX-(rect.x-hScroll)) ) {
// if (hScroll > 0f) {
// return Math.max(0, i);
// }
// else {
System.out.println();
return Math.max(0, i-1);
// }
}
}
return str.length();
// float textVPos = 0f;
// int hScroll = 0;
// switch(vpos) {
// case VPOSITION_TOP:
// textVPos = metrics.getAscent();
// break;
// case VPOSITION_CENTER:
// textVPos = (metrics.getAscent() + (rect.height - (metrics.getAscent() + metrics.getDescent())) / 2);
// break;
// case VPOSITION_BOTTOM:
// textVPos = rect.height - metrics.getDescent();
// }
//
}
public void renderTextCore(BufferedImage dest, String str, Rectangle rect, Font font, Color color, Color fill, boolean shadowed, VPosFormat vpos, HPosFormat hpos, boolean showCaret, int cursorPos) {
renderTextCore(dest, str, rect, font, color, fill, shadowed, vpos, hpos, showCaret, cursorPos, -1);
}
public void renderTextCore(BufferedImage dest, String str, Rectangle rect, Font font, Color color, Color fill, boolean shadowed, VPosFormat vpos, HPosFormat hpos, boolean showCaret, int cursorPos, int selectEndPos) {
Graphics2D gc = (Graphics2D) dest.getGraphics();
gc.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
gc.setRenderingHint(RenderingHints.KEY_FRACTIONALMETRICS,
RenderingHints.VALUE_FRACTIONALMETRICS_ON);
if (font == null) font = stdfont;
FontMetrics metrics = gc.getFontMetrics(font);
Rectangle2D textBound = metrics.getStringBounds(str, gc);
float newWidth = metrics.stringWidth(str);
float textHeight = metrics.getHeight();
float cursorXPos = -1f;
float selectEndXPos = -1f;
try {
cursorXPos = metrics.stringWidth(str.substring(0, cursorPos));
}
catch(StringIndexOutOfBoundsException e) {
cursorXPos = -1f;
}
try {
selectEndXPos = metrics.stringWidth(str.substring(0, selectEndPos));
}
catch(StringIndexOutOfBoundsException e) {
selectEndXPos = -1f;
}
float textVPos = 0f;
int hScroll = 0;
switch(vpos) {
case VPOSITION_TOP:
textVPos = metrics.getAscent();
break;
case VPOSITION_CENTER:
textVPos = (metrics.getAscent() + (rect.height - (metrics.getAscent() + metrics.getDescent())) / 2);
break;
case VPOSITION_BOTTOM:
textVPos = rect.height - metrics.getDescent();
}
float textHPos = 0f;
switch(hpos) {
case HPOSITION_LEFT:
textHPos = 1;
if (cursorXPos > -1) {
hScroll = Math.max(0, Math.round(cursorXPos) - (rect.getIntWidth() - 1));
}
break;
case HPOSITION_RIGHT:
textHPos = rect.width - newWidth;
if (cursorXPos > -1) {
hScroll = Math.min(0, Math.round(cursorXPos) + (rect.getIntWidth() - 1 - (int)newWidth));
}
break;
case HPOSITION_CENTER:
textHPos = rect.width/2 - newWidth/2;
break;
}
// defensive clear, prob not necessary
gc.setColor(new Color(0, 0, 0, 0));
gc.fillRect(0, 0, rect.getIntWidth()+2, rect.getIntHeight()+2);
gc.setFont(font);
// draw selection if any
if (cursorXPos != -1 && selectEndXPos != -1) {
gc.setColor(new Color(0, 0.5f, 0, 1));
if (selectEndXPos > cursorXPos) {
gc.fillRect((int)cursorXPos, 3, (int)(selectEndXPos - cursorXPos)+2, rect.getIntHeight()-5);
}
else {
gc.fillRect((int)selectEndXPos, 3, (int)(cursorXPos - selectEndXPos)+2, rect.getIntHeight()-5);
}
}
if (fill != null) {
gc.setColor(fill);
gc.fillRect(0, 0, rect.getIntWidth(), rect.getIntHeight());
}
else {
gc.setColor(new Color(0, 0, 0, 0));
gc.fillRect(0, 0, dest.getWidth(), dest.getHeight());
}
if (shadowed) {
gc.setColor(new Color(0.0f, 0.0f, 0.0f, 1.0f));
gc.drawString(str, textHPos+1 - hScroll, textVPos+1);
}
if (isEnabled) {
gc.setColor(color);
}
else {
gc.setColor(new Color(0.4f, 0.4f, 0.4f, 1.0f));
}
gc.drawString(str, textHPos - hScroll, textVPos);
if (isTextEditable && hasKeyboardFocus() && cursorPos > -1 && showCaret) {
gc.drawLine(
(int)textHPos + (int)cursorXPos - 1 - hScroll,
(int)textVPos + metrics.getDescent(),
(int)textHPos + (int)cursorXPos - 1 - hScroll,
(int)textVPos + metrics.getDescent() - (int)textHeight);
}
}
public void renderText(String str, Rectangle rect, Font font, Color color, boolean shadowed, VPosFormat vpos, HPosFormat hpos, boolean showCaret, int cursorPos, int selectionEndPos) {
if (rect.width <= 0 || rect.height <= 0) return;
BufferedImage img = new BufferedImage(rect.getIntWidth(), rect.getIntHeight(), BufferedImage.TYPE_4BYTE_ABGR);
renderTextCore(img, str, rect, font, color, null, shadowed, vpos, hpos, showCaret, cursorPos, selectionEndPos);
renderBufferedImageViaTexture(img, rect);
}
public void renderText(String str, Rectangle rect, Font font, Color color, Color fill, boolean shadowed, VPosFormat vpos, HPosFormat hpos, boolean showCaret, int cursorPos, int selectionEndPos) {
if (rect.width <= 0 || rect.height <= 0) return;
BufferedImage img = new BufferedImage(rect.getIntWidth(), rect.getIntHeight(), BufferedImage.TYPE_4BYTE_ABGR);
renderTextCore(img, str, rect, font, color, fill, shadowed, vpos, hpos, showCaret, cursorPos, selectionEndPos);
renderBufferedImageViaTexture(img, rect);
}
public Rectangle getStringBounds(String str, Font font) {
BufferedImage img = new BufferedImage(1, 1, BufferedImage.TYPE_4BYTE_ABGR);
Graphics2D gc = (Graphics2D) img.getGraphics();
gc.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
gc.setRenderingHint(RenderingHints.KEY_FRACTIONALMETRICS,
RenderingHints.VALUE_FRACTIONALMETRICS_ON);
if (font == null) font = stdfont;
FontMetrics metrics = gc.getFontMetrics(font);
float textWidth = metrics.stringWidth(str);
float textHeight = metrics.getHeight();
return new Rectangle(0, 0, textWidth, textHeight);
}
public void renderBufferedImage(BufferedImage img, Rectangle rect) {
// Don't modify the Rectangle passed in
Rectangle imgRect = new Rectangle(rect);
if (img != null) {
Main.glPushAttrib(GL_POLYGON_BIT | GL_LINE_BIT | GL_ENABLE_BIT | GL_TRANSFORM_BIT);
float xoffset = 0;
float yoffset = 0;
BufferedImage imageToUse = img;
Vector2f screenPos = getScreenCoords(imgRect.x, imgRect.y, 0);
if (screenPos == null) screenPos = new Vector2f(0, 0);
if (screenPos.x < 0 || screenPos.y < 0) {
if (screenPos.x < 0) xoffset = -screenPos.x;
if (screenPos.y < 0) yoffset = -screenPos.y;
if (Math.round(xoffset) >= img.getWidth() ||
Math.round(yoffset) >= img.getHeight()) {
Main.glPopAttrib();
return; // we are totally off screen
}
imageToUse = img.getSubimage(
Math.round(xoffset),
Math.round(yoffset),
Math.round(imgRect.width - xoffset),
Math.round(imgRect.height - yoffset));
}
byte buf[] = (byte[]) imageToUse.getRaster().getDataElements(0, 0, imageToUse.getWidth(), imageToUse.getHeight(), null);
glPixelZoom(1.0f, -1.0f);
glDisable(GL_CLIP_PLANE0);
glDisable(GL_CLIP_PLANE1);
glDisable(GL_DEPTH_TEST);
ByteBuffer bbuf = ByteBuffer.allocateDirect(buf.length);
bbuf.put(buf, 0, buf.length);
bbuf.flip();
Vector3f woffset;
if (xoffset > 0f || yoffset > 0f) {
woffset = getWorldCoords(0, 0);
if (woffset == null) woffset = new Vector3f();
if (xoffset > 0) imgRect.x = woffset.x;
if (yoffset > 0) imgRect.y = woffset.y;
}
glRasterPos2f(Math.max(0f, imgRect.x), Math.max(0f, imgRect.y+imageToUse.getHeight()));
glDrawPixels(imageToUse.getWidth(), imageToUse.getHeight(), GL_RGBA, GL_UNSIGNED_BYTE, bbuf);
// glColor3f(1, 1, 1);
// glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
// glBegin(GL_QUADS);
// glVertex2f(imgRect.x, imgRect.y);
// glVertex2f(imgRect.x + imageToUse.getWidth(), imgRect.y);
// glVertex2f(imgRect.x + imageToUse.getWidth(), imgRect.y + imageToUse.getHeight());
// glVertex2f(imgRect.x, imgRect.y + imageToUse.getHeight());
// glEnd();
Main.glPopAttrib();
}
}
// This is a scratch texture for displaying Java 2D graphics
private static int backingTextureName = 0;
private static int backingTextureWidth = 0;
private static int backingTextureHeight = 0;
private void createBackingTexture() {
if (backingTextureName == 0 || backingTextureWidth != Display.getWidth() || backingTextureHeight != Display.getHeight()) {
if (backingTextureName != 0) {
glBindTexture(GL_TEXTURE_2D, 0);
glDeleteTextures(backingTextureName);
backingTextureName = 0;
}
backingTextureName = glGenTextures();
backingTextureWidth = Display.getWidth();
backingTextureHeight = Display.getHeight();
glBindTexture(GL_TEXTURE_2D, backingTextureName);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, backingTextureWidth, backingTextureHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, (ByteBuffer)null);
}
}
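    // Note: renderBufferedImageViaTexture() below uploads the BufferedImage into the shared
    // screen-sized scratch texture with glTexSubImage2D and draws it as a textured quad,
    // rather than re-allocating a texture per call or using the glDrawPixels path of
    // renderBufferedImage() above.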
public void renderBufferedImageViaTexture(BufferedImage img, Rectangle rect) {
// Don't modify the Rectangle passed in
Rectangle imgRect = new Rectangle(rect);
if (img != null) {
Main.glPushAttrib(GL_POLYGON_BIT | GL_LINE_BIT | GL_ENABLE_BIT | GL_TRANSFORM_BIT);
byte buf[] = (byte[]) img.getRaster().getDataElements(0, 0, img.getWidth(), img.getHeight(), null);
glDisable(GL_CLIP_PLANE0);
glDisable(GL_CLIP_PLANE1);
glDisable(GL_DEPTH_TEST);
ByteBuffer bbuf = ByteBuffer.allocateDirect(buf.length);
bbuf.put(buf, 0, buf.length);
bbuf.flip();
createBackingTexture();
// This doesn't seem to be necessary since we always init the rectangular region specified
// These calls are REALLY SLOW on the Linux NVIDIA driver
// glClearTexImage(backingTextureName, 0, GL_RGBA, GL_UNSIGNED_BYTE, (ByteBuffer)null);
// glClearTexSubImage(backingTextureName, 0, 0, 0, 0, img.getWidth()+2, img.getHeight()+2, 1, GL_RGBA, GL_UNSIGNED_BYTE, (ByteBuffer)null);
glBindTexture(GL_TEXTURE_2D, backingTextureName);
// int textureName = glGenTextures();
// glBindTexture(GL_TEXTURE_2D, textureName);
// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, imageToUse.getWidth(), imageToUse.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, bbuf);
// ByteBuffer clearValue = BufferUtils.createByteBuffer(4);
// org.lwjgl.opengl.GL44.glClearTexSubImage(backingTextureName, 0, 0, 0, 0, img.getWidth(), img.getHeight(), 1, GL_RGBA, GL_UNSIGNED_BYTE, clearValue);
// write new texture data
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, img.getWidth(), img.getHeight(), GL_RGBA, GL_UNSIGNED_BYTE, bbuf);
// glRasterPos2f(Math.max(0f, imgRect.x), Math.max(0f, imgRect.y+imageToUse.getHeight()));
// glDrawPixels(imageToUse.getWidth(), imageToUse.getHeight(), GL_RGBA, GL_UNSIGNED_BYTE, bbuf);
glMatrixMode(GL_TEXTURE);
glPushMatrix();
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
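// texRect maps the image's sub-region of the backing texture into normalized [0,1] texture coordinates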
Rectangle texRect = new Rectangle(
0,
0,
((float)img.getWidth())/(backingTextureWidth),
((float)img.getHeight())/(backingTextureHeight)
);
glColor3f(1, 1, 1);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glEnable(GL_TEXTURE_2D);
glBegin(GL_QUADS);
glTexCoord2f(texRect.x, texRect.y+texRect.height);
glVertex2f(imgRect.getIntX(), imgRect.getIntY());
glTexCoord2f(texRect.x+texRect.width, texRect.y+texRect.height);
glVertex2f(imgRect.getIntX() + imgRect.getIntWidth(), imgRect.getIntY());
glTexCoord2f(texRect.x+texRect.width, texRect.y);
glVertex2f(imgRect.getIntX() + imgRect.getIntWidth(), imgRect.getIntY() + imgRect.getIntHeight());
glTexCoord2f(texRect.x, texRect.y);
glVertex2f(imgRect.getIntX(), imgRect.getIntY() + imgRect.getIntHeight());
glEnd();
glDisable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, 0);
// glDeleteTextures(textureName);
glMatrixMode(GL_TEXTURE);
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
Main.glPopAttrib();
}
}
// Allocate for reuse below. Not thread safe!
private static FloatBuffer worldCoords = BufferUtils.createFloatBuffer(4);
private static FloatBuffer screenCoords = BufferUtils.createFloatBuffer(4);
private static IntBuffer viewport = BufferUtils.createIntBuffer(16);
private static FloatBuffer modelView = BufferUtils.createFloatBuffer(16);
private static FloatBuffer projection = BufferUtils.createFloatBuffer(16);
public static Vector2f getScreenCoords(double x, double y, double z) {
glGetFloat(GL_MODELVIEW_MATRIX, modelView);
glGetFloat(GL_PROJECTION_MATRIX, projection);
glGetInteger(GL_VIEWPORT, viewport);
boolean result = GLU.gluProject((float) x, (float) y, (float) z, modelView, projection, viewport, screenCoords);
if (result) {
return new Vector2f(screenCoords.get(0), screenCoords.get(1));
}
return null;
}
public static Vector3f getWorldCoords(double screenx, double screeny) {
return getWorldCoords(screenx, screeny, 0);
}
public static Vector3f getWorldCoords(double screenx, double screeny, double screenZ) {
glGetFloat(GL_MODELVIEW_MATRIX, modelView);
glGetFloat(GL_PROJECTION_MATRIX, projection);
glGetInteger(GL_VIEWPORT, viewport);
float winX = (float) screenx;
float winY = (float) viewport.get(3) - (float) screeny;
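// note: winX/winY hold the viewport-flipped window coordinates, but the call below passes the raw screenx/screeny values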
boolean result = GLU.gluUnProject((float) screenx, (float) screeny, (float)screenZ, modelView, projection, viewport, worldCoords);
if (result) {
return new Vector3f(worldCoords.get(0), worldCoords.get(1), worldCoords.get(2));
}
return null;
}
}
| [
"\"OS\""
]
| []
| [
"OS"
]
| [] | ["OS"] | java | 1 | 0 | |
src/main/java/io/jenkins/plugins/services/impl/DefaultConfigurationService.java | package io.jenkins.plugins.services.impl;
import io.jenkins.plugins.commons.JsonObjectMapper;
import io.jenkins.plugins.models.GeneratedPluginData;
import io.jenkins.plugins.services.ConfigurationService;
import io.jenkins.plugins.services.ServiceException;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpStatus;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.stream.Collectors;
import java.util.zip.GZIPInputStream;
/**
* <p>Default implementation of <code>ConfigurationService</code></p>
*/
public class DefaultConfigurationService implements ConfigurationService {
private final Logger logger = LoggerFactory.getLogger(DefaultConfigurationService.class);
private enum ModifyType {
ETAG,
LAST_MODIFIED,
NONE
}
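// HTTP validator (ETag or Last-Modified value) remembered from the last successful fetch, used for conditional requests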
private ModifyType modifyType;
private String modifyValue;
public DefaultConfigurationService() {
this.modifyType = null;
this.modifyValue = null;
}
@Override
public GeneratedPluginData getIndexData() throws ServiceException {
final CloseableHttpClient httpClient = HttpClients.createDefault();
try {
final String url = getDataFileUrl();
if (!hasPluginDataChanged(httpClient, url)) {
logger.info("Plugin data file hasn't changed");
return null;
}
final HttpGet get = new HttpGet(url);
final CloseableHttpResponse response = httpClient.execute(get);
if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) {
final HttpEntity entity = response.getEntity();
final InputStream inputStream = entity.getContent();
final File dataFile = File.createTempFile("plugins", ".json.gzip");
FileUtils.copyToFile(inputStream, dataFile);
final String data = readGzipFile(dataFile);
final GeneratedPluginData generated = JsonObjectMapper.getObjectMapper().readValue(data, GeneratedPluginData.class);
if (response.containsHeader("ETag")) {
modifyType = ModifyType.ETAG;
modifyValue = response.getLastHeader("ETag").getValue();
logger.info(String.format("Using ETag [%s]", modifyValue));
} else if (response.containsHeader("Last-Modified")) {
modifyType = ModifyType.LAST_MODIFIED;
modifyValue = response.getLastHeader("Last-Modified").getValue();
logger.info(String.format("Using Last-Modified [%s]", modifyValue));
} else {
modifyType = ModifyType.NONE;
modifyValue = null;
logger.info("ETag and Last-Modified are not supported by the server");
}
return generated;
} else {
logger.error("Data file not found");
throw new RuntimeException("Data file not found");
}
} catch (Exception e) {
logger.error("Problem getting data file", e);
throw new ServiceException("Problem getting data file", e);
} finally {
try {
httpClient.close();
} catch (IOException e) {
logger.warn("Problem closing HttpClient", e);
}
}
}
private String getDataFileUrl() {
if (System.getenv().containsKey("DATA_FILE_URL")) {
final String url = StringUtils.trimToNull(System.getenv("DATA_FILE_URL"));
if (url == null) {
throw new RuntimeException("Environment variable 'DATA_FILE_URL' is empty");
}
return url;
} else {
final String url = StringUtils.trimToNull(System.getProperty("data.file.url"));
if (url == null) {
throw new RuntimeException("System property 'data.file.url' is not given");
}
return url;
}
}
private String readGzipFile(final File file) {
try(final BufferedReader reader = new BufferedReader(new InputStreamReader(new GZIPInputStream(new FileInputStream(file)), StandardCharsets.UTF_8))) {
return reader.lines().collect(Collectors.joining());
} catch (Exception e) {
logger.error("Problem decompressing plugin data", e);
throw new RuntimeException("Problem decompressing plugin data", e);
}
}
private boolean hasPluginDataChanged(CloseableHttpClient httpClient, String url) {
if (modifyType == null || modifyType == ModifyType.NONE) {
return true;
}
final HttpHead head = new HttpHead(url);
switch (modifyType) {
case ETAG:
logger.info(String.format("Using ETag [%s]", modifyValue));
head.addHeader("If-None-Match", modifyValue);
break;
case LAST_MODIFIED:
logger.info(String.format("Using Last-Modified [%s]", modifyValue));
head.addHeader("If-Modified-Since", modifyValue);
break;
}
try {
final CloseableHttpResponse response = httpClient.execute(head);
return response.getStatusLine().getStatusCode() != HttpStatus.SC_NOT_MODIFIED;
} catch (Exception e) {
logger.error("Problem determining if plugin data file changed", e);
throw new ServiceException("Problem determining if plugin data file changed", e);
}
}
}
| [
"\"DATA_FILE_URL\""
]
| []
| [
"DATA_FILE_URL"
]
| [] | ["DATA_FILE_URL"] | java | 1 | 0 | |
server/go/handle_graphql/server/main.go | package main
import (
"log"
"net/http"
"os"
"strings"
"github.com/99designs/gqlgen/handler"
"github.com/clarsen/go-trello-workflow/server/go/handle_graphql"
"github.com/gorilla/mux"
)
const defaultPort = "8080"
func addCors(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Required for CORS support to work
w.Header().Set("Access-Control-Allow-Origin", "https://workflow.app.caselarsen.com")
if strings.HasPrefix(r.Header.Get("Origin"), "http://localhost") {
w.Header().Set("Access-Control-Allow-Origin", r.Header.Get("Origin"))
} else if strings.HasPrefix(r.Header.Get("Origin"), "http://192.168.7.26:8000") {
w.Header().Set("Access-Control-Allow-Origin", r.Header.Get("Origin"))
} else if strings.HasPrefix(r.Header.Get("Origin"), "https://workflow.caselarsen.com") {
w.Header().Set("Access-Control-Allow-Origin", r.Header.Get("Origin"))
}
w.Header().Set("Access-Control-Allow-Headers", "Content-Type,X-Amz-Date,Authorization")
// Required for cookies, authorization headers with HTTPS
w.Header().Set("Access-Control-Allow-Credentials", "true")
// Call the next handler, which can be another middleware in the chain, or the final handler.
next.ServeHTTP(w, r)
})
}
func main() {
port := os.Getenv("PORT")
if port == "" {
port = defaultPort
}
router := mux.NewRouter()
router.Use(addCors)
router.Handle("/", handler.Playground("GraphQL playground", "/api/gql"))
router.Handle("/api/gql",
handler.GraphQL(handle_graphql.NewExecutableSchema(handle_graphql.Config{Resolvers: &handle_graphql.Resolver{}}))).Methods("POST", "OPTIONS")
log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
log.Fatal(http.ListenAndServe(":"+port, router))
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
test/setup/vault.go | package setup
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/solo-io/solo-kit/pkg/utils/syncutils"
"github.com/solo-io/go-utils/log"
"io/ioutil"
"time"
"io"
"regexp"
"strings"
"github.com/onsi/ginkgo"
"github.com/pkg/errors"
)
const defaultVaultDockerImage = "vault:1.1.3"
type VaultFactory struct {
vaultpath string
tmpdir string
Port int
}
func NewVaultFactory() (*VaultFactory, error) {
vaultpath := os.Getenv("VAULT_BINARY")
if vaultpath == "" {
vaultPath, err := exec.LookPath("vault")
if err == nil {
log.Printf("Using vault from PATH: %s", vaultPath)
vaultpath = vaultPath
}
}
port := AllocateParallelPort(8200)
if vaultpath != "" {
return &VaultFactory{
vaultpath: vaultpath,
Port: port,
}, nil
}
// try to grab one from docker...
tmpdir, err := ioutil.TempDir(os.Getenv("HELPER_TMP"), "vault")
if err != nil {
return nil, err
}
bash := fmt.Sprintf(`
set -ex
CID=$(docker run -d %s /bin/sh -c exit)
# just print the image sha for reproducibility
echo "Using Vault Image:"
docker inspect %s -f "{{.RepoDigests}}"
docker cp $CID:/bin/vault .
docker rm -f $CID
`, defaultVaultDockerImage, defaultVaultDockerImage)
scriptfile := filepath.Join(tmpdir, "getvault.sh")
ioutil.WriteFile(scriptfile, []byte(bash), 0755)
cmd := exec.Command("bash", scriptfile)
cmd.Dir = tmpdir
cmd.Stdout = ginkgo.GinkgoWriter
cmd.Stderr = ginkgo.GinkgoWriter
if err := cmd.Run(); err != nil {
return nil, err
}
return &VaultFactory{
vaultpath: filepath.Join(tmpdir, "vault"),
tmpdir: tmpdir,
Port: port,
}, nil
}
func (ef *VaultFactory) Clean() error {
if ef == nil {
return nil
}
if ef.tmpdir != "" {
os.RemoveAll(ef.tmpdir)
}
return nil
}
type VaultInstance struct {
vaultpath string
tmpdir string
cmd *exec.Cmd
token string
Port int
}
func (ef *VaultFactory) NewVaultInstance() (*VaultInstance, error) {
// create a scratch directory for this vault instance
tmpdir, err := ioutil.TempDir(os.Getenv("HELPER_TMP"), "vault")
if err != nil {
return nil, err
}
return &VaultInstance{
vaultpath: ef.vaultpath,
tmpdir: tmpdir,
Port: ef.Port,
}, nil
}
func (i *VaultInstance) Run() error {
return i.RunWithPort()
}
func (i *VaultInstance) Token() string {
return i.token
}
func (i *VaultInstance) RunWithPort() error {
cmd := exec.Command(i.vaultpath,
"server",
"-dev",
"-dev-root-token-id=root",
fmt.Sprintf("-dev-listen-address=0.0.0.0:%v", i.Port),
)
buf := &syncutils.Buffer{}
w := io.MultiWriter(ginkgo.GinkgoWriter, buf)
cmd.Dir = i.tmpdir
cmd.Stdout = w
cmd.Stderr = w
err := cmd.Start()
if err != nil {
return err
}
i.cmd = cmd
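// give the dev server a moment to start and print its root token before scraping the output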
time.Sleep(time.Millisecond * 2500)
tokenSlice := regexp.MustCompile("Root Token: ([\\-[:word:]]+)").FindAllString(buf.String(), 1)
if len(tokenSlice) < 1 {
return errors.Errorf("%s did not contain root token", buf.String())
}
i.token = strings.TrimPrefix(tokenSlice[0], "Root Token: ")
enableCmd := exec.Command(i.vaultpath,
"secrets",
"enable",
fmt.Sprintf("-address=http://127.0.0.1:%v", i.Port),
"-version=2",
"kv")
enableCmd.Env = append(enableCmd.Env, "VAULT_TOKEN="+i.token)
// enable kv storage
enableCmdOut, err := enableCmd.CombinedOutput()
if err != nil {
return errors.Wrapf(err, "enabling kv storage failed: %s", enableCmdOut)
}
return nil
}
func (i *VaultInstance) Binary() string {
return i.vaultpath
}
func (i *VaultInstance) Clean() error {
if i.cmd != nil {
i.cmd.Process.Kill()
i.cmd.Wait()
}
if i.tmpdir != "" {
os.RemoveAll(i.tmpdir)
}
return nil
}
func (i *VaultInstance) Exec(args ...string) (string, error) {
cmd := exec.Command(i.vaultpath, args...)
cmd.Env = os.Environ()
// disable DEBUG=1 from getting through to vault
for i, pair := range cmd.Env {
if strings.HasPrefix(pair, "DEBUG") {
cmd.Env = append(cmd.Env[:i], cmd.Env[i+1:]...)
break
}
}
out, err := cmd.CombinedOutput()
if err != nil {
err = fmt.Errorf("%s (%v)", out, err)
}
return string(out), err
}
| [
"\"VAULT_BINARY\"",
"\"HELPER_TMP\"",
"\"HELPER_TMP\""
]
| []
| [
"HELPER_TMP",
"VAULT_BINARY"
]
| [] | ["HELPER_TMP", "VAULT_BINARY"] | go | 2 | 0 | |
model.py | """model.py"""
import torch.nn as nn
import torch.nn.init as init
class Discriminator(nn.Module):
def __init__(self, z_dim):
super(Discriminator, self).__init__()
self.z_dim = z_dim
self.net = nn.Sequential(
nn.Linear(z_dim, 1000),
nn.LeakyReLU(0.2, True),
nn.Linear(1000, 1000),
nn.LeakyReLU(0.2, True),
nn.Linear(1000, 1000),
nn.LeakyReLU(0.2, True),
nn.Linear(1000, 1000),
nn.LeakyReLU(0.2, True),
nn.Linear(1000, 1000),
nn.LeakyReLU(0.2, True),
nn.Linear(1000, 2),
)
self.weight_init()
def weight_init(self, mode='normal'):
if mode == 'kaiming':
initializer = kaiming_init
elif mode == 'normal':
initializer = normal_init
for block in self._modules:
for m in self._modules[block]:
initializer(m)
def forward(self, z):
return self.net(z).squeeze()
class RF_VAE1(nn.Module):
"""Encoder and Decoder architecture for 2D Shapes data."""
def __init__(self, z_dim=10):
super(RF_VAE1, self).__init__()
self.z_dim = z_dim
self.encode = nn.Sequential(
nn.Conv2d(1, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 128, 4, 1),
nn.ReLU(True),
nn.Conv2d(128, 2*z_dim, 1)
)
self.decode = nn.Sequential(
nn.Conv2d(z_dim, 128, 1),
nn.ReLU(True),
nn.ConvTranspose2d(128, 64, 4),
nn.ReLU(True),
nn.ConvTranspose2d(64, 64, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(64, 32, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(32, 1, 4, 2, 1),
)
self.weight_init()
def weight_init(self, mode='normal'):
if mode == 'kaiming':
initializer = kaiming_init
elif mode == 'normal':
initializer = normal_init
for block in self._modules:
for m in self._modules[block]:
initializer(m)
def reparametrize(self, mu, logvar):
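# reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, I)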
std = logvar.mul(0.5).exp_()
eps = std.data.new(std.size()).normal_()
return eps.mul(std).add_(mu)
def forward(self, x, no_dec=False):
stats = self.encode(x)
mu = stats[:, :self.z_dim]
logvar = stats[:, self.z_dim:]
z = self.reparametrize(mu, logvar)
if no_dec:
return z.squeeze()
else:
x_recon = self.decode(z).view(x.size())
return x_recon, mu, logvar, z.squeeze()
class RF_VAE2(nn.Module):
"""Encoder and Decoder architecture for 3D Shapes, Celeba, Chairs data."""
def __init__(self, z_dim=10):
super(RF_VAE2, self).__init__()
self.z_dim = z_dim
self.encode = nn.Sequential(
nn.Conv2d(3, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 256, 4, 1),
nn.ReLU(True),
nn.Conv2d(256, 2*z_dim, 1)
)
self.decode = nn.Sequential(
nn.Conv2d(z_dim, 256, 1),
nn.ReLU(True),
nn.ConvTranspose2d(256, 64, 4),
nn.ReLU(True),
nn.ConvTranspose2d(64, 64, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(64, 32, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(32, 3, 4, 2, 1),
)
self.weight_init()
def weight_init(self, mode='normal'):
if mode == 'kaiming':
initializer = kaiming_init
elif mode == 'normal':
initializer = normal_init
for block in self._modules:
for m in self._modules[block]:
initializer(m)
def reparametrize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = std.data.new(std.size()).normal_()
return eps.mul(std).add_(mu)
def forward(self, x, no_dec=False):
stats = self.encode(x)
mu = stats[:, :self.z_dim]
logvar = stats[:, self.z_dim:]
z = self.reparametrize(mu, logvar)
if no_dec:
return z.squeeze()
else:
x_recon = self.decode(z)
return x_recon, mu, logvar, z.squeeze()
class FactorVAE3(nn.Module):
"""Encoder and Decoder architecture for 3D Faces data."""
def __init__(self, z_dim=10):
super(FactorVAE3, self).__init__()
self.z_dim = z_dim
self.encode = nn.Sequential(
nn.Conv2d(1, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 256, 4, 1),
nn.ReLU(True),
nn.Conv2d(256, 2*z_dim, 1)
)
self.decode = nn.Sequential(
nn.Conv2d(z_dim, 256, 1),
nn.ReLU(True),
nn.ConvTranspose2d(256, 64, 4),
nn.ReLU(True),
nn.ConvTranspose2d(64, 64, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(64, 32, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.ConvTranspose2d(32, 1, 4, 2, 1),
)
self.weight_init()
def weight_init(self, mode='normal'):
if mode == 'kaiming':
initializer = kaiming_init
elif mode == 'normal':
initializer = normal_init
for block in self._modules:
for m in self._modules[block]:
initializer(m)
def reparametrize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = std.data.new(std.size()).normal_()
return eps.mul(std).add_(mu)
def forward(self, x, no_dec=False):
stats = self.encode(x)
mu = stats[:, :self.z_dim]
logvar = stats[:, self.z_dim:]
z = self.reparametrize(mu, logvar)
if no_dec:
return z.squeeze()
else:
x_recon = self.decode(z)
return x_recon, mu, logvar, z.squeeze()
def kaiming_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
init.kaiming_normal_(m.weight)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.fill_(0)
def normal_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
init.normal_(m.weight, 0, 0.02)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.fill_(0)
| []
| []
| []
| [] | [] | python | null | null | null |
escapes.go | package main
import (
"fmt"
"os"
)
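// escape holds the terminal escape sequences (or plain-text stand-ins) used to mark fail/pass/skip/zero results and to format colours, links and emphasis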
type escape struct {
fail, pass, skip, zero, nope, endc string
rgb func(rgb [3]uint8) string
uri func(url, text string) string
em func(text string) string
}
const (
fullEsc = iota
monoEsc
bareEsc
testEsc
)
var escapes = []*escape{
{
fail: "\033[38;5;124m",
pass: "\033[38;5;034m",
skip: "\033[38;5;244m",
zero: "\033[38;5;172m",
nope: "\033[00000000m",
endc: "\033[0m",
rgb: func(rgb [3]uint8) string {
return fmt.Sprintf("\033[38;2;%d;%d;%dm", rgb[0], rgb[1], rgb[2])
},
uri: func(url string, text string) string {
return fmt.Sprintf("\033]8;;%s\033\\%s\033]8;;\033\\", url, text)
},
em: func(text string) string {
return fmt.Sprintf("\033[3m%s\033[23m", text)
},
}, {
fail: "\033[7m", // reversed
pass: "\033[1m", // bold
skip: "\033[2m", // dim
zero: "\033[1m", // bold
nope: "\033[0m",
endc: "\033[0m",
rgb: func(rgb [3]uint8) string { return "" },
uri: func(url string, text string) string {
return fmt.Sprintf("\033]8;;%s\033\\%s\033]8;;\033\\", url, text)
},
em: func(text string) string {
return fmt.Sprintf("\033[3m%s\033[23m", text)
},
}, {
fail: "", pass: "", skip: "", zero: "", nope: "", endc: "",
rgb: func(rgb [3]uint8) string { return "" },
uri: func(url string, text string) string { return text },
em: func(text string) string { return "*" + text + "*" },
}, {
fail: "FAIL",
pass: "PASS",
skip: "SKIP",
zero: "ZERO",
nope: "NOPE",
endc: "ENDC",
rgb: func(rgb [3]uint8) string {
return fmt.Sprintf("#%2x%2x%2x", rgb[0], rgb[1], rgb[2])
},
uri: func(url string, text string) string {
return fmt.Sprintf("[%s](%s)", text, url)
},
em: func(text string) string {
return "*" + text + "*"
},
},
}
func (esc *escape) setEscape(override string) *escape {
*esc = *guessEscape(override)
return esc
}
func guessEscape(override string) *escape {
switch override {
case "":
// meh
case "full":
return escapes[fullEsc]
case "mono":
return escapes[monoEsc]
case "bare":
return escapes[bareEsc]
case "test":
return escapes[testEsc]
default:
// meh
}
// HMMMMMMM.
// the NO_COLOR ‘informal standard’ simply says
//
// All command-line software which outputs text with ANSI
// color added should check for the presence of a NO_COLOR
// environment variable that, when present (regardless of
// its value), prevents the addition of ANSI color.
//
// but some people take that to mean "just no colour", and
// some people take it to mean "no ANSI escape codes". So, if
// the env var is there, we go no colour. If it is set to
// "strict", we go no escape codes at all.
if noTerm, set := os.LookupEnv("NO_COLOR"); set {
if noTerm == "strict" {
return escapes[bareEsc]
}
return escapes[monoEsc]
}
// ok, so no NO_COLOR. Let's look at TERM next.
// TODO: use terminfo, like a baws :-)
term := os.Getenv("TERM")
if term == "" || term == "dumb" {
return escapes[bareEsc]
}
if term == "linux" || term == "xterm-256color" {
return escapes[fullEsc]
}
if os.Getenv("COLORTERM") == "truecolor" {
return escapes[fullEsc]
}
return escapes[monoEsc]
}
| [
"\"TERM\"",
"\"COLORTERM\""
]
| []
| [
"COLORTERM",
"TERM"
]
| [] | ["COLORTERM", "TERM"] | go | 2 | 0 | |
internal/aws/k8s/k8sclient/clientset.go | // Copyright OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8sclient // import "github.com/rati3l/opentelemetry-collector-contrib/internal/aws/k8s/k8sclient"
import (
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"sync"
"time"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
const (
cacheTTL = 10 * time.Minute
)
// Option is a struct that can be used to change the configuration of passed K8sClient
// It can be used as an option to the Get(...) function to create a customized K8sClient
type Option struct {
name string
set func(*K8sClient)
}
var mu = &sync.Mutex{}
var optionsToK8sClient = map[string]*K8sClient{}
type stopper interface {
shutdown()
}
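// shutdownClient stops the given client under the mutex and then runs afterShutdown, typically to clear the cached reference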
func shutdownClient(client stopper, mu *sync.Mutex, afterShutdown func()) {
mu.Lock()
if client != nil {
client.shutdown()
afterShutdown()
}
mu.Unlock()
}
type cacheReflector interface {
LastSyncResourceVersion() string
Run(<-chan struct{})
}
type initialSyncChecker interface {
// Check waits for the cache reflector's initial sync and logs warnMessage if it times out
Check(reflector cacheReflector, warnMessage string)
}
// reflectorSyncChecker implements initialSyncChecker interface
type reflectorSyncChecker struct {
pollInterval time.Duration
pollTimeout time.Duration
logger *zap.Logger
}
func (r *reflectorSyncChecker) Check(reflector cacheReflector, warnMessage string) {
if err := wait.Poll(r.pollInterval, r.pollTimeout, func() (done bool, err error) {
return reflector.LastSyncResourceVersion() != "", nil
}); err != nil {
r.logger.Warn(warnMessage, zap.Error(err))
}
}
// KubeConfigPath provides the option to set the kube config which will be used if the
// service account that kubernetes gives to pods can't be used
func KubeConfigPath(kubeConfigPath string) Option {
return Option{
name: "kubeConfigPath:" + kubeConfigPath,
set: func(kc *K8sClient) {
kc.kubeConfigPath = kubeConfigPath
},
}
}
// InitSyncPollInterval provides the option to set the init sync poll interval
// for testing connection to kubernetes api server
func InitSyncPollInterval(pollInterval time.Duration) Option {
return Option{
name: "initSyncPollInterval:" + pollInterval.String(),
set: func(kc *K8sClient) {
kc.initSyncPollInterval = pollInterval
},
}
}
// InitSyncPollTimeout provides the option to set the init sync poll timeout
// for testing connection to kubernetes api server
func InitSyncPollTimeout(pollTimeout time.Duration) Option {
return Option{
name: "initSyncPollTimeout:" + pollTimeout.String(),
set: func(kc *K8sClient) {
kc.initSyncPollTimeout = pollTimeout
},
}
}
func getStringifiedOptions(options ...Option) string {
opts := make([]string, 0)
for _, option := range options {
opts = append(opts, option.name)
}
sort.Strings(opts)
return strings.Join(opts, "+")
}
// Get returns a singleton instance of the k8s client
// If initialization fails, it returns nil
func Get(logger *zap.Logger, options ...Option) *K8sClient {
strOptions := getStringifiedOptions(options...)
mu.Lock()
if optionsToK8sClient[strOptions] == nil {
//construct the k8s client
k8sClient := new(K8sClient)
err := k8sClient.init(logger, options...)
if err == nil {
optionsToK8sClient[strOptions] = k8sClient
}
}
mu.Unlock()
return optionsToK8sClient[strOptions]
}
type epClientWithStopper interface {
EpClient
stopper
}
type jobClientWithStopper interface {
JobClient
stopper
}
type nodeClientWithStopper interface {
NodeClient
stopper
}
type podClientWithStopper interface {
PodClient
stopper
}
type replicaSetClientWithStopper interface {
ReplicaSetClient
stopper
}
type K8sClient struct {
kubeConfigPath string
initSyncPollInterval time.Duration
initSyncPollTimeout time.Duration
clientSet kubernetes.Interface
syncChecker *reflectorSyncChecker
epMu sync.Mutex
ep epClientWithStopper
podMu sync.Mutex
pod podClientWithStopper
nodeMu sync.Mutex
node nodeClientWithStopper
jobMu sync.Mutex
job jobClientWithStopper
rsMu sync.Mutex
replicaSet replicaSetClientWithStopper
logger *zap.Logger
}
func (c *K8sClient) init(logger *zap.Logger, options ...Option) error {
c.logger = logger
// set up some default configs
c.kubeConfigPath = filepath.Join(os.Getenv("HOME"), ".kube/config")
c.initSyncPollInterval = 50 * time.Millisecond
c.initSyncPollTimeout = 2 * time.Second
// take additional options passed in
for _, opt := range options {
opt.set(c)
}
config, err := rest.InClusterConfig()
if err != nil {
c.logger.Warn("cannot find in cluster config", zap.Error(err))
config, err = clientcmd.BuildConfigFromFlags("", c.kubeConfigPath)
if err != nil {
c.logger.Error("failed to build config", zap.Error(err))
return err
}
}
client, err := kubernetes.NewForConfig(config)
if err != nil {
c.logger.Error("failed to build ClientSet", zap.Error(err))
return err
}
c.syncChecker = &reflectorSyncChecker{
pollInterval: c.initSyncPollInterval,
pollTimeout: c.initSyncPollTimeout,
logger: c.logger,
}
c.clientSet = client
c.ep = nil
c.pod = nil
c.node = nil
c.job = nil
c.replicaSet = nil
return nil
}
func (c *K8sClient) GetEpClient() EpClient {
c.epMu.Lock()
if c.ep == nil {
c.ep = newEpClient(c.clientSet, c.logger, epSyncCheckerOption(c.syncChecker))
}
c.epMu.Unlock()
return c.ep
}
func (c *K8sClient) ShutdownEpClient() {
shutdownClient(c.ep, &c.epMu, func() {
c.ep = nil
})
}
func (c *K8sClient) GetPodClient() PodClient {
c.podMu.Lock()
if c.pod == nil {
c.pod = newPodClient(c.clientSet, c.logger, podSyncCheckerOption(c.syncChecker))
}
c.podMu.Unlock()
return c.pod
}
func (c *K8sClient) ShutdownPodClient() {
shutdownClient(c.pod, &c.podMu, func() {
c.pod = nil
})
}
func (c *K8sClient) GetNodeClient() NodeClient {
c.nodeMu.Lock()
if c.node == nil {
c.node = newNodeClient(c.clientSet, c.logger, nodeSyncCheckerOption(c.syncChecker))
}
c.nodeMu.Unlock()
return c.node
}
func (c *K8sClient) ShutdownNodeClient() {
shutdownClient(c.node, &c.nodeMu, func() {
c.node = nil
})
}
func (c *K8sClient) GetJobClient() JobClient {
var err error
c.jobMu.Lock()
if c.job == nil {
c.job, err = newJobClient(c.clientSet, c.logger, jobSyncCheckerOption(c.syncChecker))
if err != nil {
c.logger.Error("use an no-op job client instead because of error", zap.Error(err))
c.job = &noOpJobClient{}
}
}
c.jobMu.Unlock()
return c.job
}
func (c *K8sClient) ShutdownJobClient() {
shutdownClient(c.job, &c.jobMu, func() {
c.job = nil
})
}
func (c *K8sClient) GetReplicaSetClient() ReplicaSetClient {
var err error
c.rsMu.Lock()
if c.replicaSet == nil || reflect.ValueOf(c.replicaSet).IsNil() {
c.replicaSet, err = newReplicaSetClient(c.clientSet, c.logger, replicaSetSyncCheckerOption(c.syncChecker))
if err != nil {
c.logger.Error("use an no-op replica set client instead because of error", zap.Error(err))
c.replicaSet = &noOpReplicaSetClient{}
}
}
c.rsMu.Unlock()
return c.replicaSet
}
func (c *K8sClient) ShutdownReplicaSetClient() {
shutdownClient(c.replicaSet, &c.rsMu, func() {
c.replicaSet = nil
})
}
func (c *K8sClient) GetClientSet() kubernetes.Interface {
return c.clientSet
}
// Shutdown stops K8sClient
func (c *K8sClient) Shutdown() {
mu.Lock()
defer mu.Unlock()
c.ShutdownEpClient()
c.ShutdownPodClient()
c.ShutdownNodeClient()
c.ShutdownJobClient()
c.ShutdownReplicaSetClient()
// remove the current instance of k8s client from map
for key, val := range optionsToK8sClient {
if val == c {
delete(optionsToK8sClient, key)
}
}
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
strangefish/utilities/stockfish.py | import os
import chess.engine
# make sure stockfish environment variable exists
if "STOCKFISH_EXECUTABLE" not in os.environ:
raise KeyError('This bot requires an environment variable called "STOCKFISH_EXECUTABLE"'
' pointing to the Stockfish executable')
# make sure there is actually a file
STOCKFISH_EXECUTABLE = os.getenv('STOCKFISH_EXECUTABLE')
if not os.path.exists(STOCKFISH_EXECUTABLE):
raise ValueError('No stockfish executable found at "{}"'.format(STOCKFISH_EXECUTABLE))
def create_engine():
engine = chess.engine.SimpleEngine.popen_uci(STOCKFISH_EXECUTABLE)
# engine.configure({'Threads': os.cpu_count()})
return engine
| []
| []
| [
"STOCKFISH_EXECUTABLE"
]
| [] | ["STOCKFISH_EXECUTABLE"] | python | 1 | 0 | |
mongodb-datastore/handlers/config.go | package handlers
import (
"fmt"
"os"
)
var (
mongoDBHost = os.Getenv("MONGODB_HOST")
mongoDBName = os.Getenv("MONGODB_DATABASE")
mongoDBUser = os.Getenv("MONGODB_USER")
mongoDBPassword = os.Getenv("MONGODB_PASSWORD")
mongoDBConnection = fmt.Sprintf("mongodb://%s:%s@%s/%s", mongoDBUser, mongoDBPassword, mongoDBHost, mongoDBName)
)
const (
eventsCollectionName = "keptnUnmappedEvents"
serviceName = "mongodb-datastore"
)
| [
"\"MONGODB_HOST\"",
"\"MONGODB_DATABASE\"",
"\"MONGODB_USER\"",
"\"MONGODB_PASSWORD\""
]
| []
| [
"MONGODB_USER",
"MONGODB_HOST",
"MONGODB_PASSWORD",
"MONGODB_DATABASE"
]
| [] | ["MONGODB_USER", "MONGODB_HOST", "MONGODB_PASSWORD", "MONGODB_DATABASE"] | go | 4 | 0 | |
cmd/serv.go | // Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2016 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package cmd
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"os/exec"
"strconv"
"strings"
"time"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/pprof"
"code.gitea.io/gitea/modules/private"
"code.gitea.io/gitea/modules/setting"
"github.com/dgrijalva/jwt-go"
"github.com/unknwon/com"
"github.com/urfave/cli"
)
const (
lfsAuthenticateVerb = "git-lfs-authenticate"
)
// CmdServ represents the available serv sub-command.
var CmdServ = cli.Command{
Name: "serv",
Usage: "This command should only be called by SSH shell",
Description: `Serv provide access auth for repositories`,
Action: runServ,
Flags: []cli.Flag{
cli.BoolFlag{
Name: "enable-pprof",
},
},
}
func setup(logPath string) {
_ = log.DelLogger("console")
setting.NewContext()
}
func parseCmd(cmd string) (string, string) {
ss := strings.SplitN(cmd, " ", 2)
if len(ss) != 2 {
return "", ""
}
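// strip the leading '/' inside the single-quoted repository path, e.g. 'owner/repo.git'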
return ss[0], strings.Replace(ss[1], "'/", "'", 1)
}
var (
allowedCommands = map[string]models.AccessMode{
"git-upload-pack": models.AccessModeRead,
"git-upload-archive": models.AccessModeRead,
"git-receive-pack": models.AccessModeWrite,
lfsAuthenticateVerb: models.AccessModeNone,
}
)
func fail(userMessage, logMessage string, args ...interface{}) {
fmt.Fprintln(os.Stderr, "Gitea:", userMessage)
if len(logMessage) > 0 {
if !setting.ProdMode {
fmt.Fprintf(os.Stderr, logMessage+"\n", args...)
}
}
os.Exit(1)
}
func runServ(c *cli.Context) error {
// FIXME: This needs to be internationalised
setup("serv.log")
if setting.SSH.Disabled {
println("Gitea: SSH has been disabled")
return nil
}
if len(c.Args()) < 1 {
if err := cli.ShowSubcommandHelp(c); err != nil {
fmt.Printf("error showing subcommand help: %v\n", err)
}
return nil
}
keys := strings.Split(c.Args()[0], "-")
if len(keys) != 2 || keys[0] != "key" {
fail("Key ID format error", "Invalid key argument: %s", c.Args()[0])
}
keyID := com.StrTo(keys[1]).MustInt64()
cmd := os.Getenv("SSH_ORIGINAL_COMMAND")
if len(cmd) == 0 {
key, user, err := private.ServNoCommand(keyID)
if err != nil {
fail("Internal error", "Failed to check provided key: %v", err)
}
if key.Type == models.KeyTypeDeploy {
println("Hi there! You've successfully authenticated with the deploy key named " + key.Name + ", but Gitea does not provide shell access.")
} else {
println("Hi there, " + user.Name + "! You've successfully authenticated with the key named " + key.Name + ", but Gitea does not provide shell access.")
}
println("If this is unexpected, please log in with password and setup Gitea under another user.")
return nil
}
verb, args := parseCmd(cmd)
var lfsVerb string
if verb == lfsAuthenticateVerb {
if !setting.LFS.StartServer {
fail("Unknown git command", "LFS authentication request over SSH denied, LFS support is disabled")
}
argsSplit := strings.Split(args, " ")
if len(argsSplit) >= 2 {
args = strings.TrimSpace(argsSplit[0])
lfsVerb = strings.TrimSpace(argsSplit[1])
}
}
repoPath := strings.ToLower(strings.Trim(args, "'"))
rr := strings.SplitN(repoPath, "/", 2)
if len(rr) != 2 {
fail("Invalid repository path", "Invalid repository path: %v", args)
}
username := strings.ToLower(rr[0])
reponame := strings.ToLower(strings.TrimSuffix(rr[1], ".git"))
if setting.EnablePprof || c.Bool("enable-pprof") {
if err := os.MkdirAll(setting.PprofDataPath, os.ModePerm); err != nil {
fail("Error while trying to create PPROF_DATA_PATH", "Error while trying to create PPROF_DATA_PATH: %v", err)
}
stopCPUProfiler, err := pprof.DumpCPUProfileForUsername(setting.PprofDataPath, username)
if err != nil {
fail("Internal Server Error", "Unable to start CPU profile: %v", err)
}
defer func() {
stopCPUProfiler()
err := pprof.DumpMemProfileForUsername(setting.PprofDataPath, username)
if err != nil {
fail("Internal Server Error", "Unable to dump Mem Profile: %v", err)
}
}()
}
requestedMode, has := allowedCommands[verb]
if !has {
fail("Unknown git command", "Unknown git command %s", verb)
}
if verb == lfsAuthenticateVerb {
if lfsVerb == "upload" {
requestedMode = models.AccessModeWrite
} else if lfsVerb == "download" {
requestedMode = models.AccessModeRead
} else {
fail("Unknown LFS verb", "Unknown lfs verb %s", lfsVerb)
}
}
results, err := private.ServCommand(keyID, username, reponame, requestedMode, verb, lfsVerb)
if err != nil {
if private.IsErrServCommand(err) {
errServCommand := err.(private.ErrServCommand)
if errServCommand.StatusCode != http.StatusInternalServerError {
fail("Unauthorized", "%s", errServCommand.Error())
} else {
fail("Internal Server Error", "%s", errServCommand.Error())
}
}
fail("Internal Server Error", "%s", err.Error())
}
os.Setenv(models.EnvRepoIsWiki, strconv.FormatBool(results.IsWiki))
os.Setenv(models.EnvRepoName, results.RepoName)
os.Setenv(models.EnvRepoUsername, results.OwnerName)
os.Setenv(models.EnvPusherName, results.UserName)
os.Setenv(models.EnvPusherID, strconv.FormatInt(results.UserID, 10))
os.Setenv(models.ProtectedBranchRepoID, strconv.FormatInt(results.RepoID, 10))
os.Setenv(models.ProtectedBranchPRID, fmt.Sprintf("%d", 0))
os.Setenv(models.EnvIsDeployKey, fmt.Sprintf("%t", results.IsDeployKey))
os.Setenv(models.EnvKeyID, fmt.Sprintf("%d", results.KeyID))
//LFS token authentication
if verb == lfsAuthenticateVerb {
url := fmt.Sprintf("%s%s/%s.git/info/lfs", setting.AppURL, url.PathEscape(results.OwnerName), url.PathEscape(results.RepoName))
now := time.Now()
claims := jwt.MapClaims{
"repo": results.RepoID,
"op": lfsVerb,
"exp": now.Add(setting.LFS.HTTPAuthExpiry).Unix(),
"nbf": now.Unix(),
"user": results.UserID,
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
// Sign and get the complete encoded token as a string using the secret
tokenString, err := token.SignedString(setting.LFS.JWTSecretBytes)
if err != nil {
fail("Internal error", "Failed to sign JWT token: %v", err)
}
tokenAuthentication := &models.LFSTokenResponse{
Header: make(map[string]string),
Href: url,
}
tokenAuthentication.Header["Authorization"] = fmt.Sprintf("Bearer %s", tokenString)
enc := json.NewEncoder(os.Stdout)
err = enc.Encode(tokenAuthentication)
if err != nil {
fail("Internal error", "Failed to encode LFS json response: %v", err)
}
return nil
}
// Special handling for Windows.
if setting.IsWindows {
verb = strings.Replace(verb, "-", " ", 1)
}
var gitcmd *exec.Cmd
verbs := strings.Split(verb, " ")
if len(verbs) == 2 {
gitcmd = exec.Command(verbs[0], verbs[1], repoPath)
} else {
gitcmd = exec.Command(verb, repoPath)
}
gitcmd.Dir = setting.RepoRootPath
gitcmd.Stdout = os.Stdout
gitcmd.Stdin = os.Stdin
gitcmd.Stderr = os.Stderr
if err = gitcmd.Run(); err != nil {
fail("Internal error", "Failed to execute git command: %v", err)
}
// Update user key activity.
if results.KeyID > 0 {
if err = private.UpdatePublicKeyInRepo(results.KeyID, results.RepoID); err != nil {
fail("Internal error", "UpdatePublicKeyInRepo: %v", err)
}
}
return nil
}
| [
"\"SSH_ORIGINAL_COMMAND\""
]
| []
| [
"SSH_ORIGINAL_COMMAND"
]
| [] | ["SSH_ORIGINAL_COMMAND"] | go | 1 | 0 | |
cmd/list.go | /*
Copyright © 2021 IMRAN ALIYEV <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"log"
"os"
"github.com/microsoft/azure-devops-go-api/azuredevops/core"
"github.com/polariscode/AzureDevopsCli/azureclient"
"github.com/spf13/cobra"
)
// listCmd represents the list command
var listCmd = &cobra.Command{
Use: "list",
Short: "Returns list of projects",
Long: `Return full list of projects from AzureDevops`,
Run: func(cmd *cobra.Command, args []string) {
url := os.Getenv("azure_project_url")
token := os.Getenv("azure_token")
client, ctx := azureclient.CreateClient(url, token)
// Get first page of the list of team projects for your organization
responseValue, err := client.GetProjects(ctx, core.GetProjectsArgs{})
if err != nil {
log.Fatal(err)
}
for responseValue != nil {
// Log the page of team project names
for _, team := range (*responseValue).Value {
desc := ""
if team.Description != nil {
desc = *team.Description
if len(desc) > 60 {
desc = desc[:60] + "..."
}
}
fmt.Printf("- %-50v %v\n", *team.Name, desc)
}
// if continuationToken has a value, then there is at least one more page of projects to get
if responseValue.ContinuationToken != "" {
// Get next page of team projects
projectArgs := core.GetProjectsArgs{
ContinuationToken: &responseValue.ContinuationToken,
}
responseValue, err = client.GetProjects(ctx, projectArgs)
if err != nil {
log.Fatal(err)
}
} else {
responseValue = nil
}
}
},
}
func init() {
rootCmd.AddCommand(listCmd)
// listCmd.PersistentFlags().String("top", "", "Get top N elements")
}
| [
"\"azure_project_url\"",
"\"azure_token\""
]
| []
| [
"azure_token",
"azure_project_url"
]
| [] | ["azure_token", "azure_project_url"] | go | 2 | 0 | |
main.go | package main
import (
"fmt"
"github.com/gorilla/mux"
bslack "github.com/twodarek/barcampgr-teams-bot/barcampgr/slack"
"github.com/twodarek/barcampgr-teams-bot/barcampgr/teams"
"github.com/twodarek/barcampgr-teams-bot/database"
webexteams "github.com/twodarek/go-cisco-webex-teams/sdk"
"log"
"net/http"
"os"
"github.com/slack-go/slack"
"github.com/twodarek/barcampgr-teams-bot/barcampgr"
"github.com/twodarek/barcampgr-teams-bot/server"
)
func main() {
log.Println("Attempting to start barcampgr-teams-bot")
router := mux.NewRouter()
httpClient := &http.Client{}
conf := barcampgr.Config{
SlackAPIToken: os.Getenv("SLACK_API_TOKEN"),
TeamsAPIToken: os.Getenv("CISCO_TEAMS_API_TOKEN"),
BarCampGRWebexId: os.Getenv("BARCAMPGR_WEBEX_ID"),
BaseCallbackURL: os.Getenv("BARCAMPGR_BASE_CALLBACK_URL"),
MySqlUser: os.Getenv("MYSQL_USER"),
MySqlPass: os.Getenv("MYSQL_PASS"),
MySqlServer: os.Getenv("MYSQL_SERVER"),
MySqlPort: os.Getenv("MYSQL_PORT"),
MySqlDatabase: os.Getenv("MYSQL_DATABASE"),
AdminPassword: os.Getenv("BARCAMPGR_ADMIN_PASSWORD"),
InvitePassword: os.Getenv("BARCAMPGR_INVITE_PASSWORD"),
SlackCallbackURL: os.Getenv("SLACK_CALLBACK_URL"),
SlackUsername: os.Getenv("SLACK_USERNAME"),
SlackVerificationToken: os.Getenv("SLACK_VERIFICATION_TOKEN"),
WebexTeamID: os.Getenv("BARCAMPGR_TEAM_ID"),
WebexOrgID: os.Getenv("WEBEX_ORG_ID"),
WebexRoomID: os.Getenv("WEBEX_ROOM_ID"),
WebexCallbackURL: os.Getenv("WEBEX_CALLBACK_URL"),
WebexMembershipCallbackURL: os.Getenv("WEBEX_MEMBERSHIP_CALLBACK_URL"),
}
conf.SetWebexAllRooms(os.Getenv("WEBEX_ALL_ROOMS"))
log.Println("Attempting to start slack client")
slackClient := slack.New(conf.SlackAPIToken, slack.OptionDebug(true))
log.Println("Slack client started")
log.Println("Attempting to start webex teams client")
teamsClient := webexteams.NewClient()
initTeamsClient(teamsClient, conf)
log.Println("Webex teams client started, connecting to database")
sdb := database.NewDatabase(conf.MySqlUser, conf.MySqlPass, conf.MySqlServer, conf.MySqlPort, conf.MySqlDatabase)
log.Println("Database connected")
ac := barcampgr.NewAppController(
httpClient,
sdb,
conf,
)
sac := bslack.NewAppController(
ac,
slackClient,
httpClient,
sdb,
conf,
)
tac := teams.NewAppController(
ac,
teamsClient,
httpClient,
sdb,
conf,
)
s := server.New(ac, sac, tac, conf, router)
// Multiple codepaths use the DefaultServeMux so we start listening at the top
go http.ListenAndServe("0.0.0.0:8080", s)
log.Println("Barcampgr-teams-bot started")
select {}
}
func initTeamsClient(client *webexteams.Client, config barcampgr.Config) error {
client.SetAuthToken(config.TeamsAPIToken)
// Clean up old webhooks
webhooksQueryParams := &webexteams.ListWebhooksQueryParams{
Max: 10,
}
webhooks, _, err := client.Webhooks.ListWebhooks(webhooksQueryParams)
if err != nil {
log.Printf("Unable to get old webhooks, continuing anyway")
}
for _, webhook := range webhooks.Items {
_, err := client.Webhooks.DeleteWebhook(webhook.ID)
if err != nil {
log.Printf("Unable to clean up old webhook %s on endpoint %s", webhook.ID, webhook.TargetURL)
}
}
// Create new @bot message webhook
webhookRequest := &webexteams.WebhookCreateRequest{
Name: "BarCampGR Webhook",
TargetURL: fmt.Sprintf("%s%s", config.BaseCallbackURL, config.WebexCallbackURL),
Resource: "messages",
Event: "created",
}
testWebhook, _, err := client.Webhooks.CreateWebhook(webhookRequest)
if err != nil {
log.Fatalf("Failed to create webhook: %s", err)
}
log.Printf("Created chatop webhook. ID: %s, Name: %s, target URL: %s, created: %s", testWebhook.ID, testWebhook.Name, testWebhook.TargetURL, testWebhook.Created)
// Create new memberships webhook
membershipWebhookRequest := &webexteams.WebhookCreateRequest{
Name: "BarCampGR Memberships Webhook",
TargetURL: fmt.Sprintf("%s%s", config.BaseCallbackURL, config.WebexMembershipCallbackURL),
Resource: "memberships",
Event: "created",
Filter: fmt.Sprintf("roomId=%s", config.WebexRoomID),
}
testMembershipWebhook, _, err := client.Webhooks.CreateWebhook(membershipWebhookRequest)
if err != nil {
log.Fatalf("Failed to create webhook: %s", err)
}
log.Printf("Created membership webhook. ID: %s, Name: %s, target URL: %s, created: %s", testMembershipWebhook.ID, testMembershipWebhook.Name, testMembershipWebhook.TargetURL, testMembershipWebhook.Created)
return nil
} | [
"\"SLACK_API_TOKEN\"",
"\"CISCO_TEAMS_API_TOKEN\"",
"\"BARCAMPGR_WEBEX_ID\"",
"\"BARCAMPGR_BASE_CALLBACK_URL\"",
"\"MYSQL_USER\"",
"\"MYSQL_PASS\"",
"\"MYSQL_SERVER\"",
"\"MYSQL_PORT\"",
"\"MYSQL_DATABASE\"",
"\"BARCAMPGR_ADMIN_PASSWORD\"",
"\"BARCAMPGR_INVITE_PASSWORD\"",
"\"SLACK_CALLBACK_URL\"",
"\"SLACK_USERNAME\"",
"\"SLACK_VERIFICATION_TOKEN\"",
"\"BARCAMPGR_TEAM_ID\"",
"\"WEBEX_ORG_ID\"",
"\"WEBEX_ROOM_ID\"",
"\"WEBEX_CALLBACK_URL\"",
"\"WEBEX_MEMBERSHIP_CALLBACK_URL\"",
"\"WEBEX_ALL_ROOMS\""
]
| []
| [
"WEBEX_ALL_ROOMS",
"MYSQL_USER",
"MYSQL_PORT",
"WEBEX_CALLBACK_URL",
"SLACK_VERIFICATION_TOKEN",
"BARCAMPGR_INVITE_PASSWORD",
"SLACK_CALLBACK_URL",
"MYSQL_PASS",
"MYSQL_SERVER",
"BARCAMPGR_TEAM_ID",
"BARCAMPGR_WEBEX_ID",
"BARCAMPGR_ADMIN_PASSWORD",
"WEBEX_ORG_ID",
"BARCAMPGR_BASE_CALLBACK_URL",
"WEBEX_ROOM_ID",
"SLACK_API_TOKEN",
"CISCO_TEAMS_API_TOKEN",
"SLACK_USERNAME",
"WEBEX_MEMBERSHIP_CALLBACK_URL",
"MYSQL_DATABASE"
]
| [] | ["WEBEX_ALL_ROOMS", "MYSQL_USER", "MYSQL_PORT", "WEBEX_CALLBACK_URL", "SLACK_VERIFICATION_TOKEN", "BARCAMPGR_INVITE_PASSWORD", "SLACK_CALLBACK_URL", "MYSQL_PASS", "MYSQL_SERVER", "BARCAMPGR_TEAM_ID", "BARCAMPGR_WEBEX_ID", "BARCAMPGR_ADMIN_PASSWORD", "WEBEX_ORG_ID", "BARCAMPGR_BASE_CALLBACK_URL", "WEBEX_ROOM_ID", "SLACK_API_TOKEN", "CISCO_TEAMS_API_TOKEN", "SLACK_USERNAME", "WEBEX_MEMBERSHIP_CALLBACK_URL", "MYSQL_DATABASE"] | go | 20 | 0 | |
certbot/certbot/_internal/client.py | """Certbot client API."""
import datetime
import logging
import platform
from cryptography.hazmat.backends import default_backend
# See https://github.com/pyca/cryptography/issues/4275
from cryptography.hazmat.primitives.asymmetric.rsa import generate_private_key # type: ignore
import josepy as jose
import OpenSSL
import zope.component
from acme import client as acme_client
from acme import crypto_util as acme_crypto_util
from acme import errors as acme_errors
from acme import messages
from acme.magic_typing import List
from acme.magic_typing import Optional
import certbot
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot._internal import account
from certbot._internal import auth_handler
from certbot._internal import cli
from certbot._internal import constants
from certbot._internal import eff
from certbot._internal import error_handler
from certbot._internal import storage
from certbot._internal.plugins import selection as plugin_selection
from certbot.compat import os
from certbot.display import ops as display_ops
logger = logging.getLogger(__name__)
def acme_from_config_key(config, key, regr=None):
"Wrangle ACME client construction"
# TODO: Allow for other alg types besides RS256
net = acme_client.ClientNetwork(key, account=regr, verify_ssl=(not config.no_verify_ssl),
user_agent=determine_user_agent(config))
return acme_client.BackwardsCompatibleClientV2(net, key, config.server)
def determine_user_agent(config):
"""
Set a user_agent string in the config based on the choice of plugins.
(this wasn't knowable at construction time)
:returns: the client's User-Agent string
:rtype: `str`
"""
# WARNING: To ensure changes are in line with Certbot's privacy
# policy, talk to a core Certbot team member before making any
# changes here.
if config.user_agent is None:
ua = ("CertbotACMEClient/{0} ({1}; {2}{8}) Authenticator/{3} Installer/{4} "
"({5}; flags: {6}) Py/{7}")
if os.environ.get("CERTBOT_DOCS") == "1":
cli_command = "certbot(-auto)"
os_info = "OS_NAME OS_VERSION"
python_version = "major.minor.patchlevel"
else:
cli_command = cli.cli_command
os_info = util.get_os_info_ua()
python_version = platform.python_version()
ua = ua.format(certbot.__version__, cli_command, os_info,
config.authenticator, config.installer, config.verb,
ua_flags(config), python_version,
"; " + config.user_agent_comment if config.user_agent_comment else "")
else:
ua = config.user_agent
return ua
def ua_flags(config):
"Turn some very important CLI flags into clues in the user agent."
if isinstance(config, DummyConfig):
return "FLAGS"
flags = []
if config.duplicate:
flags.append("dup")
if config.renew_by_default:
flags.append("frn")
if config.allow_subset_of_names:
flags.append("asn")
if config.noninteractive_mode:
flags.append("n")
hook_names = ("pre", "post", "renew", "manual_auth", "manual_cleanup")
hooks = [getattr(config, h + "_hook") for h in hook_names]
if any(hooks):
flags.append("hook")
return " ".join(flags)
class DummyConfig(object):
"Shim for computing a sample user agent."
def __init__(self):
self.authenticator = "XXX"
self.installer = "YYY"
self.user_agent = None
self.verb = "SUBCOMMAND"
def __getattr__(self, name):
"Any config properties we might have are None."
return None
def sample_user_agent():
"Document what this Certbot's user agent string will be like."
return determine_user_agent(DummyConfig())
def register(config, account_storage, tos_cb=None):
"""Register new account with an ACME CA.
This function takes care of generating fresh private key,
registering the account, optionally accepting CA Terms of Service
and finally saving the account. It should be called prior to
initialization of `Client`, unless account has already been created.
:param .IConfig config: Client configuration.
:param .AccountStorage account_storage: Account storage where newly
registered account will be saved to. Save happens only after TOS
acceptance step, so any account private keys or
`.RegistrationResource` will not be persisted if `tos_cb`
returns ``False``.
:param tos_cb: If ACME CA requires the user to accept a Terms of
Service before registering account, client action is
necessary. For example, a CLI tool would prompt the user
acceptance. `tos_cb` must be a callable that should accept
`.RegistrationResource` and return a `bool`: ``True`` iff the
Terms of Service present in the contained
`.Registration.terms_of_service` is accepted by the client, and
``False`` otherwise. ``tos_cb`` will be called only if the
client action is necessary, i.e. when ``terms_of_service is not
None``. This argument is optional, if not supplied it will
default to automatic acceptance!
:raises certbot.errors.Error: In case of any client problems, in
particular registration failure, or unaccepted Terms of Service.
:raises acme.errors.Error: In case of any protocol problems.
:returns: Newly registered and saved account, as well as protocol
API handle (should be used in `Client` initialization).
:rtype: `tuple` of `.Account` and `acme.client.Client`
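Example (illustrative sketch; assumes ``config`` and ``account_storage`` were
built elsewhere and that any presented Terms of Service should be accepted)::
    acc, acme = register(config, account_storage, tos_cb=lambda regr: True)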
"""
# Log non-standard actions, potentially wrong API calls
if account_storage.find_all():
logger.info("There are already existing accounts for %s", config.server)
if config.email is None:
if not config.register_unsafely_without_email:
msg = ("No email was provided and "
"--register-unsafely-without-email was not present.")
logger.warning(msg)
raise errors.Error(msg)
if not config.dry_run:
logger.info("Registering without email!")
# If --dry-run is used, and there is no staging account, create one with no email.
if config.dry_run:
config.email = None
# Each new registration shall use a fresh new key
rsa_key = generate_private_key(
public_exponent=65537,
key_size=config.rsa_key_size,
backend=default_backend())
key = jose.JWKRSA(key=jose.ComparableRSAKey(rsa_key))
acme = acme_from_config_key(config, key)
# TODO: add phone?
regr = perform_registration(acme, config, tos_cb)
acc = account.Account(regr, key)
account.report_new_account(config)
account_storage.save(acc, acme)
eff.prepare_subscription(config, acc)
return acc, acme
def perform_registration(acme, config, tos_cb):
"""
Actually register new account, trying repeatedly if there are email
problems
:param acme.client.Client client: ACME client object.
:param .IConfig config: Client configuration.
:param Callable tos_cb: a callback to handle Term of Service agreement.
:returns: Registration Resource.
:rtype: `acme.messages.RegistrationResource`
"""
eab_credentials_supplied = config.eab_kid and config.eab_hmac_key
if eab_credentials_supplied:
account_public_key = acme.client.net.key.public_key()
eab = messages.ExternalAccountBinding.from_data(account_public_key=account_public_key,
kid=config.eab_kid,
hmac_key=config.eab_hmac_key,
directory=acme.client.directory)
else:
eab = None
if acme.external_account_required():
if not eab_credentials_supplied:
msg = ("Server requires external account binding."
" Please use --eab-kid and --eab-hmac-key.")
raise errors.Error(msg)
try:
newreg = messages.NewRegistration.from_data(email=config.email,
external_account_binding=eab)
return acme.new_account_and_tos(newreg, tos_cb)
except messages.Error as e:
if e.code == "invalidEmail" or e.code == "invalidContact":
if config.noninteractive_mode:
msg = ("The ACME server believes %s is an invalid email address. "
"Please ensure it is a valid email and attempt "
"registration again." % config.email)
raise errors.Error(msg)
config.email = display_ops.get_email(invalid=True)
return perform_registration(acme, config, tos_cb)
raise
class Client(object):
"""Certbot's client.
:ivar .IConfig config: Client configuration.
:ivar .Account account: Account registered with `register`.
:ivar .AuthHandler auth_handler: Authorizations handler that will
dispatch DV challenges to appropriate authenticators
(providing `.IAuthenticator` interface).
:ivar .IAuthenticator auth: Prepared (`.IAuthenticator.prepare`)
authenticator that can solve ACME challenges.
:ivar .IInstaller installer: Installer.
:ivar acme.client.BackwardsCompatibleClientV2 acme: Optional ACME
client API handle. You might already have one from `register`.
"""
def __init__(self, config, account_, auth, installer, acme=None):
"""Initialize a client."""
self.config = config
self.account = account_
self.auth = auth
self.installer = installer
# Initialize ACME if account is provided
if acme is None and self.account is not None:
acme = acme_from_config_key(config, self.account.key, self.account.regr)
self.acme = acme
if auth is not None:
self.auth_handler = auth_handler.AuthHandler(
auth, self.acme, self.account, self.config.pref_challs)
else:
self.auth_handler = None
def obtain_certificate_from_csr(self, csr, orderr=None):
"""Obtain certificate.
:param .util.CSR csr: PEM-encoded Certificate Signing
Request. The key used to generate this CSR can be different
than `authkey`.
:param acme.messages.OrderResource orderr: contains authzrs
:returns: certificate and chain as PEM byte strings
:rtype: tuple
"""
if self.auth_handler is None:
msg = ("Unable to obtain certificate because authenticator is "
"not set.")
logger.warning(msg)
raise errors.Error(msg)
if self.account.regr is None:
raise errors.Error("Please register with the ACME server first.")
logger.debug("CSR: %s", csr)
if orderr is None:
orderr = self._get_order_and_authorizations(csr.data, best_effort=False)
deadline = datetime.datetime.now() + datetime.timedelta(seconds=90)
orderr = self.acme.finalize_order(orderr, deadline)
cert, chain = crypto_util.cert_and_chain_from_fullchain(orderr.fullchain_pem)
return cert.encode(), chain.encode()
def obtain_certificate(self, domains, old_keypath=None):
"""Obtains a certificate from the ACME server.
`.register` must be called before `.obtain_certificate`
:param list domains: domains to get a certificate for
:param str old_keypath: path to an existing private key file to reuse (optional)
:returns: certificate as PEM string, chain as PEM string,
newly generated private key (`.util.Key`), and DER-encoded
Certificate Signing Request (`.util.CSR`).
:rtype: tuple
"""
# We need to determine the key path, key PEM data, CSR path,
# and CSR PEM data. For a dry run, the paths are None because
# they aren't permanently saved to disk. For a lineage with
# --reuse-key, the key path and PEM data are derived from an
# existing file.
if old_keypath is not None:
# We've been asked to reuse a specific existing private key.
# Therefore, we'll read it now and not generate a new one in
# either case below.
#
# We read in bytes here because the type of `key.pem`
# created below is also bytes.
with open(old_keypath, "rb") as f:
keypath = old_keypath
keypem = f.read()
key = util.Key(file=keypath, pem=keypem) # type: Optional[util.Key]
logger.info("Reusing existing private key from %s.", old_keypath)
else:
# The key is set to None here but will be created below.
key = None
# Create CSR from names
if self.config.dry_run:
key = key or util.Key(file=None,
pem=crypto_util.make_key(self.config.rsa_key_size))
csr = util.CSR(file=None, form="pem",
data=acme_crypto_util.make_csr(
key.pem, domains, self.config.must_staple))
else:
key = key or crypto_util.init_save_key(self.config.rsa_key_size,
self.config.key_dir)
csr = crypto_util.init_save_csr(key, domains, self.config.csr_dir)
orderr = self._get_order_and_authorizations(csr.data, self.config.allow_subset_of_names)
authzr = orderr.authorizations
auth_domains = set(a.body.identifier.value for a in authzr)
successful_domains = [d for d in domains if d in auth_domains]
# allow_subset_of_names is currently disabled for wildcard
# certificates. The reason for this and checking allow_subset_of_names
# below is because successful_domains == domains is never true if
# domains contains a wildcard because the ACME spec forbids identifiers
# in authzs from containing a wildcard character.
if self.config.allow_subset_of_names and successful_domains != domains:
if not self.config.dry_run:
os.remove(key.file)
os.remove(csr.file)
return self.obtain_certificate(successful_domains)
else:
cert, chain = self.obtain_certificate_from_csr(csr, orderr)
return cert, chain, key, csr
def _get_order_and_authorizations(self, csr_pem, best_effort):
# type: (str, bool) -> messages.OrderResource
"""Request a new order and complete its authorizations.
:param str csr_pem: A CSR in PEM format.
:param bool best_effort: True if failing to complete all
authorizations should not raise an exception
:returns: order resource containing its completed authorizations
:rtype: acme.messages.OrderResource
"""
try:
orderr = self.acme.new_order(csr_pem)
except acme_errors.WildcardUnsupportedError:
raise errors.Error("The currently selected ACME CA endpoint does"
" not support issuing wildcard certificates.")
# For a dry run, ensure we have an order with fresh authorizations
if orderr and self.config.dry_run:
deactivated, failed = self.auth_handler.deactivate_valid_authorizations(orderr)
if deactivated:
logger.debug("Recreating order after authz deactivations")
orderr = self.acme.new_order(csr_pem)
if failed:
logger.warning("Certbot was unable to obtain fresh authorizations for every domain"
". The dry run will continue, but results may not be accurate.")
authzr = self.auth_handler.handle_authorizations(orderr, best_effort)
return orderr.update(authorizations=authzr)
def obtain_and_enroll_certificate(self, domains, certname):
"""Obtain and enroll certificate.
Get a new certificate for the specified domains using the specified
authenticator and installer, and then create a new renewable lineage
containing it.
:param domains: domains to request a certificate for
:type domains: `list` of `str`
:param certname: requested name of lineage
:type certname: `str` or `None`
:returns: A new :class:`certbot._internal.storage.RenewableCert` instance
referring to the enrolled cert lineage, False if the cert could not
be obtained, or None if doing a successful dry run.
"""
cert, chain, key, _ = self.obtain_certificate(domains)
if (self.config.config_dir != constants.CLI_DEFAULTS["config_dir"] or
self.config.work_dir != constants.CLI_DEFAULTS["work_dir"]):
logger.info(
"Non-standard path(s), might not work with crontab installed "
"by your operating system package manager")
new_name = self._choose_lineagename(domains, certname)
if self.config.dry_run:
logger.debug("Dry run: Skipping creating new lineage for %s",
new_name)
return None
return storage.RenewableCert.new_lineage(
new_name, cert,
key.pem, chain,
self.config)
def _choose_lineagename(self, domains, certname):
"""Chooses a name for the new lineage.
:param domains: domains in certificate request
:type domains: `list` of `str`
:param certname: requested name of lineage
:type certname: `str` or `None`
:returns: lineage name that should be used
:rtype: str
"""
if certname:
return certname
elif util.is_wildcard_domain(domains[0]):
# Don't make files and directories starting with *.
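# e.g. for ["*.example.com"] the chosen lineage name is "example.com",
# because the leading "*." is sliced off below (illustrative example).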
return domains[0][2:]
return domains[0]
def save_certificate(self, cert_pem, chain_pem,
cert_path, chain_path, fullchain_path):
"""Saves the certificate received from the ACME server.
:param str cert_pem:
:param str chain_pem:
:param str cert_path: Candidate path to a certificate.
:param str chain_path: Candidate path to a certificate chain.
:param str fullchain_path: Candidate path to a full cert chain.
:returns: cert_path, chain_path, and fullchain_path as absolute
paths to the actual files
:rtype: `tuple` of `str`
:raises IOError: If unable to find room to write the cert files
"""
for path in cert_path, chain_path, fullchain_path:
util.make_or_verify_dir(os.path.dirname(path), 0o755, self.config.strict_permissions)
cert_file, abs_cert_path = _open_pem_file('cert_path', cert_path)
try:
cert_file.write(cert_pem)
finally:
cert_file.close()
logger.info("Server issued certificate; certificate written to %s",
abs_cert_path)
chain_file, abs_chain_path =\
_open_pem_file('chain_path', chain_path)
fullchain_file, abs_fullchain_path =\
_open_pem_file('fullchain_path', fullchain_path)
_save_chain(chain_pem, chain_file)
_save_chain(cert_pem + chain_pem, fullchain_file)
return abs_cert_path, abs_chain_path, abs_fullchain_path
def deploy_certificate(self, domains, privkey_path,
cert_path, chain_path, fullchain_path):
"""Install certificate
:param list domains: list of domains to install the certificate
:param str privkey_path: path to certificate private key
:param str cert_path: certificate file path (optional)
:param str chain_path: chain file path
"""
if self.installer is None:
logger.warning("No installer specified, client is unable to deploy "
"the certificate")
raise errors.Error("No installer available")
chain_path = None if chain_path is None else os.path.abspath(chain_path)
msg = ("Unable to install the certificate")
with error_handler.ErrorHandler(self._recovery_routine_with_msg, msg):
for dom in domains:
self.installer.deploy_cert(
domain=dom, cert_path=os.path.abspath(cert_path),
key_path=os.path.abspath(privkey_path),
chain_path=chain_path,
fullchain_path=fullchain_path)
self.installer.save() # needed by the Apache plugin
self.installer.save("Deployed ACME Certificate")
msg = ("We were unable to install your certificate, "
"however, we successfully restored your "
"server to its prior configuration.")
with error_handler.ErrorHandler(self._rollback_and_restart, msg):
# sites may have been enabled / final cleanup
self.installer.restart()
def enhance_config(self, domains, chain_path, redirect_default=True):
"""Enhance the configuration.
:param list domains: list of domains to configure
:param chain_path: chain file path
:type chain_path: `str` or `None`
:param redirect_default: boolean value that the "redirect" flag should default to
:raises .errors.Error: if no installer is specified in the
client.
"""
if self.installer is None:
logger.warning("No installer is specified, there isn't any "
"configuration to enhance.")
raise errors.Error("No installer available")
enhanced = False
enhancement_info = (
("hsts", "ensure-http-header", "Strict-Transport-Security"),
("redirect", "redirect", None),
("staple", "staple-ocsp", chain_path),
("uir", "ensure-http-header", "Upgrade-Insecure-Requests"),)
supported = self.installer.supported_enhancements()
for config_name, enhancement_name, option in enhancement_info:
config_value = getattr(self.config, config_name)
if enhancement_name in supported:
if config_name == "redirect" and config_value is None:
config_value = redirect_default
if config_value:
self.apply_enhancement(domains, enhancement_name, option)
enhanced = True
elif config_value:
logger.warning(
"Option %s is not supported by the selected installer. "
"Skipping enhancement.", config_name)
msg = ("We were unable to restart the web server")
if enhanced:
with error_handler.ErrorHandler(self._rollback_and_restart, msg):
self.installer.restart()
def apply_enhancement(self, domains, enhancement, options=None):
"""Applies an enhancement on all domains.
:param list domains: list of ssl_vhosts (as strings)
:param str enhancement: name of enhancement, e.g. ensure-http-header
:param str options: options to enhancement, e.g. Strict-Transport-Security
.. note:: When more `options` are needed, make options a list.
:raises .errors.PluginError: If Enhancement is not supported, or if
there is any other problem with the enhancement.
"""
msg = ("We were unable to set up enhancement %s for your server, "
"however, we successfully installed your certificate."
% (enhancement))
with error_handler.ErrorHandler(self._recovery_routine_with_msg, msg):
for dom in domains:
try:
self.installer.enhance(dom, enhancement, options)
except errors.PluginEnhancementAlreadyPresent:
if enhancement == "ensure-http-header":
logger.warning("Enhancement %s was already set.",
options)
else:
logger.warning("Enhancement %s was already set.",
enhancement)
except errors.PluginError:
logger.warning("Unable to set enhancement %s for %s",
enhancement, dom)
raise
self.installer.save("Add enhancement %s" % (enhancement))
def _recovery_routine_with_msg(self, success_msg):
"""Calls the installer's recovery routine and prints success_msg
:param str success_msg: message to show on successful recovery
"""
self.installer.recovery_routine()
reporter = zope.component.getUtility(interfaces.IReporter)
reporter.add_message(success_msg, reporter.HIGH_PRIORITY)
def _rollback_and_restart(self, success_msg):
"""Rollback the most recent checkpoint and restart the webserver
:param str success_msg: message to show on successful rollback
"""
logger.critical("Rolling back to previous server configuration...")
reporter = zope.component.getUtility(interfaces.IReporter)
try:
self.installer.rollback_checkpoints()
self.installer.restart()
except:
reporter.add_message(
"An error occurred and we failed to restore your config and "
"restart your server. Please post to "
"https://community.letsencrypt.org/c/help "
"with details about your configuration and this error you received.",
reporter.HIGH_PRIORITY)
raise
reporter.add_message(success_msg, reporter.HIGH_PRIORITY)
def validate_key_csr(privkey, csr=None):
"""Validate Key and CSR files.
Verifies that the client key and csr arguments are valid and correspond to
one another. This does not currently check the names in the CSR due to
the inability to read SANs from CSRs in python crypto libraries.
If csr is left as None, only the key will be validated.
:param privkey: Key associated with CSR
:type privkey: :class:`certbot.util.Key`
:param .util.CSR csr: CSR
:raises .errors.Error: when validation fails
"""
# TODO: Handle all of these problems appropriately
# The client can eventually do things like prompt the user
# and allow the user to take more appropriate actions
# Key must be readable and valid.
if privkey.pem and not crypto_util.valid_privkey(privkey.pem):
raise errors.Error("The provided key is not a valid key")
if csr:
if csr.form == "der":
csr_obj = OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, csr.data)
cert_buffer = OpenSSL.crypto.dump_certificate_request(
OpenSSL.crypto.FILETYPE_PEM, csr_obj
)
csr = util.CSR(csr.file, cert_buffer, "pem")
# If CSR is provided, it must be readable and valid.
if csr.data and not crypto_util.valid_csr(csr.data):
raise errors.Error("The provided CSR is not a valid CSR")
# If both CSR and key are provided, the key must be the same key used
# in the CSR.
if csr.data and privkey.pem:
if not crypto_util.csr_matches_pubkey(
csr.data, privkey.pem):
raise errors.Error("The key and CSR do not match")
def rollback(default_installer, checkpoints, config, plugins):
"""Revert configuration the specified number of checkpoints.
:param int checkpoints: Number of checkpoints to revert.
:param config: Configuration.
:type config: :class:`certbot.interfaces.IConfig`
"""
# Misconfigurations are only slight problems... allow the user to roll back
installer = plugin_selection.pick_installer(
config, default_installer, plugins, question="Which installer "
"should be used for rollback?")
# No Errors occurred during init... proceed normally
# If installer is None... couldn't find an installer... there shouldn't be
# anything to rollback
if installer is not None:
installer.rollback_checkpoints(checkpoints)
installer.restart()
def _open_pem_file(cli_arg_path, pem_path):
"""Open a pem file.
If cli_arg_path was set by the client, open that.
Otherwise, uniquify the file path.
:param str cli_arg_path: the cli arg name, e.g. cert_path
:param str pem_path: the pem file path to open
:returns: a tuple of file object and its absolute file path
"""
if cli.set_by_cli(cli_arg_path):
return util.safe_open(pem_path, chmod=0o644, mode="wb"),\
os.path.abspath(pem_path)
uniq = util.unique_file(pem_path, 0o644, "wb")
return uniq[0], os.path.abspath(uniq[1])
def _save_chain(chain_pem, chain_file):
"""Writes chain_pem to the already-opened chain_file object.
:param str chain_pem: certificate chain in PEM format
:param chain_file: open file object to write the chain to
"""
try:
chain_file.write(chain_pem)
finally:
chain_file.close()
logger.info("Cert chain written to %s", chain_file.name)
| []
| []
| [
"CERTBOT_DOCS"
]
| [] | ["CERTBOT_DOCS"] | python | 1 | 0 | |
config/wsgi.py | """
WSGI config for vien_nchl project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# vien_nchl directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'vien_nchl'))
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
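# Illustrative sketch (not part of the original project): a minimal WSGI
# middleware that adds a response header, assuming the hypothetical header name
# "X-Hello". It is left commented out, like the example above, so behaviour is
# unchanged unless you opt in.
# def add_header_middleware(app):
#     def middleware(environ, start_response):
#         def _start_response(status, headers, exc_info=None):
#             headers.append(("X-Hello", "world"))  # hypothetical header
#             return start_response(status, headers, exc_info)
#         return app(environ, _start_response)
#     return middleware
# application = add_header_middleware(application)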
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
vultr/vultr.go | package vultr
import (
"context"
"fmt"
"os"
"github.com/nanovms/ops/lepton"
"github.com/nanovms/ops/types"
"github.com/vultr/govultr/v2"
"golang.org/x/oauth2"
)
// Vultr provides access to the Vultr API.
type Vultr struct {
Storage *Objects
Client *govultr.Client
}
// Initialize provider
func (v *Vultr) Initialize(config *types.ProviderConfig) error {
apiKey := os.Getenv("VULTR_TOKEN")
if apiKey == "" {
return fmt.Errorf("VULTR_TOKEN is not set")
}
vultrConfig := &oauth2.Config{}
ctx := context.Background()
ts := vultrConfig.TokenSource(ctx, &oauth2.Token{AccessToken: apiKey})
v.Client = govultr.NewClient(oauth2.NewClient(ctx, ts))
return nil
}
// GetStorage returns storage interface for cloud provider
func (v *Vultr) GetStorage() lepton.Storage {
return v.Storage
}
| [
"\"VULTR_TOKEN\""
]
| []
| [
"VULTR_TOKEN"
]
| [] | ["VULTR_TOKEN"] | go | 1 | 0 | |
cmd/frontend/internal/app/ui/handlers.go | package ui
import (
"context"
"html/template"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/inconshreveable/log15"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/cmd/frontend/auth"
"github.com/sourcegraph/sourcegraph/cmd/frontend/backend"
"github.com/sourcegraph/sourcegraph/cmd/frontend/envvar"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/app/assetsutil"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/app/jscontext"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/handlerutil"
"github.com/sourcegraph/sourcegraph/cmd/frontend/types"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/repoupdater"
"github.com/sourcegraph/sourcegraph/internal/routevar"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/sourcegraph/internal/vcs/git"
)
type InjectedHTML struct {
HeadTop template.HTML
HeadBottom template.HTML
BodyTop template.HTML
BodyBottom template.HTML
}
type Metadata struct {
// Title is the title of the page for Twitter cards, OpenGraph, etc.
// e.g. "Open in Sourcegraph"
Title string
// Description is the description of the page for Twitter cards, OpenGraph,
// etc. e.g. "View this link in Sourcegraph Editor."
Description string
// ShowPreview controls whether or not OpenGraph/Twitter card/etc metadata is rendered.
ShowPreview bool
}
type Common struct {
Injected InjectedHTML
Metadata *Metadata
Context jscontext.JSContext
AssetURL string
Title string
Error *pageError
WebpackDevServer bool // whether the Webpack dev server is running (WEBPACK_DEV_SERVER env var)
// The fields below have zero values when not on a repo page.
Repo *types.Repo
Rev string // unresolved / user-specified revision (e.x.: "@master")
api.CommitID // resolved SHA1 revision
}
var webpackDevServer, _ = strconv.ParseBool(os.Getenv("WEBPACK_DEV_SERVER"))
// repoShortName trims the first path element of the given repo name if it has
// at least two path components.
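// For example, "github.com/gorilla/mux" becomes "gorilla/mux", while a
// single-element name such as "mux" is returned unchanged.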
func repoShortName(name api.RepoName) string {
split := strings.Split(string(name), "/")
if len(split) < 2 {
return string(name)
}
return strings.Join(split[1:], "/")
}
// newCommon builds a *Common data structure, returning an error if one occurs.
//
// In the event of the repository having been renamed, the request is handled
// by newCommon and nil, nil is returned. Basic usage looks like:
//
// common, err := newCommon(w, r, serveError)
// if err != nil {
// return err
// }
// if common == nil {
// return nil // request was handled
// }
//
// In the case of a repository that is cloning, a Common data structure is
// returned but it has an incomplete RevSpec.
func newCommon(w http.ResponseWriter, r *http.Request, title string, serveError func(w http.ResponseWriter, r *http.Request, err error, statusCode int)) (*Common, error) {
common := &Common{
Injected: InjectedHTML{
HeadTop: template.HTML(conf.Get().HtmlHeadTop),
HeadBottom: template.HTML(conf.Get().HtmlHeadBottom),
BodyTop: template.HTML(conf.Get().HtmlBodyTop),
BodyBottom: template.HTML(conf.Get().HtmlBodyBottom),
},
Context: jscontext.NewJSContextFromRequest(r),
AssetURL: assetsutil.URL("").String(),
Title: title,
Metadata: &Metadata{
Title: conf.BrandName(),
Description: "Sourcegraph is a web-based code search and navigation tool for dev teams. Search, navigate, and review code. Find answers.",
ShowPreview: r.URL.Path == "/sign-in" && r.URL.RawQuery == "returnTo=%2F",
},
WebpackDevServer: webpackDevServer,
}
if _, ok := mux.Vars(r)["Repo"]; ok {
// Common repo pages (blob, tree, etc).
var err error
common.Repo, common.CommitID, err = handlerutil.GetRepoAndRev(r.Context(), mux.Vars(r))
isRepoEmptyError := routevar.ToRepoRev(mux.Vars(r)).Rev == "" && gitserver.IsRevisionNotFound(errors.Cause(err)) // should reply with HTTP 200
if err != nil && !isRepoEmptyError {
if e, ok := err.(*handlerutil.URLMovedError); ok {
// The repository has been renamed, e.g. "github.com/docker/docker"
// was renamed to "github.com/moby/moby" -> redirect the user now.
err = handlerutil.RedirectToNewRepoName(w, r, e.NewRepo)
if err != nil {
return nil, errors.Wrap(err, "when sending renamed repository redirect response")
}
return nil, nil
}
if e, ok := err.(backend.ErrRepoSeeOther); ok {
// Repo does not exist here, redirect to the recommended location.
u, err := url.Parse(e.RedirectURL)
if err != nil {
return nil, err
}
u.Path, u.RawQuery = r.URL.Path, r.URL.RawQuery
http.Redirect(w, r, u.String(), http.StatusSeeOther)
return nil, nil
}
if gitserver.IsRevisionNotFound(errors.Cause(err)) {
// Revision does not exist.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
if _, ok := errors.Cause(err).(*gitserver.RepoNotCloneableErr); ok {
if errcode.IsNotFound(err) {
// Repository is not found.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
// Repository is not clonable.
dangerouslyServeError(w, r, errors.New("repository could not be cloned"), http.StatusInternalServerError)
return nil, nil
}
if vcs.IsRepoNotExist(err) {
if vcs.IsCloneInProgress(err) {
// Repo is cloning.
return common, nil
}
// Repo does not exist.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
if errcode.IsNotFound(err) {
// Repo does not exist.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
if errcode.IsUnauthorized(err) {
// Not authorized to access repository.
serveError(w, r, err, http.StatusUnauthorized)
return nil, nil
}
return nil, err
}
if common.Repo.Name == "github.com/sourcegraphtest/Always500Test" {
return nil, errors.New("error caused by Always500Test repo name")
}
common.Rev = mux.Vars(r)["Rev"]
// Update gitserver contents for a repo whenever it is visited.
go func() {
ctx := context.Background()
gitserverRepo, err := backend.GitRepo(ctx, common.Repo)
if err != nil {
log15.Error("backend.GitRepo", "error", err)
return
}
_, err = repoupdater.DefaultClient.EnqueueRepoUpdate(ctx, gitserverRepo)
if err != nil {
log15.Error("EnqueueRepoUpdate", "error", err)
}
}()
}
return common, nil
}
type handlerFunc func(w http.ResponseWriter, r *http.Request) error
func serveBrandedPageString(titles ...string) handlerFunc {
return serveBasicPage(func(c *Common, r *http.Request) string {
return brandNameSubtitle(titles...)
})
}
func serveBasicPage(title func(c *Common, r *http.Request) string) handlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
common.Title = title(common, r)
return renderTemplate(w, "app.html", common)
}
}
func serveHome(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, conf.BrandName(), serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
if envvar.SourcegraphDotComMode() && !actor.FromContext(r.Context()).IsAuthenticated() {
// The user is not signed in and tried to access Sourcegraph.com. Redirect to
// about.sourcegraph.com so they see general info page.
http.Redirect(w, r, (&url.URL{Scheme: aboutRedirectScheme, Host: aboutRedirectHost}).String(), http.StatusTemporaryRedirect)
return nil
}
// On non-Sourcegraph.com instances, there is no separate homepage, so redirect to /search.
r.URL.Path = "/search"
http.Redirect(w, r, r.URL.String(), http.StatusTemporaryRedirect)
return nil
}
func serveSignIn(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
common.Title = brandNameSubtitle("Sign in")
return renderTemplate(w, "app.html", common)
}
// redirectTreeOrBlob redirects a blob page to a tree page if the file is actually a directory,
// or a tree page to a blob page if the directory is actually a file.
func redirectTreeOrBlob(routeName, path string, common *Common, w http.ResponseWriter, r *http.Request) (requestHandled bool, err error) {
// NOTE: It makes no sense for this function to proceed if the commit ID
// for the repository is empty. Most likely the repository still has a
// clone in progress.
if common.CommitID == "" {
return false, nil
}
if path == "/" || path == "" {
if routeName != routeRepo {
// Redirect to repo route
target := "/" + string(common.Repo.Name) + common.Rev
http.Redirect(w, r, target, http.StatusTemporaryRedirect)
return true, nil
}
return false, nil
}
cachedRepo, err := backend.CachedGitRepo(r.Context(), common.Repo)
if err != nil {
return false, err
}
stat, err := git.Stat(r.Context(), *cachedRepo, common.CommitID, path)
if err != nil {
if os.IsNotExist(err) {
serveError(w, r, err, http.StatusNotFound)
return true, nil
}
return false, err
}
expectedDir := routeName == routeTree
if stat.Mode().IsDir() != expectedDir {
target := "/" + string(common.Repo.Name) + common.Rev + "/-/"
if expectedDir {
target += "blob"
} else {
target += "tree"
}
target += path
http.Redirect(w, r, auth.SafeRedirectURL(target), http.StatusTemporaryRedirect)
return true, nil
}
return false, nil
}
// serveTree serves the tree (directory) pages.
func serveTree(title func(c *Common, r *http.Request) string) handlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
handled, err := redirectTreeOrBlob(routeTree, mux.Vars(r)["Path"], common, w, r)
if handled {
return nil
}
if err != nil {
return err
}
common.Title = title(common, r)
return renderTemplate(w, "app.html", common)
}
}
func serveRepoOrBlob(routeName string, title func(c *Common, r *http.Request) string) handlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
handled, err := redirectTreeOrBlob(routeName, mux.Vars(r)["Path"], common, w, r)
if handled {
return nil
}
if err != nil {
return err
}
common.Title = title(common, r)
q := r.URL.Query()
_, isNewQueryUX := q["sq"] // sq URL param is only set by new query UX in SearchNavbarItem.tsx
if search := q.Get("q"); search != "" && !isNewQueryUX {
// Redirect old search URLs:
//
// /github.com/gorilla/mux@24fca303ac6da784b9e8269f724ddeb0b2eea5e7?q=ErrMethodMismatch&utm_source=chrome-extension
// /github.com/gorilla/mux@24fca303ac6da784b9e8269f724ddeb0b2eea5e7/-/blob/mux.go?q=NewRouter
//
// To new ones:
//
// /search?q=repo:^github.com/gorilla/mux$+ErrMethodMismatch
//
// It does not apply the file: filter because that was not the behavior of the
// old blob URLs with a 'q' parameter either.
r.URL.Path = "/search"
q.Set("sq", "repo:^"+regexp.QuoteMeta(string(common.Repo.Name))+"$")
r.URL.RawQuery = q.Encode()
http.Redirect(w, r, r.URL.String(), http.StatusPermanentRedirect)
return nil
}
return renderTemplate(w, "app.html", common)
}
}
// searchBadgeHandler serves the search readme badges from the search-badger service
// https://github.com/sourcegraph/search-badger
var searchBadgeHandler = &httputil.ReverseProxy{
Director: func(r *http.Request) {
r.URL.Scheme = "http"
r.URL.Host = "search-badger"
r.URL.Path = "/"
},
ErrorLog: log.New(env.DebugOut, "search-badger proxy: ", log.LstdFlags),
}
| [
"\"WEBPACK_DEV_SERVER\""
]
| []
| [
"WEBPACK_DEV_SERVER"
]
| [] | ["WEBPACK_DEV_SERVER"] | go | 1 | 0 | |
buffer/in-vicinity-python/uno/test.py | #-*- coding : utf-8 -*-
# openoffice -accept="socket,host=localhost,port=2002;urp;"
# soffice -accept="socket,host=localhost,port=2002;urp;"
''' author: original code by another author; modified here '''
import os
import sys
def import_uno():
# Add the URE_BOOTSTRAP environment variable #3
os.environ['URE_BOOTSTRAP'] = 'vnd.sun.star.pathname:c:\Program Files\OpenOffice.org 3\program\\fundamental.ini'
# Add the UNO_PATH environment variable #4
os.environ['UNO_PATH'] = 'c:\Program Files\OpenOffice.org 3\program\\'
# Add the PATH environment variable, but weed the duplicates first #5
new_paths_string = 'c:\Program Files\OpenOffice.org 3\\URE\\bin;c:\Program Files\OpenOffice.org 3\Basis\program;C:\WINDOWS\system32;C:\WINDOWS;C:\WINDOWS\System32\\Wbem;C:\Program Files\Common Files\Intuit\QBPOSSDKRuntime'
new_paths = new_paths_string.split(';')
existing_paths = os.environ['PATH'].split(';')
for path in new_paths:
if path not in existing_paths:
existing_paths.append(path)
os.environ['PATH'] = ';'.join(existing_paths)
# Add the uno location to PYTHONPATH #6
sys.path.append('C:\\Program Files\\OpenOffice.org 3\\Basis\\program')
return
# Begin Main
import_uno()
import uno
''' Here is the sequence of things the lines do:
1. Get the uno component context from the PyUNO runtime
2. Create the UnoUrlResolver
3. Get the central desktop object
4. Declare the ServiceManager
5. Get the central desktop object
6. Access the current writer document
7. Access the document's text property
8. Create a cursor
9. Insert the text into the document '''
localContext = uno.getComponentContext()
resolver = localContext.ServiceManager.createInstanceWithContext(
"com.sun.star.bridge.UnoUrlResolver", localContext )
ctx = resolver.resolve( "uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext" )
smgr = ctx.ServiceManager
desktop = smgr.createInstanceWithContext( "com.sun.star.frame.Desktop",ctx)
model = desktop.getCurrentComponent()
text = model.Text
cursor = text.createTextCursor()
text.insertString( cursor, "Hello World", 0 )
''' Do a nasty thing before exiting the python process. In case the
last call is a one-way call (e.g. see idl-spec of insertString),
it must be forced out of the remote-bridge caches before python
exits the process. Otherwise, the one-way call may or may not reach
the target object.
I do this here by calling a cheap synchronous call (getPropertyValue).'''
ctx.ServiceManager
document = desktop.getCurrentComponent()
fname = "/letter2.html"
fileSysPath = os.getcwd()
fileSysPath = fileSysPath.replace('\\', '/')
fullSaveFileName = "file:///"+fileSysPath+fname
document.storeAsURL( fullSaveFileName,())
# End Main
| []
| []
| [
"URE_BOOTSTRAP",
"UNO_PATH",
"PATH"
]
| [] | ["URE_BOOTSTRAP", "UNO_PATH", "PATH"] | python | 3 | 0 | |
electrum/util.py | # Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from typing import NamedTuple, Union, TYPE_CHECKING, Tuple, Optional, Callable
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
import inspect
from locale import localeconv
import asyncio
import urllib.request, urllib.parse, urllib.error
import builtins
import json
import time
import aiohttp
from aiohttp_socks import SocksConnector, SocksVer
from aiorpcx import TaskGroup
from .i18n import _
if TYPE_CHECKING:
from .network import Network
from .interface import Interface
from .simple_config import SimpleConfig
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'NIX':8, 'mNIX':5, 'bits':2, 'sat':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['NIX', 'mNIX', 'bits', 'sat'] # list(dict) does not guarantee order
DECIMAL_POINT_DEFAULT = 8  # NIX
class UnknownBaseUnit(Exception): pass
def decimal_point_to_base_unit_name(dp: int) -> str:
# e.g. 8 -> "NIX"
try:
return base_units_inverse[dp]
except KeyError:
raise UnknownBaseUnit(dp) from None
def base_unit_name_to_decimal_point(unit_name: str) -> int:
# e.g. "NIX" -> 8
try:
return base_units[unit_name]
except KeyError:
raise UnknownBaseUnit(unit_name) from None
class NotEnoughFunds(Exception):
def __str__(self):
return _("Insufficient funds")
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
class UserFacingException(Exception):
"""Exception that contains information intended to be shown to the user."""
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
__slots__ = ('value',)
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Satoshis(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " NIX"
class Fiat(object):
__slots__ = ('value', 'ccy')
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value is None or self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
if isinstance(obj, set):
return list(obj)
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
verbosity_filter = ''
def diagnostic_name(self):
return ''
def log_name(self):
msg = self.verbosity_filter or self.__class__.__name__
d = self.diagnostic_name()
if d: msg += "][" + d
return "[%s]" % msg
def print_error(self, *msg):
if self.verbosity_filter in verbosity or verbosity == '*':
print_error(self.log_name(), *msg)
def print_stderr(self, *msg):
print_stderr(self.log_name(), *msg)
def print_msg(self, *msg):
print_msg(self.log_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
verbosity_filter = 'd'
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
verbosity = ''
def set_verbosity(filters: Union[str, bool]):
global verbosity
if type(filters) is bool: # backwards compat
verbosity = '*' if filters else ''
return
verbosity = filters
def print_error(*args):
if not verbosity: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def get_func_name(args):
arg_names_from_sig = inspect.getfullargspec(func).args
# prepend class name if there is one (and if we can find it)
if len(arg_names_from_sig) > 0 and len(args) > 0 \
and arg_names_from_sig[0] in ('self', 'cls', 'klass'):
classname = args[0].__class__.__name__
else:
classname = ''
name = '{}.{}'.format(classname, func.__name__) if classname else func.__name__
return name
def do_profile(args, kw_args):
name = get_func_name(args)
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", name, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(args, kw_args)
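# Illustrative usage (not part of the original source): decorate a function to
# log its wall-clock runtime via print_error when verbosity is enabled, e.g.
#
#   @profiler
#   def load_wallet(path):
#       ...
#
# which prints a line such as "[profiler] load_wallet 0.1234".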
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def ensure_sparse_file(filename):
# On modern Linux, no need to do anything.
# On Windows, need to explicitly mark file.
if os.name == "nt":
try:
os.system('fsutil sparse setflag "{}" 1'.format(filename))
except Exception as e:
print_error('error marking file {} as sparse: {}'.format(filename, e))
def get_headers_dir(config):
return config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc) -> str:
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8') -> bytes:
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x: bytes) -> str:
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-nix")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-NIX")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-NIX")
else:
#raise Exception("No home directory found in environment variables.")
return
def is_valid_email(s):
regexp = r"[^@]+@[^@]+\.[^@]+"
return re.match(regexp, s) is not None
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
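# For instance (illustrative), format_satoshis_plain(150000000) returns "1.5"
# with the default 8 decimal places, because trailing zeros and the trailing
# decimal point are stripped.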
DECIMAL_POINT = localeconv()['decimal_point']
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
# format string
decimal_format = "." + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
# initial result
scale_factor = pow(10, decimal_point)
if not isinstance(x, Decimal):
x = Decimal(x).quantize(Decimal('1E-8'))
result = ("{:" + decimal_format + "f}").format(x / scale_factor)
if "." not in result: result += "."
result = result.rstrip('0')
# extra decimal places
integer_part, fract_part = result.split(".")
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + DECIMAL_POINT + fract_part
# leading/trailing whitespaces
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, *, num_zeros=0, precision=None):
if precision is None:
precision = FEERATE_PRECISION
num_zeros = min(num_zeros, FEERATE_PRECISION) # no more zeroes than available prec
return format_satoshis(fee, num_zeros=num_zeros, decimal_point=0, precision=precision)
def quantize_feerate(fee):
"""Strip sat/byte fee rate of excess precision."""
if fee is None:
return None
return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'blockchain.nixplatform.io': ('https://blockchain.nixplatform.io/',
{'tx': 'tx/', 'addr': 'address/'}),
'system default': ('blockchain:/',
{'tx': 'tx/', 'addr': 'address/'}),
}
testnet_block_explorers = {
'system default': ('blockchain://000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943/',
{'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return mainnet_block_explorers if not constants.net.TESTNET else testnet_block_explorers
def block_explorer(config: 'SimpleConfig') -> str:
from . import constants
default_ = 'blockchain.nixplatform.io'
be_key = config.get('block_explorer', default_)
be = block_explorer_info().get(be_key)
return be_key if be is not None else default_
def block_explorer_tuple(config: 'SimpleConfig') -> Optional[Tuple[str, dict]]:
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config: 'SimpleConfig', kind: str, item: str) -> Optional[str]:
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
explorer_url, explorer_dict = be_tuple
kind_str = explorer_dict.get(kind)
if kind_str is None:
return
url_parts = [explorer_url, kind_str, item]
return ''.join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri: str, on_pr: Callable=None) -> dict:
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a nix address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'nix':
raise Exception("Not a nix URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid nix address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
m = re.match('([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
async def get_payment_request():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = await pr.get_payment_request(r)
if on_pr:
on_pr(request)
loop = asyncio.get_event_loop()
asyncio.run_coroutine_threadsafe(get_payment_request(), loop)
return out
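# Illustrative result (with a placeholder address): parsing
# "nix:<valid address>?amount=0.5&message=donation" returns roughly
# {'address': ..., 'amount': 50000000, 'message': 'donation', 'memo': 'donation'},
# since 'amount' is converted to satoshis by multiplying by COIN (10**8).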
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='nix', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
def log_exceptions(func):
"""Decorator to log AND re-raise exceptions."""
assert asyncio.iscoroutinefunction(func), 'func needs to be a coroutine'
async def wrapper(*args, **kwargs):
self = args[0] if len(args) > 0 else None
try:
return await func(*args, **kwargs)
except asyncio.CancelledError as e:
raise
except BaseException as e:
print_ = self.print_error if hasattr(self, 'print_error') else print_error
print_("Exception in", func.__name__, ":", repr(e))
try:
traceback.print_exc(file=sys.stderr)
except BaseException as e2:
print_error("traceback.print_exc raised: {}...".format(e2))
raise
return wrapper
def ignore_exceptions(func):
"""Decorator to silently swallow all exceptions."""
assert asyncio.iscoroutinefunction(func), 'func needs to be a coroutine'
async def wrapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except BaseException as e:
pass
return wrapper
class TxMinedStatus(NamedTuple):
height: int
conf: int
timestamp: int
header_hash: str
class VerifiedTxInfo(NamedTuple):
height: int
timestamp: int
txpos: int
header_hash: str
def make_aiohttp_session(proxy: dict, headers=None, timeout=None):
if headers is None:
headers = {'User-Agent': 'Electrum'}
if timeout is None:
timeout = aiohttp.ClientTimeout(total=10)
if proxy:
connector = SocksConnector(
socks_ver=SocksVer.SOCKS5 if proxy['mode'] == 'socks5' else SocksVer.SOCKS4,
host=proxy['host'],
port=int(proxy['port']),
username=proxy.get('user', None),
password=proxy.get('password', None),
rdns=True
)
return aiohttp.ClientSession(headers=headers, timeout=timeout, connector=connector)
else:
return aiohttp.ClientSession(headers=headers, timeout=timeout)
class SilentTaskGroup(TaskGroup):
def spawn(self, *args, **kwargs):
# don't complain if group is already closed.
if self._closed:
raise asyncio.CancelledError()
return super().spawn(*args, **kwargs)
class NetworkJobOnDefaultServer(PrintError):
"""An abstract base class for a job that runs on the main network
interface. Every time the main interface changes, the job is
restarted, and some of its internals are reset.
"""
def __init__(self, network: 'Network'):
asyncio.set_event_loop(network.asyncio_loop)
self.network = network
self.interface = None # type: Interface
self._restart_lock = asyncio.Lock()
self._reset()
asyncio.run_coroutine_threadsafe(self._restart(), network.asyncio_loop)
network.register_callback(self._restart, ['default_server_changed'])
def _reset(self):
"""Initialise fields. Called every time the underlying
server connection changes.
"""
self.group = SilentTaskGroup()
async def _start(self, interface: 'Interface'):
self.interface = interface
await interface.group.spawn(self._start_tasks)
async def _start_tasks(self):
"""Start tasks in self.group. Called every time the underlying
server connection changes.
"""
raise NotImplementedError() # implemented by subclasses
async def stop(self):
await self.group.cancel_remaining()
@log_exceptions
async def _restart(self, *args):
interface = self.network.interface
if interface is None:
return # we should get called again soon
async with self._restart_lock:
await self.stop()
self._reset()
await self._start(interface)
@property
def session(self):
s = self.interface.session
assert s is not None
return s
def create_and_start_event_loop() -> Tuple[asyncio.AbstractEventLoop,
asyncio.Future,
threading.Thread]:
def on_exception(loop, context):
"""Suppress spurious messages it appears we cannot control."""
SUPPRESS_MESSAGE_REGEX = re.compile('SSL handshake|Fatal read error on|'
'SSL error in data received')
message = context.get('message')
if message and SUPPRESS_MESSAGE_REGEX.match(message):
return
loop.default_exception_handler(context)
loop = asyncio.get_event_loop()
loop.set_exception_handler(on_exception)
# loop.set_debug(1)
stopping_fut = asyncio.Future()
loop_thread = threading.Thread(target=loop.run_until_complete,
args=(stopping_fut,),
name='EventLoop')
loop_thread.start()
return loop, stopping_fut, loop_thread
| []
| []
| [
"APPDATA",
"HOME",
"LOCALAPPDATA"
]
| [] | ["APPDATA", "HOME", "LOCALAPPDATA"] | python | 3 | 0 | |
examples/image/upload/uploadAnImageFromAUrlToMicrosCdn/main.go | package main
import (
"fmt"
"os"
"go.m3o.com/image"
)
// Upload an image by either sending a base64 encoded image to this endpoint or a URL.
// To resize an image before uploading, see the Resize endpoint.
// To use the file parameter you need to send the request as a multipart/form-data rather than the usual application/json
// with each parameter as a form field.
func main() {
imageService := image.NewImageService(os.Getenv("M3O_API_TOKEN"))
rsp, err := imageService.Upload(&image.UploadRequest{
Name: "cat.jpeg",
Url: "somewebsite.com/cat.png",
})
fmt.Println(rsp, err)
}
| [
"\"M3O_API_TOKEN\""
]
| []
| [
"M3O_API_TOKEN"
]
| [] | ["M3O_API_TOKEN"] | go | 1 | 0 | |
cmd/copy.go | package cmd
import (
. "github.com/jmulhern/rasputin/pkg/rasputin"
"github.com/jmulhern/rasputin/pkg/run"
"github.com/spf13/cobra"
"os"
)
const sshPort = "22"
var dialer run.Dialer
var sshPrivateKeyPath = os.Getenv("HOME") + "/.ssh/id_rsa"
var copyCommand = &cobra.Command{
Use: "copy [source] [destination]",
Aliases: []string{"cp"},
Args: cobra.ExactArgs(2),
RunE: CopyCommand,
}
func CopyCommand(cmd *cobra.Command, args []string) error {
sourcePath := args[0]
destinationPath := args[1]
copyArgs := CopyArgs{
PrivateKey: sshPrivateKeyPath,
Targets: []Target{
NewTarget(dialer, "10.0.1.200", sshPort, "pi"),
NewTarget(dialer, "10.0.1.201", sshPort, "pi"),
},
Source: sourcePath,
Destination: destinationPath,
}
return Copy(copyArgs)
}
func init() {
dialer = run.NewRemoteDialer("pi")
rootCmd.AddCommand(copyCommand)
}
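// Example invocation (illustrative sketch; the binary name is inferred from the
// import path and the paths are placeholders, not taken from this repository):
//
//	rasputin copy ./app.service /etc/systemd/system/app.service
//
// The two positional arguments become CopyArgs.Source and CopyArgs.Destination,
// and the file is copied over SSH (port 22, user "pi") to each hard-coded Target.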
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
vega/core/pipeline/fully_train_pipe_step.py | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Fully Train PipeStep that used in Pipeline."""
import os
import logging
import subprocess
import pickle
import vega
from .pipe_step import PipeStep
from zeus.common import ClassFactory, ClassType
from zeus.common import FileOps, TaskOps
from ..scheduler import create_master
from zeus.common.general import General
from zeus.report import Report, ReportRecord
from vega.core.pipeline.conf import PipeStepConfig, PipelineConfig
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.PIPE_STEP)
class FullyTrainPipeStep(PipeStep):
"""FullyTrainPipeStep is the implementation class of PipeStep.
Fully train is the last pipe step in the pipeline; it provides a Horovod or
local trainer for the user to choose.
"""
def __init__(self):
super().__init__()
logger.info("init FullyTrainPipeStep...")
def do(self):
"""Start to run fully train with horovod or local trainer."""
logger.info("FullyTrainPipeStep started...")
cls_trainer = ClassFactory.get_cls('trainer', "Trainer")
if cls_trainer.config.distributed:
self._do_distributed_fully_train()
else:
records = self._get_current_step_records()
logger.debug("load pipestep records: {}".format(records))
self.master = create_master(update_func=Report().update_report)
self._train_multi_models(records)
Report().output_step_all_records(
step_name=self.task.step_name, weights_file=True, performance=True)
self.master.close_client()
Report().backup_output_path()
def _get_current_step_records(self):
step_name = self.task.step_name
models_folder = PipeStepConfig.pipe_step.get("models_folder")
records = []
cur_index = PipelineConfig.steps.index(step_name)
if cur_index >= 1 or models_folder:
# records = Report().get_pareto_front_records(PipelineConfig.steps[cur_index - 1])
if not models_folder:
models_folder = FileOps.join_path(
TaskOps().local_output_path, PipelineConfig.steps[cur_index - 1])
models_folder = models_folder.replace(
"{local_base_path}", TaskOps().local_base_path)
records = Report().load_records_from_model_folder(models_folder)
else:
records = [ReportRecord(step_name, 0)]
logging.debug("Records: {}".format(records))
for record in records:
record.step_name = step_name
return records
def _train_single_model(self, model_desc=None, model_id=None):
cls_trainer = ClassFactory.get_cls('trainer', "Trainer")
step_name = self.task.step_name
if model_desc is not None:
sample = dict(worker_id=model_id, desc=model_desc, step_name=step_name)
record = ReportRecord().load_dict(sample)
logging.debug("Broadcast Record=%s", str(record))
Report().broadcast(record)
trainer = cls_trainer(model_desc=model_desc, id=model_id)
else:
trainer = cls_trainer(None, 0)
# resume training
if vega.is_torch_backend() and General._resume:
trainer.load_checkpoint = True
trainer._resume_training = True
if cls_trainer.config.distributed:
self._do_distributed_fully_train()
else:
self._do_single_fully_train(trainer)
def _train_single_gpu_model(self, trainer):
evaluator = self._get_evaluator(trainer.worker_id)
self.master.run(trainer, evaluator)
def _train_single_npu_model(self, trainer):
temp_rank_file = os.environ.get('RANK_TABLE_FILE', None)
temp_rank_size = os.environ['RANK_SIZE']
os.environ.pop('RANK_TABLE_FILE', None)
os.environ['RANK_SIZE'] = '1'
evaluator = self._get_evaluator(trainer.worker_id)
self.master.run(trainer, evaluator)
if temp_rank_file is not None:
os.environ['RANK_TABLE_FILE'] = temp_rank_file
os.environ['RANK_SIZE'] = temp_rank_size
def _do_single_fully_train(self, trainer):
if os.environ['DEVICE_CATEGORY'] == 'GPU':
self._train_single_gpu_model(trainer)
elif os.environ['DEVICE_CATEGORY'] == 'NPU':
self._train_single_npu_model(trainer)
def _train_multi_models(self, records):
for record in records:
self._train_single_model(record.desc, record.worker_id)
self.master.join()
def _get_evaluator(self, worker_id):
if not PipeStepConfig.evaluator_enable:
return None
cls_evaluator = ClassFactory.get_cls('evaluator', "Evaluator")
evaluator = cls_evaluator({"step_name": self.task.step_name, "worker_id": worker_id})
return evaluator
def _do_horovod_fully_train(self):
pwd_dir = os.path.dirname(os.path.abspath(__file__))
cf_file = os.path.join(pwd_dir, 'cf.pickle')
cf_content = {'registry': ClassFactory.__registry__,
'general_config': General().to_json(),
'pipe_step_config': PipeStepConfig().to_json()}
with open(cf_file, 'wb') as f:
pickle.dump(cf_content, f)
cf_file_remote = os.path.join(self.task.local_base_path, 'cf.pickle')
FileOps.copy_file(cf_file, cf_file_remote)
if os.environ.get('DLS_TASK_NUMBER') is None:
# local cluster
worker_ips = '127.0.0.1'
if General.cluster.master_ip is not None and General.cluster.master_ip != '127.0.0.1':
worker_ips = General.cluster.master_ip
for ip in General.cluster.slaves:
worker_ips = worker_ips + ',' + ip
cmd = ['bash', '{}/horovod/run_cluster_horovod_train.sh'.format(pwd_dir),
str(self.world_device_size), cf_file_remote, worker_ips]
else:
# Roma
cmd = ['bash', '{}/horovod/run_horovod_train.sh'.format(pwd_dir),
str(self.world_device_size), cf_file_remote]
proc = subprocess.Popen(cmd, env=os.environ)
proc.wait()
def _do_hccl_fully_train(self):
origin_parallel_fully_train = General.parallel_fully_train
General.parallel_fully_train = True
General.dft = True
cls_trainer = ClassFactory.get_cls('trainer', "Trainer")
self.master = create_master()
workers_num = int(os.environ['RANK_SIZE'])
for i in range(workers_num):
trainer = cls_trainer(None, id=i)
evaluator = self._get_evaluator(trainer.worker_id)
self.master.run(trainer, evaluator)
self.master.join()
self.master.shutdown()
General.parallel_fully_train = origin_parallel_fully_train
General.dft = False
def _do_distributed_fully_train(self):
if os.environ['DEVICE_CATEGORY'] == 'GPU':
self._do_horovod_fully_train()
elif os.environ['DEVICE_CATEGORY'] == 'NPU':
self._do_hccl_fully_train()
@property
def world_device_size(self):
"""World device size is world size * device count in each world."""
import torch
world_size = General.env.world_size
device_nums = torch.cuda.device_count()
num_devices = world_size * device_nums
return num_devices
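# Explanatory note (added for clarity, derived from the methods above): the
# training path is selected by environment variables rather than config flags:
#
#   DEVICE_CATEGORY=GPU -> Horovod-based distributed training (_do_horovod_fully_train)
#   DEVICE_CATEGORY=NPU -> HCCL-based distributed training (_do_hccl_fully_train)
#   RANK_SIZE           -> worker count for HCCL runs; temporarily forced to '1'
#                          for single-NPU training in _train_single_npu_model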
| []
| []
| [
"DEVICE_CATEGORY",
"RANK_SIZE",
"RANK_TABLE_FILE",
"DLS_TASK_NUMBER"
]
| [] | ["DEVICE_CATEGORY", "RANK_SIZE", "RANK_TABLE_FILE", "DLS_TASK_NUMBER"] | python | 4 | 0 | |
actions.go | // Copyright 2020 The Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package githubactions provides an SDK for authoring GitHub Actions in Go. It
// has no external dependencies and provides a Go-like interface for interacting
// with GitHub Actions' build system.
package githubactions
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"time"
)
var (
// osExit allows `os.Exit()` to be stubbed during testing.
osExit = os.Exit
)
const (
addMaskCmd = "add-mask"
setOutputCmd = "set-output"
saveStateCmd = "save-state"
addPathCmd = "add-path" // used when issuing the regular command
pathCmd = "path" // used when issuing the file command
setEnvCmd = "set-env" // used when issuing the regular command
envCmd = "env" // used when issuing the file command
envCmdMsgFmt = "%s<<%s" + EOF + "%s" + EOF + "%s" // ${name}<<${delimiter}${os.EOL}${convertedVal}${os.EOL}${delimiter}
envCmdDelimiter = "_GitHubActionsFileCommandDelimeter_"
addMatcherCmd = "add-matcher"
removeMatcherCmd = "remove-matcher"
groupCmd = "group"
endGroupCmd = "endgroup"
debugCmd = "debug"
noticeCmd = "notice"
warningCmd = "warning"
errorCmd = "error"
errFileCmdFmt = "unable to write command to the environment file: %s"
)
// New creates a new wrapper with helpers for outputting information in GitHub
// actions format.
func New(opts ...Option) *Action {
a := &Action{
w: os.Stdout,
getenv: os.Getenv,
httpClient: &http.Client{
Timeout: 10 * time.Second,
},
}
for _, opt := range opts {
if opt == nil {
continue
}
a = opt(a)
}
return a
}
// NewWithWriter creates a wrapper using the given writer. This is useful for
// tests. The given writer cannot add any prefixes to the string, since GitHub
// requires these special strings to match a very particular format.
//
// Deprecated: Use New() with WithWriter instead.
func NewWithWriter(w io.Writer) *Action {
return New(WithWriter(w))
}
// Action is an internal wrapper around GitHub Actions' output and magic
// strings.
type Action struct {
w io.Writer
fields CommandProperties
getenv GetenvFunc
httpClient *http.Client
}
// IssueCommand issues a new GitHub actions Command.
func (c *Action) IssueCommand(cmd *Command) {
fmt.Fprint(c.w, cmd.String()+EOF)
}
// IssueFileCommand issues a new GitHub actions Command using environment files.
//
// https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#environment-files
//
// The TypeScript equivalent function:
//
// https://github.com/actions/toolkit/blob/4f7fb6513a355689f69f0849edeb369a4dc81729/packages/core/src/file-command.ts#L10-L23
//
// IssueFileCommand currently ignores the 'CommandProperties' field provided
// with the 'Command' argument as its scope is unclear in the current
// TypeScript implementation.
func (c *Action) IssueFileCommand(cmd *Command) error {
e := strings.ReplaceAll(cmd.Name, "-", "_")
e = strings.ToUpper(e)
e = "GITHUB_" + e
filepath := c.getenv(e)
msg := []byte(cmd.Message + EOF)
if err := ioutil.WriteFile(filepath, msg, os.ModeAppend); err != nil {
return fmt.Errorf(errFileCmdFmt, err)
}
return nil
}
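// For example, following the name mangling above, a file command named "env" is
// written to the file pointed to by the GITHUB_ENV environment variable, and
// "path" to the file behind GITHUB_PATH. Illustrative sketch (the key and value
// are made up):
//
//	_ = c.IssueFileCommand(&Command{
//		Name:    "env",
//		Message: fmt.Sprintf(envCmdMsgFmt, "MY_VAR", envCmdDelimiter, "my value", envCmdDelimiter),
//	})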
// AddMask adds a new field mask for the given string "p". After this is called,
// future attempts to log "p" will be replaced with "***" in log output.
func (c *Action) AddMask(p string) {
// ::add-mask::<p>
c.IssueCommand(&Command{
Name: addMaskCmd,
Message: p,
})
}
// AddMatcher adds a new matcher with the given file path.
func (c *Action) AddMatcher(p string) {
// ::add-matcher::<p>
c.IssueCommand(&Command{
Name: addMatcherCmd,
Message: p,
})
}
// RemoveMatcher removes a matcher with the given owner name.
func (c *Action) RemoveMatcher(o string) {
// ::remove-matcher owner=<o>::
c.IssueCommand(&Command{
Name: removeMatcherCmd,
Properties: CommandProperties{
"owner": o,
},
})
}
// AddPath adds the string "p" to the path for the invocation. It attempts to
// issue a file command at first. If that fails, it falls back to the regular
// (now deprecated) 'add-path' command, which may stop working in the future.
// The deprecated fallback may be useful for users running an older version of
// GitHub runner.
//
// https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#adding-a-system-path
// https://github.blog/changelog/2020-10-01-github-actions-deprecating-set-env-and-add-path-commands/
func (c *Action) AddPath(p string) {
err := c.IssueFileCommand(&Command{
Name: pathCmd,
Message: p,
})
if err != nil { // use regular command as fallback
// ::add-path::<p>
c.IssueCommand(&Command{
Name: addPathCmd,
Message: p,
})
}
}
// SaveState saves state to be used in the "finally" post job entry point.
func (c *Action) SaveState(k, v string) {
// ::save-state name=<k>::<v>
c.IssueCommand(&Command{
Name: saveStateCmd,
Message: v,
Properties: CommandProperties{
"name": k,
},
})
}
// GetInput gets the input by the given name.
func (c *Action) GetInput(i string) string {
e := strings.ReplaceAll(i, " ", "_")
e = strings.ToUpper(e)
e = "INPUT_" + e
return strings.TrimSpace(c.getenv(e))
}
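// For example, GetInput("api token") returns the trimmed value of the
// INPUT_API_TOKEN environment variable, which the GitHub runner populates from
// the corresponding `with:` entry of the workflow step. (The input name here is
// purely illustrative.)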
// Group starts a new collapsible region up to the next ungroup invocation.
func (c *Action) Group(t string) {
// ::group::<t>
c.IssueCommand(&Command{
Name: groupCmd,
Message: t,
})
}
// EndGroup ends the current group.
func (c *Action) EndGroup() {
// ::endgroup::
c.IssueCommand(&Command{
Name: endGroupCmd,
})
}
// SetEnv sets an environment variable. It attempts to issue a file command at
// first. If that fails, it falls back to the regular (now deprecated) 'set-env'
// command, which may stop working in the future. The deprecated fallback may be
// useful for users running an older version of GitHub runner.
//
// https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-commands-for-github-actions#setting-an-environment-variable
// https://github.blog/changelog/2020-10-01-github-actions-deprecating-set-env-and-add-path-commands/
func (c *Action) SetEnv(k, v string) {
err := c.IssueFileCommand(&Command{
Name: envCmd,
Message: fmt.Sprintf(envCmdMsgFmt, k, envCmdDelimiter, v, envCmdDelimiter),
})
if err != nil { // use regular command as fallback
// ::set-env name=<k>::<v>
c.IssueCommand(&Command{
Name: setEnvCmd,
Message: v,
Properties: CommandProperties{
"name": k,
},
})
}
}
// SetOutput sets an output parameter.
func (c *Action) SetOutput(k, v string) {
// ::set-output name=<k>::<v>
c.IssueCommand(&Command{
Name: setOutputCmd,
Message: v,
Properties: CommandProperties{
"name": k,
},
})
}
// Debugf prints a debug-level message. It follows the standard fmt.Printf
// arguments, appending an OS-specific line break to the end of the message.
func (c *Action) Debugf(msg string, args ...interface{}) {
// ::debug <c.fields>::<msg, args>
c.IssueCommand(&Command{
Name: debugCmd,
Message: fmt.Sprintf(msg, args...),
Properties: c.fields,
})
}
// Noticef prints a notice-level message. It follows the standard fmt.Printf
// arguments, appending an OS-specific line break to the end of the message.
func (c *Action) Noticef(msg string, args ...interface{}) {
// ::notice <c.fields>::<msg, args>
c.IssueCommand(&Command{
Name: noticeCmd,
Message: fmt.Sprintf(msg, args...),
Properties: c.fields,
})
}
// Warningf prints a warning-level message. It follows the standard fmt.Printf
// arguments, appending an OS-specific line break to the end of the message.
func (c *Action) Warningf(msg string, args ...interface{}) {
// ::warning <c.fields>::<msg, args>
c.IssueCommand(&Command{
Name: warningCmd,
Message: fmt.Sprintf(msg, args...),
Properties: c.fields,
})
}
// Errorf prints an error-level message. It follows the standard fmt.Printf
// arguments, appending an OS-specific line break to the end of the message.
func (c *Action) Errorf(msg string, args ...interface{}) {
// ::error <c.fields>::<msg, args>
c.IssueCommand(&Command{
Name: errorCmd,
Message: fmt.Sprintf(msg, args...),
Properties: c.fields,
})
}
// Fatalf prints an error-level message and exits. This is equivalent to Errorf
// followed by os.Exit(1).
func (c *Action) Fatalf(msg string, args ...interface{}) {
c.Errorf(msg, args...)
osExit(1)
}
// Infof prints a message to stdout without any level annotations. It follows the standard fmt.Printf
// arguments, appending an OS-specific line break to the end of the message.
func (c *Action) Infof(msg string, args ...interface{}) {
fmt.Fprintf(c.w, msg+EOF, args...)
}
// WithFieldsSlice includes the provided fields in log output. "f" must be a
// slice of k=v pairs. The given slice will be sorted. It panics if any of the
// strings in the given slice does not form a valid 'key=value' pair.
func (c *Action) WithFieldsSlice(f []string) *Action {
m := make(CommandProperties)
for _, s := range f {
pair := strings.SplitN(s, "=", 2)
if len(pair) < 2 {
panic(fmt.Sprintf("%q is not a proper k=v pair!", s))
}
m[pair[0]] = pair[1]
}
return c.WithFieldsMap(m)
}
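// Illustrative sketch (message and properties are made up): fields added here are
// attached to subsequent logging commands via the Properties field, e.g.
//
//	a.WithFieldsSlice([]string{"file=app.go", "line=42"}).Errorf("parse failure")
//
// emits an error command carrying the file and line properties.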
// WithFieldsMap includes the provided fields in log output. The fields in "m"
// are automatically converted to k=v pairs and sorted.
func (c *Action) WithFieldsMap(m map[string]string) *Action {
return &Action{
w: c.w,
fields: m,
getenv: c.getenv,
}
}
// idTokenResponse is the response from minting an ID token.
type idTokenResponse struct {
Value string `json:"value,omitempty"`
}
// GetIDToken returns the GitHub OIDC token from the GitHub Actions runtime.
func (c *Action) GetIDToken(ctx context.Context, audience string) (string, error) {
requestURL := c.getenv("ACTIONS_ID_TOKEN_REQUEST_URL")
if requestURL == "" {
return "", fmt.Errorf("missing ACTIONS_ID_TOKEN_REQUEST_URL in environment")
}
requestToken := c.getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN")
if requestToken == "" {
return "", fmt.Errorf("missing ACTIONS_ID_TOKEN_REQUEST_TOKEN in environment")
}
u, err := url.Parse(requestURL)
if err != nil {
return "", fmt.Errorf("failed to parse request URL: %w", err)
}
if audience != "" {
q := u.Query()
q.Set("audience", audience)
u.RawQuery = q.Encode()
}
req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
if err != nil {
return "", fmt.Errorf("failed to create HTTP request: %w", err)
}
req.Header.Set("Authorization", "Bearer "+requestToken)
resp, err := c.httpClient.Do(req)
if err != nil {
return "", fmt.Errorf("failed to make HTTP request: %w", err)
}
defer resp.Body.Close()
// This has moved to the io package in Go 1.16, but we still support as far
// back as Go 1.13 for now.
body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 64*1000))
if err != nil {
return "", fmt.Errorf("failed to read response body: %w", err)
}
body = bytes.TrimSpace(body)
if resp.StatusCode != 200 {
return "", fmt.Errorf("non-successful response from minting OIDC token: %s", body)
}
var tokenResp idTokenResponse
if err := json.Unmarshal(body, &tokenResp); err != nil {
return "", fmt.Errorf("failed to process response as JSON: %w", err)
}
return tokenResp.Value, nil
}
// GetenvFunc is an abstraction to make tests feasible for commands that
// interact with environment variables.
type GetenvFunc func(k string) string
| []
| []
| []
| [] | [] | go | 0 | 0 | |
dataPipelines/scripts/matching.py | import os
import sys
import shutil
from pathlib import Path
raw_docs_dir = Path(os.environ["RAW_DOCS"]).absolute()
parsed_docs_dir = Path(os.environ["PARSED_DOCS"]).absolute()
output_base_dir = Path(os.environ["OUTPUT_BASE"]).absolute()
output_unparsed = Path(output_base_dir, "unparsed")
output_raw = Path(output_base_dir, "raw")
output_parsed = Path(output_base_dir, "parsed")
output_base_dir.mkdir(exist_ok=True)
output_unparsed.mkdir(exist_ok=True)
output_raw.mkdir(exist_ok=True)
output_parsed.mkdir(exist_ok=True)
raw_docs = [x for x in raw_docs_dir.iterdir() if x.name.lower().endswith("pdf") or x.name.lower().endswith("html")]
crawler_output_file = Path(raw_docs_dir, "crawler_output.json")
for raw_file in raw_docs:
metadata_file = Path(raw_docs_dir, raw_file.name + ".metadata")
parsed_file = Path(parsed_docs_dir, raw_file.with_suffix(".json").name)
if parsed_file.exists():
shutil.copy(parsed_file, output_parsed)
shutil.copy(raw_file, output_raw)
shutil.copy(metadata_file, output_raw)
else:
shutil.copy(raw_file, output_unparsed)
shutil.copy(metadata_file, output_unparsed)
if crawler_output_file.exists():
shutil.copy(crawler_output_file, output_unparsed)
shutil.copy(crawler_output_file, output_raw)
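# Illustrative invocation (paths are placeholders, not taken from this repository):
#
#   RAW_DOCS=/data/raw PARSED_DOCS=/data/parsed OUTPUT_BASE=/data/out \
#       python dataPipelines/scripts/matching.py
#
# Raw PDF/HTML files that have a matching parsed JSON are copied (with their
# .metadata files) into OUTPUT_BASE/raw and OUTPUT_BASE/parsed; unmatched ones go
# to OUTPUT_BASE/unparsed.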
| []
| []
| [
"PARSED_DOCS",
"OUTPUT_BASE",
"RAW_DOCS"
]
| [] | ["PARSED_DOCS", "OUTPUT_BASE", "RAW_DOCS"] | python | 3 | 0 | |
13.go | package tls
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/hmac"
"crypto/rsa"
"crypto/subtle"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"log"
"os"
"runtime"
"runtime/debug"
"strings"
"sync/atomic"
"time"
sidh "github_com/cloudflare/sidh/sidh"
"golang_org/x/crypto/curve25519"
)
// numSessionTickets is the number of different session tickets the
// server sends to a TLS 1.3 client, who will use each only once.
const numSessionTickets = 2
type secretLabel int
const (
x25519SharedSecretSz = 32
P503PubKeySz = 378
P503PrvKeySz = 32
P503SharedSecretSz = 126
SidhP503Curve25519PubKeySz = x25519SharedSecretSz + P503PubKeySz
SidhP503Curve25519PrvKeySz = x25519SharedSecretSz + P503PrvKeySz
SidhP503Curve25519SharedKeySz = x25519SharedSecretSz + P503SharedSecretSz
)
const (
secretResumptionPskBinder secretLabel = iota
secretEarlyClient
secretHandshakeClient
secretHandshakeServer
secretApplicationClient
secretApplicationServer
secretResumption
)
type keySchedule13 struct {
suite *cipherSuite
transcriptHash hash.Hash // uses the cipher suite hash algo
secret []byte // Current secret as used for Derive-Secret
handshakeCtx []byte // cached handshake context, invalidated on updates.
clientRandom []byte // Used for keylogging, nil if keylogging is disabled.
config *Config // Used for KeyLogWriter callback, nil if keylogging is disabled.
}
// Interface implemented by DH key exchange strategies
type dhKex interface {
// c - context of current TLS handshake, groupId - ID of an algorithm
// (curve/field) being chosen for key agreement. Methods implmenting an
// interface always assume that provided groupId is correct.
//
// In case of success, function returns secret key and ephemeral key. Otherwise
// error is set.
generate(c *Conn, groupId CurveID) ([]byte, keyShare, error)
// c - context of current TLS handshake, ks - public key received
// from the other side of the connection, secretKey - is a private key
// used for DH key agreement. Function returns shared secret in case
// of success or empty slice otherwise.
derive(c *Conn, ks keyShare, secretKey []byte) []byte
}
// Key Exchange strategies per curve type
type kexNist struct{}                   // Used by NIST curves; P-256, P-384, P-521
type kexX25519 struct{} // Used by X25519
type kexSidhP503 struct{} // Used by SIDH/P503
type kexHybridSidhP503X25519 struct{} // Used by SIDH-ECDH hybrid scheme
// Routing map for key exchange strategies
var dhKexStrat = map[CurveID]dhKex{
CurveP256: &kexNist{},
CurveP384: &kexNist{},
CurveP521: &kexNist{},
X25519: &kexX25519{},
sidhP503: &kexSidhP503{},
HybridSidhP503Curve25519: &kexHybridSidhP503X25519{},
}
func newKeySchedule13(suite *cipherSuite, config *Config, clientRandom []byte) *keySchedule13 {
if config.KeyLogWriter == nil {
clientRandom = nil
config = nil
}
return &keySchedule13{
suite: suite,
transcriptHash: hashForSuite(suite).New(),
clientRandom: clientRandom,
config: config,
}
}
// setSecret sets the early/handshake/master secret based on the given secret
// (IKM). The salt is based on previous secrets (nil for the early secret).
func (ks *keySchedule13) setSecret(secret []byte) {
hash := hashForSuite(ks.suite)
salt := ks.secret
if salt != nil {
h0 := hash.New().Sum(nil)
salt = hkdfExpandLabel(hash, salt, h0, "derived", hash.Size())
}
ks.secret = hkdfExtract(hash, secret, salt)
}
// Depending on the role, returns the pair of key variants to be used by the
// local and remote process.
func getSidhKeyVariant(isClient bool) (sidh.KeyVariant, sidh.KeyVariant) {
if isClient {
return sidh.KeyVariant_SIDH_A, sidh.KeyVariant_SIDH_B
}
return sidh.KeyVariant_SIDH_B, sidh.KeyVariant_SIDH_A
}
// write appends the data to the transcript hash context.
func (ks *keySchedule13) write(data []byte) {
ks.handshakeCtx = nil
ks.transcriptHash.Write(data)
}
func (ks *keySchedule13) getLabel(secretLabel secretLabel) (label, keylogType string) {
switch secretLabel {
case secretResumptionPskBinder:
label = "res binder"
case secretEarlyClient:
label = "c e traffic"
keylogType = "CLIENT_EARLY_TRAFFIC_SECRET"
case secretHandshakeClient:
label = "c hs traffic"
keylogType = "CLIENT_HANDSHAKE_TRAFFIC_SECRET"
case secretHandshakeServer:
label = "s hs traffic"
keylogType = "SERVER_HANDSHAKE_TRAFFIC_SECRET"
case secretApplicationClient:
label = "c ap traffic"
keylogType = "CLIENT_TRAFFIC_SECRET_0"
case secretApplicationServer:
label = "s ap traffic"
keylogType = "SERVER_TRAFFIC_SECRET_0"
case secretResumption:
label = "res master"
}
return
}
// deriveSecret returns the secret derived from the handshake context and label.
func (ks *keySchedule13) deriveSecret(secretLabel secretLabel) []byte {
label, keylogType := ks.getLabel(secretLabel)
if ks.handshakeCtx == nil {
ks.handshakeCtx = ks.transcriptHash.Sum(nil)
}
hash := hashForSuite(ks.suite)
secret := hkdfExpandLabel(hash, ks.secret, ks.handshakeCtx, label, hash.Size())
if keylogType != "" && ks.config != nil {
ks.config.writeKeyLog(keylogType, ks.clientRandom, secret)
}
return secret
}
func (ks *keySchedule13) prepareCipher(secretLabel secretLabel) (interface{}, []byte) {
trafficSecret := ks.deriveSecret(secretLabel)
hash := hashForSuite(ks.suite)
key := hkdfExpandLabel(hash, trafficSecret, nil, "key", ks.suite.keyLen)
iv := hkdfExpandLabel(hash, trafficSecret, nil, "iv", ks.suite.ivLen)
return ks.suite.aead(key, iv), trafficSecret
}
func (hs *serverHandshakeState) doTLS13Handshake() error {
config := hs.c.config
c := hs.c
hs.c.cipherSuite, hs.hello.cipherSuite = hs.suite.id, hs.suite.id
hs.c.clientHello = hs.clientHello.marshal()
// When picking the group for the handshake, priority is given to groups
// that the client provided a keyShare for, so as to avoid a round-trip.
// After that the order of CurvePreferences is respected.
var ks keyShare
CurvePreferenceLoop:
for _, curveID := range config.curvePreferences() {
for _, keyShare := range hs.clientHello.keyShares {
if curveID == keyShare.group {
ks = keyShare
break CurvePreferenceLoop
}
}
}
if ks.group == 0 {
c.sendAlert(alertInternalError)
return errors.New("tls: HelloRetryRequest not implemented") // TODO(filippo)
}
privateKey, serverKS, err := c.generateKeyShare(ks.group)
if err != nil {
c.sendAlert(alertInternalError)
return err
}
hs.hello.keyShare = serverKS
hash := hashForSuite(hs.suite)
hashSize := hash.Size()
hs.keySchedule = newKeySchedule13(hs.suite, config, hs.clientHello.random)
// Check for PSK and update key schedule with new early secret key
isResumed, pskAlert := hs.checkPSK()
switch {
case pskAlert != alertSuccess:
c.sendAlert(pskAlert)
return errors.New("tls: invalid client PSK")
case !isResumed:
// apply an empty PSK if not resumed.
hs.keySchedule.setSecret(nil)
case isResumed:
c.didResume = true
}
hs.keySchedule.write(hs.clientHello.marshal())
earlyClientCipher, _ := hs.keySchedule.prepareCipher(secretEarlyClient)
ecdheSecret := c.deriveDHESecret(ks, privateKey)
if ecdheSecret == nil {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: bad ECDHE client share")
}
hs.keySchedule.write(hs.hello.marshal())
if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
return err
}
// middlebox compatibility mode: send CCS after first handshake message
if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
return err
}
hs.keySchedule.setSecret(ecdheSecret)
clientCipher, cTrafficSecret := hs.keySchedule.prepareCipher(secretHandshakeClient)
hs.hsClientCipher = clientCipher
serverCipher, sTrafficSecret := hs.keySchedule.prepareCipher(secretHandshakeServer)
c.out.setCipher(c.vers, serverCipher)
serverFinishedKey := hkdfExpandLabel(hash, sTrafficSecret, nil, "finished", hashSize)
hs.clientFinishedKey = hkdfExpandLabel(hash, cTrafficSecret, nil, "finished", hashSize)
// EncryptedExtensions
hs.keySchedule.write(hs.hello13Enc.marshal())
if _, err := c.writeRecord(recordTypeHandshake, hs.hello13Enc.marshal()); err != nil {
return err
}
// TODO: we should have 2 separate methods - one for full-handshake and the other for PSK-handshake
if !c.didResume {
// Server MUST NOT send CertificateRequest if authenticating with PSK
if c.config.ClientAuth >= RequestClientCert {
certReq := new(certificateRequestMsg13)
// extension 'signature_algorithms' MUST be specified
certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms13
certReq.supportedSignatureAlgorithmsCert = supportedSigAlgorithmsCert(supportedSignatureAlgorithms13)
hs.keySchedule.write(certReq.marshal())
if _, err := hs.c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
return err
}
}
if err := hs.sendCertificate13(); err != nil {
return err
}
}
verifyData := hmacOfSum(hash, hs.keySchedule.transcriptHash, serverFinishedKey)
serverFinished := &finishedMsg{
verifyData: verifyData,
}
hs.keySchedule.write(serverFinished.marshal())
if _, err := c.writeRecord(recordTypeHandshake, serverFinished.marshal()); err != nil {
return err
}
hs.keySchedule.setSecret(nil) // derive master secret
hs.appClientCipher, _ = hs.keySchedule.prepareCipher(secretApplicationClient)
serverCipher, _ = hs.keySchedule.prepareCipher(secretApplicationServer)
c.out.setCipher(c.vers, serverCipher)
if c.hand.Len() > 0 {
return c.sendAlert(alertUnexpectedMessage)
}
if hs.hello13Enc.earlyData {
c.in.setCipher(c.vers, earlyClientCipher)
c.phase = readingEarlyData
} else if hs.clientHello.earlyData {
c.in.setCipher(c.vers, hs.hsClientCipher)
c.phase = discardingEarlyData
} else {
c.in.setCipher(c.vers, hs.hsClientCipher)
c.phase = waitingClientFinished
}
return nil
}
// readClientFinished13 is called during the server handshake (when no early
// data is available) or after reading all early data. It discards early data if
// the server did not accept it and then verifies the Finished message. Once
// done it sends the session tickets. Under c.in lock.
func (hs *serverHandshakeState) readClientFinished13(hasConfirmLock bool) error {
c := hs.c
// If the client advertised and sends early data while the server does
// not accept it, it must be fully skipped until the Finished message.
for c.phase == discardingEarlyData {
if err := c.readRecord(recordTypeApplicationData); err != nil {
return err
}
// Assume receipt of Finished message (will be checked below).
if c.hand.Len() > 0 {
c.phase = waitingClientFinished
break
}
}
// If the client sends early data followed by a Finished message (but
// no end_of_early_data), the server MUST terminate the connection.
if c.phase != waitingClientFinished {
c.sendAlert(alertUnexpectedMessage)
return errors.New("tls: did not expect Client Finished yet")
}
c.phase = readingClientFinished
msg, err := c.readHandshake()
if err != nil {
return err
}
// client authentication
// (4.4.2) Client MUST send certificate msg if requested by server
if c.config.ClientAuth >= RequestClientCert && !c.didResume {
certMsg, ok := msg.(*certificateMsg13)
if !ok {
c.sendAlert(alertCertificateRequired)
return unexpectedMessageError(certMsg, msg)
}
hs.keySchedule.write(certMsg.marshal())
certs := getCertsFromEntries(certMsg.certificates)
pubKey, err := hs.processCertsFromClient(certs)
if err != nil {
return err
}
if len(certs) > 0 {
// 4.4.3: CertificateVerify MUST appear immediately after Certificate msg
msg, err = c.readHandshake()
if err != nil {
return err
}
certVerify, ok := msg.(*certificateVerifyMsg)
if !ok {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(certVerify, msg)
}
err, alertCode := verifyPeerHandshakeSignature(
certVerify,
pubKey,
supportedSignatureAlgorithms13,
hs.keySchedule.transcriptHash.Sum(nil),
"TLS 1.3, client CertificateVerify")
if err != nil {
c.sendAlert(alertCode)
return err
}
hs.keySchedule.write(certVerify.marshal())
}
// Read next chunk
msg, err = c.readHandshake()
if err != nil {
return err
}
}
clientFinished, ok := msg.(*finishedMsg)
if !ok {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(clientFinished, msg)
}
hash := hashForSuite(hs.suite)
expectedVerifyData := hmacOfSum(hash, hs.keySchedule.transcriptHash, hs.clientFinishedKey)
if len(expectedVerifyData) != len(clientFinished.verifyData) ||
subtle.ConstantTimeCompare(expectedVerifyData, clientFinished.verifyData) != 1 {
c.sendAlert(alertDecryptError)
return errors.New("tls: client's Finished message is incorrect")
}
hs.keySchedule.write(clientFinished.marshal())
c.hs = nil // Discard the server handshake state
if c.hand.Len() > 0 {
return c.sendAlert(alertUnexpectedMessage)
}
c.in.setCipher(c.vers, hs.appClientCipher)
c.in.traceErr, c.out.traceErr = nil, nil
c.phase = handshakeConfirmed
atomic.StoreInt32(&c.handshakeConfirmed, 1)
// Any read operation after handshakeRunning and before handshakeConfirmed
// will be holding this lock, which we release as soon as the confirmation
// happens, even if the Read call might do more work.
// If a Handshake is pending, c.confirmMutex will never be locked as
// ConfirmHandshake will wait for the handshake to complete. If a
// handshake was complete, and this was a confirmation, unlock
// c.confirmMutex now to allow readers to proceed.
if hasConfirmLock {
c.confirmMutex.Unlock()
}
return hs.sendSessionTicket13() // TODO: do in a goroutine
}
func (hs *serverHandshakeState) sendCertificate13() error {
c := hs.c
certEntries := []certificateEntry{}
for _, cert := range hs.cert.Certificate {
certEntries = append(certEntries, certificateEntry{data: cert})
}
if len(certEntries) > 0 && hs.clientHello.ocspStapling {
certEntries[0].ocspStaple = hs.cert.OCSPStaple
}
if len(certEntries) > 0 && hs.clientHello.scts {
certEntries[0].sctList = hs.cert.SignedCertificateTimestamps
}
// If hs.delegatedCredential is set (see hs.readClientHello()) then the
// server is using the delegated credential extension. The DC is added as an
// extension to the end-entity certificate, i.e., the last CertificateEntry
// of Certificate.certificate_list. (For details, see
// https://tools.ietf.org/html/draft-ietf-tls-subcerts-02.)
if len(certEntries) > 0 && hs.clientHello.delegatedCredential && hs.delegatedCredential != nil {
certEntries[0].delegatedCredential = hs.delegatedCredential
}
certMsg := &certificateMsg13{certificates: certEntries}
hs.keySchedule.write(certMsg.marshal())
if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
return err
}
sigScheme, err := hs.selectTLS13SignatureScheme()
if err != nil {
c.sendAlert(alertInternalError)
return err
}
sigHash := hashForSignatureScheme(sigScheme)
opts := crypto.SignerOpts(sigHash)
if signatureSchemeIsPSS(sigScheme) {
opts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash}
}
toSign := prepareDigitallySigned(sigHash, "TLS 1.3, server CertificateVerify", hs.keySchedule.transcriptHash.Sum(nil))
signature, err := hs.privateKey.(crypto.Signer).Sign(c.config.rand(), toSign[:], opts)
if err != nil {
c.sendAlert(alertInternalError)
return err
}
verifyMsg := &certificateVerifyMsg{
hasSignatureAndHash: true,
signatureAlgorithm: sigScheme,
signature: signature,
}
hs.keySchedule.write(verifyMsg.marshal())
if _, err := c.writeRecord(recordTypeHandshake, verifyMsg.marshal()); err != nil {
return err
}
return nil
}
func (c *Conn) handleEndOfEarlyData() error {
if c.phase != readingEarlyData || c.vers < VersionTLS13 {
return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
msg, err := c.readHandshake()
if err != nil {
return err
}
endOfEarlyData, ok := msg.(*endOfEarlyDataMsg)
// No handshake messages are allowed after EOD.
if !ok || c.hand.Len() > 0 {
return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
c.hs.keySchedule.write(endOfEarlyData.marshal())
c.phase = waitingClientFinished
c.in.setCipher(c.vers, c.hs.hsClientCipher)
return nil
}
// selectTLS13SignatureScheme chooses the SignatureScheme for the CertificateVerify
// based on the certificate type and client supported schemes. If no overlap is found,
// a fallback is selected.
//
// See https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-4.4.1.2
func (hs *serverHandshakeState) selectTLS13SignatureScheme() (sigScheme SignatureScheme, err error) {
var supportedSchemes []SignatureScheme
signer, ok := hs.privateKey.(crypto.Signer)
if !ok {
return 0, errors.New("tls: private key does not implement crypto.Signer")
}
pk := signer.Public()
if _, ok := pk.(*rsa.PublicKey); ok {
sigScheme = PSSWithSHA256
supportedSchemes = []SignatureScheme{PSSWithSHA256, PSSWithSHA384, PSSWithSHA512}
} else if pk, ok := pk.(*ecdsa.PublicKey); ok {
switch pk.Curve {
case elliptic.P256():
sigScheme = ECDSAWithP256AndSHA256
supportedSchemes = []SignatureScheme{ECDSAWithP256AndSHA256}
case elliptic.P384():
sigScheme = ECDSAWithP384AndSHA384
supportedSchemes = []SignatureScheme{ECDSAWithP384AndSHA384}
case elliptic.P521():
sigScheme = ECDSAWithP521AndSHA512
supportedSchemes = []SignatureScheme{ECDSAWithP521AndSHA512}
default:
return 0, errors.New("tls: unknown ECDSA certificate curve")
}
} else {
return 0, errors.New("tls: unknown certificate key type")
}
for _, ss := range supportedSchemes {
for _, cs := range hs.clientHello.supportedSignatureAlgorithms {
if ss == cs {
return ss, nil
}
}
}
return sigScheme, nil
}
func signatureSchemeIsPSS(s SignatureScheme) bool {
return s == PSSWithSHA256 || s == PSSWithSHA384 || s == PSSWithSHA512
}
// hashForSignatureScheme returns the Hash used by a SignatureScheme which is
// supported by selectTLS13SignatureScheme.
func hashForSignatureScheme(ss SignatureScheme) crypto.Hash {
switch ss {
case PSSWithSHA256, ECDSAWithP256AndSHA256:
return crypto.SHA256
case PSSWithSHA384, ECDSAWithP384AndSHA384:
return crypto.SHA384
case PSSWithSHA512, ECDSAWithP521AndSHA512:
return crypto.SHA512
default:
panic("unsupported SignatureScheme passed to hashForSignatureScheme")
}
}
func hashForSuite(suite *cipherSuite) crypto.Hash {
if suite.flags&suiteSHA384 != 0 {
return crypto.SHA384
}
return crypto.SHA256
}
func prepareDigitallySigned(hash crypto.Hash, context string, data []byte) []byte {
message := bytes.Repeat([]byte{32}, 64)
message = append(message, context...)
message = append(message, 0)
message = append(message, data...)
h := hash.New()
h.Write(message)
return h.Sum(nil)
}
// generateKeyShare generates a key pair. The private key is returned as the first argument; the public
// key is returned in keyShare.data, and keyShare.group stores the ID of the scheme used.
func (c *Conn) generateKeyShare(curveID CurveID) ([]byte, keyShare, error) {
if val, ok := dhKexStrat[curveID]; ok {
return val.generate(c, curveID)
}
return nil, keyShare{}, errors.New("tls: preferredCurves includes unsupported curve")
}
// DH key agreement. ks stores the peer's public key; secretKey stores the private key used for the
// ephemeral key agreement. The function returns the shared secret on success, or nil otherwise.
func (c *Conn) deriveDHESecret(ks keyShare, secretKey []byte) []byte {
if val, ok := dhKexStrat[ks.group]; ok {
return val.derive(c, ks, secretKey)
}
return nil
}
func hkdfExpandLabel(hash crypto.Hash, secret, hashValue []byte, label string, L int) []byte {
prefix := "tls13 "
hkdfLabel := make([]byte, 4+len(prefix)+len(label)+len(hashValue))
hkdfLabel[0] = byte(L >> 8)
hkdfLabel[1] = byte(L)
hkdfLabel[2] = byte(len(prefix) + len(label))
copy(hkdfLabel[3:], prefix)
z := hkdfLabel[3+len(prefix):]
copy(z, label)
z = z[len(label):]
z[0] = byte(len(hashValue))
copy(z[1:], hashValue)
return hkdfExpand(hash, secret, hkdfLabel, L)
}
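// The buffer assembled above is the HkdfLabel structure used by TLS 1.3
// (RFC 8446, section 7.1), shown here for clarity:
//
//	struct {
//	    uint16 length = L;
//	    opaque label<7..255> = "tls13 " + label;
//	    opaque context<0..255> = hashValue;
//	} HkdfLabel;
//
// e.g. hkdfExpandLabel(crypto.SHA256, secret, nil, "key", 16) expands secret
// with the label "tls13 key" and an empty context.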
func hmacOfSum(f crypto.Hash, hash hash.Hash, key []byte) []byte {
h := hmac.New(f.New, key)
h.Write(hash.Sum(nil))
return h.Sum(nil)
}
// Maximum allowed mismatch between the stated age of a ticket
// and the server-observed one. See
// https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-4.2.8.2.
const ticketAgeSkewAllowance = 10 * time.Second
// checkPSK tries to resume using a PSK, returning true (and updating the
// early secret in the key schedule) if the PSK was used and false otherwise.
func (hs *serverHandshakeState) checkPSK() (isResumed bool, alert alert) {
if hs.c.config.SessionTicketsDisabled {
return false, alertSuccess
}
foundDHE := false
for _, mode := range hs.clientHello.pskKeyExchangeModes {
if mode == pskDHEKeyExchange {
foundDHE = true
break
}
}
if !foundDHE {
return false, alertSuccess
}
hash := hashForSuite(hs.suite)
hashSize := hash.Size()
for i := range hs.clientHello.psks {
sessionTicket := append([]uint8{}, hs.clientHello.psks[i].identity...)
if hs.c.config.SessionTicketSealer != nil {
var ok bool
sessionTicket, ok = hs.c.config.SessionTicketSealer.Unseal(hs.clientHelloInfo(), sessionTicket)
if !ok {
continue
}
} else {
sessionTicket, _ = hs.c.decryptTicket(sessionTicket)
if sessionTicket == nil {
continue
}
}
s := &sessionState13{}
if s.unmarshal(sessionTicket) != alertSuccess {
continue
}
if s.vers != hs.c.vers {
continue
}
clientAge := time.Duration(hs.clientHello.psks[i].obfTicketAge-s.ageAdd) * time.Millisecond
serverAge := time.Since(time.Unix(int64(s.createdAt), 0))
if clientAge-serverAge > ticketAgeSkewAllowance || clientAge-serverAge < -ticketAgeSkewAllowance {
// XXX: NSS is off spec and sends obfuscated_ticket_age as seconds
clientAge = time.Duration(hs.clientHello.psks[i].obfTicketAge-s.ageAdd) * time.Second
if clientAge-serverAge > ticketAgeSkewAllowance || clientAge-serverAge < -ticketAgeSkewAllowance {
continue
}
}
// This enforces the stricter 0-RTT requirements on all ticket uses.
// The benefits of using PSK+ECDHE without 0-RTT are small enough that
// we can give them up in the edge case of a changed suite, ALPN or SNI.
if s.suite != hs.suite.id {
continue
}
if s.alpnProtocol != hs.c.clientProtocol {
continue
}
if s.SNI != hs.c.serverName {
continue
}
hs.keySchedule.setSecret(s.pskSecret)
binderKey := hs.keySchedule.deriveSecret(secretResumptionPskBinder)
binderFinishedKey := hkdfExpandLabel(hash, binderKey, nil, "finished", hashSize)
chHash := hash.New()
chHash.Write(hs.clientHello.rawTruncated)
expectedBinder := hmacOfSum(hash, chHash, binderFinishedKey)
if subtle.ConstantTimeCompare(expectedBinder, hs.clientHello.psks[i].binder) != 1 {
return false, alertDecryptError
}
if i == 0 && hs.clientHello.earlyData {
// This is a ticket intended to be used for 0-RTT
if s.maxEarlyDataLen == 0 {
// But we had not tagged it as such.
return false, alertIllegalParameter
}
if hs.c.config.Accept0RTTData {
hs.c.binder = expectedBinder
hs.c.ticketMaxEarlyData = int64(s.maxEarlyDataLen)
hs.hello13Enc.earlyData = true
}
}
hs.hello.psk = true
hs.hello.pskIdentity = uint16(i)
return true, alertSuccess
}
return false, alertSuccess
}
func (hs *serverHandshakeState) sendSessionTicket13() error {
c := hs.c
if c.config.SessionTicketsDisabled {
return nil
}
foundDHE := false
for _, mode := range hs.clientHello.pskKeyExchangeModes {
if mode == pskDHEKeyExchange {
foundDHE = true
break
}
}
if !foundDHE {
return nil
}
resumptionMasterSecret := hs.keySchedule.deriveSecret(secretResumption)
ageAddBuf := make([]byte, 4)
sessionState := &sessionState13{
vers: c.vers,
suite: hs.suite.id,
createdAt: uint64(time.Now().Unix()),
alpnProtocol: c.clientProtocol,
SNI: c.serverName,
maxEarlyDataLen: c.config.Max0RTTDataSize,
}
hash := hashForSuite(hs.suite)
for i := 0; i < numSessionTickets; i++ {
if _, err := io.ReadFull(c.config.rand(), ageAddBuf); err != nil {
c.sendAlert(alertInternalError)
return err
}
sessionState.ageAdd = uint32(ageAddBuf[0])<<24 | uint32(ageAddBuf[1])<<16 |
uint32(ageAddBuf[2])<<8 | uint32(ageAddBuf[3])
// ticketNonce must be a unique value for this connection.
// Assume there are no more than 255 tickets, otherwise two
// tickets might have the same PSK which could be a problem if
// one of them is compromised.
ticketNonce := []byte{byte(i)}
sessionState.pskSecret = hkdfExpandLabel(hash, resumptionMasterSecret, ticketNonce, "resumption", hash.Size())
ticket := sessionState.marshal()
var err error
if c.config.SessionTicketSealer != nil {
cs := c.ConnectionState()
ticket, err = c.config.SessionTicketSealer.Seal(&cs, ticket)
} else {
ticket, err = c.encryptTicket(ticket)
}
if err != nil {
c.sendAlert(alertInternalError)
return err
}
if ticket == nil {
continue
}
ticketMsg := &newSessionTicketMsg13{
lifetime: 24 * 3600, // TODO(filippo)
maxEarlyDataLength: c.config.Max0RTTDataSize,
withEarlyDataInfo: c.config.Max0RTTDataSize > 0,
ageAdd: sessionState.ageAdd,
nonce: ticketNonce,
ticket: ticket,
}
if _, err := c.writeRecord(recordTypeHandshake, ticketMsg.marshal()); err != nil {
return err
}
}
return nil
}
func (hs *serverHandshakeState) traceErr(err error) {
if err == nil {
return
}
if os.Getenv("TLSDEBUG") == "error" {
if hs != nil && hs.clientHello != nil {
os.Stderr.WriteString(hex.Dump(hs.clientHello.marshal()))
} else if err == io.EOF {
return // don't stack trace on EOF before CH
}
fmt.Fprintf(os.Stderr, "\n%s\n", debug.Stack())
}
if os.Getenv("TLSDEBUG") == "short" {
var pcs [4]uintptr
frames := runtime.CallersFrames(pcs[0:runtime.Callers(3, pcs[:])])
for {
frame, more := frames.Next()
if frame.Function != "crypto/tls.(*halfConn).setErrorLocked" &&
frame.Function != "crypto/tls.(*Conn).sendAlertLocked" &&
frame.Function != "crypto/tls.(*Conn).sendAlert" {
file := frame.File[strings.LastIndex(frame.File, "/")+1:]
log.Printf("%s:%d (%s): %v", file, frame.Line, frame.Function, err)
return
}
if !more {
break
}
}
}
}
func getCertsFromEntries(certEntries []certificateEntry) [][]byte {
certs := make([][]byte, len(certEntries))
for i, cert := range certEntries {
certs[i] = cert.data
}
return certs
}
func (hs *clientHandshakeState) processEncryptedExtensions(ee *encryptedExtensionsMsg) error {
c := hs.c
if ee.alpnProtocol != "" {
c.clientProtocol = ee.alpnProtocol
c.clientProtocolFallback = false
}
return nil
}
func verifyPeerHandshakeSignature(
certVerify *certificateVerifyMsg,
pubKey crypto.PublicKey,
signAlgosKnown []SignatureScheme,
transHash []byte,
contextString string) (error, alert) {
_, sigType, hashFunc, err := pickSignatureAlgorithm(
pubKey,
[]SignatureScheme{certVerify.signatureAlgorithm},
signAlgosKnown,
VersionTLS13)
if err != nil {
return err, alertHandshakeFailure
}
digest := prepareDigitallySigned(hashFunc, contextString, transHash)
err = verifyHandshakeSignature(sigType, pubKey, hashFunc, digest, certVerify.signature)
if err != nil {
return err, alertDecryptError
}
return nil, alertSuccess
}
func (hs *clientHandshakeState) getCertificate13(certReq *certificateRequestMsg13) (*Certificate, error) {
certReq12 := &certificateRequestMsg{
hasSignatureAndHash: true,
supportedSignatureAlgorithms: certReq.supportedSignatureAlgorithms,
certificateAuthorities: certReq.certificateAuthorities,
}
var rsaAvail, ecdsaAvail bool
for _, sigAlg := range certReq.supportedSignatureAlgorithms {
switch signatureFromSignatureScheme(sigAlg) {
case signaturePKCS1v15, signatureRSAPSS:
rsaAvail = true
case signatureECDSA:
ecdsaAvail = true
}
}
if rsaAvail {
certReq12.certificateTypes = append(certReq12.certificateTypes, certTypeRSASign)
}
if ecdsaAvail {
certReq12.certificateTypes = append(certReq12.certificateTypes, certTypeECDSASign)
}
return hs.getCertificate(certReq12)
}
func (hs *clientHandshakeState) sendCertificate13(chainToSend *Certificate, certReq *certificateRequestMsg13) error {
c := hs.c
certEntries := []certificateEntry{}
for _, cert := range chainToSend.Certificate {
certEntries = append(certEntries, certificateEntry{data: cert})
}
certMsg := &certificateMsg13{certificates: certEntries}
hs.keySchedule.write(certMsg.marshal())
if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
return err
}
if len(certEntries) == 0 {
// No client cert available, nothing to sign.
return nil
}
key, ok := chainToSend.PrivateKey.(crypto.Signer)
if !ok {
c.sendAlert(alertInternalError)
return fmt.Errorf("tls: client certificate private key of type %T does not implement crypto.Signer", chainToSend.PrivateKey)
}
signatureAlgorithm, sigType, hashFunc, err := pickSignatureAlgorithm(key.Public(), certReq.supportedSignatureAlgorithms, hs.hello.supportedSignatureAlgorithms, c.vers)
if err != nil {
hs.c.sendAlert(alertHandshakeFailure)
return err
}
digest := prepareDigitallySigned(hashFunc, "TLS 1.3, client CertificateVerify", hs.keySchedule.transcriptHash.Sum(nil))
signOpts := crypto.SignerOpts(hashFunc)
if sigType == signatureRSAPSS {
signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: hashFunc}
}
signature, err := key.Sign(c.config.rand(), digest, signOpts)
if err != nil {
c.sendAlert(alertInternalError)
return err
}
verifyMsg := &certificateVerifyMsg{
hasSignatureAndHash: true,
signatureAlgorithm: signatureAlgorithm,
signature: signature,
}
hs.keySchedule.write(verifyMsg.marshal())
if _, err := c.writeRecord(recordTypeHandshake, verifyMsg.marshal()); err != nil {
return err
}
return nil
}
func (hs *clientHandshakeState) doTLS13Handshake() error {
c := hs.c
hash := hashForSuite(hs.suite)
hashSize := hash.Size()
serverHello := hs.serverHello
c.scts = serverHello.scts
// middlebox compatibility mode, send CCS before second flight.
if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
return err
}
// TODO check if keyshare is unacceptable, raise HRR.
clientKS := hs.hello.keyShares[0]
if serverHello.keyShare.group != clientKS.group {
c.sendAlert(alertIllegalParameter)
return errors.New("bad or missing key share from server")
}
// 0-RTT is not supported yet, so use an empty PSK.
hs.keySchedule.setSecret(nil)
ecdheSecret := c.deriveDHESecret(serverHello.keyShare, hs.privateKey)
if ecdheSecret == nil {
c.sendAlert(alertIllegalParameter)
return errors.New("tls: bad ECDHE server share")
}
// Calculate handshake secrets.
hs.keySchedule.setSecret(ecdheSecret)
clientCipher, clientHandshakeSecret := hs.keySchedule.prepareCipher(secretHandshakeClient)
serverCipher, serverHandshakeSecret := hs.keySchedule.prepareCipher(secretHandshakeServer)
if c.hand.Len() > 0 {
c.sendAlert(alertUnexpectedMessage)
return errors.New("tls: unexpected data after Server Hello")
}
// Do not change the sender key yet, the server must authenticate first.
c.in.setCipher(c.vers, serverCipher)
// Calculate MAC key for Finished messages.
serverFinishedKey := hkdfExpandLabel(hash, serverHandshakeSecret, nil, "finished", hashSize)
clientFinishedKey := hkdfExpandLabel(hash, clientHandshakeSecret, nil, "finished", hashSize)
msg, err := c.readHandshake()
if err != nil {
return err
}
encryptedExtensions, ok := msg.(*encryptedExtensionsMsg)
if !ok {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(encryptedExtensions, msg)
}
if err := hs.processEncryptedExtensions(encryptedExtensions); err != nil {
return err
}
hs.keySchedule.write(encryptedExtensions.marshal())
// PSKs are not supported, so receive Certificate message.
msg, err = c.readHandshake()
if err != nil {
return err
}
var chainToSend *Certificate
certReq, isCertRequested := msg.(*certificateRequestMsg13)
if isCertRequested {
hs.keySchedule.write(certReq.marshal())
if chainToSend, err = hs.getCertificate13(certReq); err != nil {
c.sendAlert(alertInternalError)
return err
}
msg, err = c.readHandshake()
if err != nil {
return err
}
}
certMsg, ok := msg.(*certificateMsg13)
if !ok {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(certMsg, msg)
}
hs.keySchedule.write(certMsg.marshal())
// Validate certificates.
certs := getCertsFromEntries(certMsg.certificates)
if err := hs.processCertsFromServer(certs); err != nil {
return err
}
// Receive CertificateVerify message.
msg, err = c.readHandshake()
if err != nil {
return err
}
certVerifyMsg, ok := msg.(*certificateVerifyMsg)
if !ok {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(certVerifyMsg, msg)
}
// Validate the DC if present. The DC is only processed if the extension was
// indicated by the ClientHello; otherwise this call will result in an
// "illegal_parameter" alert.
if len(certMsg.certificates) > 0 {
if err := hs.processDelegatedCredentialFromServer(
certMsg.certificates[0].delegatedCredential,
certVerifyMsg.signatureAlgorithm); err != nil {
return err
}
}
// Set the public key used to verify the handshake.
pk := hs.c.peerCertificates[0].PublicKey
// If the delegated credential extension has successfully been negotiated,
// then the CertificateVerify signature will have been produced with the
// DelegatedCredential's private key.
if hs.c.verifiedDc != nil {
pk = hs.c.verifiedDc.cred.publicKey
}
// Verify the handshake signature.
err, alertCode := verifyPeerHandshakeSignature(
certVerifyMsg,
pk,
hs.hello.supportedSignatureAlgorithms,
hs.keySchedule.transcriptHash.Sum(nil),
"TLS 1.3, server CertificateVerify")
if err != nil {
c.sendAlert(alertCode)
return err
}
hs.keySchedule.write(certVerifyMsg.marshal())
// Receive Finished message.
msg, err = c.readHandshake()
if err != nil {
return err
}
serverFinished, ok := msg.(*finishedMsg)
if !ok {
c.sendAlert(alertUnexpectedMessage)
return unexpectedMessageError(serverFinished, msg)
}
// Validate server Finished hash.
expectedVerifyData := hmacOfSum(hash, hs.keySchedule.transcriptHash, serverFinishedKey)
if subtle.ConstantTimeCompare(expectedVerifyData, serverFinished.verifyData) != 1 {
c.sendAlert(alertDecryptError)
return errors.New("tls: server's Finished message is incorrect")
}
hs.keySchedule.write(serverFinished.marshal())
// Server has authenticated itself. Calculate application traffic secrets.
hs.keySchedule.setSecret(nil) // derive master secret
appServerCipher, _ := hs.keySchedule.prepareCipher(secretApplicationServer)
appClientCipher, _ := hs.keySchedule.prepareCipher(secretApplicationClient)
// TODO store initial traffic secret key for KeyUpdate GH #85
// Change outbound handshake cipher for final step
c.out.setCipher(c.vers, clientCipher)
// Client auth requires sending a (possibly empty) Certificate followed
// by a CertificateVerify message (if there was an actual certificate).
if isCertRequested {
if err := hs.sendCertificate13(chainToSend, certReq); err != nil {
return err
}
}
// Send Finished
verifyData := hmacOfSum(hash, hs.keySchedule.transcriptHash, clientFinishedKey)
clientFinished := &finishedMsg{
verifyData: verifyData,
}
if _, err := c.writeRecord(recordTypeHandshake, clientFinished.marshal()); err != nil {
return err
}
// Handshake done, set application traffic secret
c.out.setCipher(c.vers, appClientCipher)
if c.hand.Len() > 0 {
c.sendAlert(alertUnexpectedMessage)
return errors.New("tls: unexpected data after handshake")
}
c.in.setCipher(c.vers, appServerCipher)
return nil
}
// supportedSigAlgorithmsCert iterates over schemes and filters out those algorithms
// which are not supported for certificate verification.
func supportedSigAlgorithmsCert(schemes []SignatureScheme) (ret []SignatureScheme) {
for _, sig := range schemes {
// X509 doesn't support PSS signatures
if !signatureSchemeIsPSS(sig) {
ret = append(ret, sig)
}
}
return
}
// The functions below implement the dhKex interface for the different DH shared secret agreements.
// KEX: P-256, P-384, P-521
func (kexNist) generate(c *Conn, groupId CurveID) (private []byte, ks keyShare, err error) {
// never fails
curve, _ := curveForCurveID(groupId)
private, x, y, err := elliptic.GenerateKey(curve, c.config.rand())
if err != nil {
return nil, keyShare{}, err
}
ks.group = groupId
ks.data = elliptic.Marshal(curve, x, y)
return
}
func (kexNist) derive(c *Conn, ks keyShare, secretKey []byte) []byte {
// never fails
curve, _ := curveForCurveID(ks.group)
x, y := elliptic.Unmarshal(curve, ks.data)
if x == nil {
return nil
}
x, _ = curve.ScalarMult(x, y, secretKey)
xBytes := x.Bytes()
curveSize := (curve.Params().BitSize + 8 - 1) >> 3
if len(xBytes) == curveSize {
return xBytes
}
buf := make([]byte, curveSize)
copy(buf[len(buf)-len(xBytes):], xBytes)
return buf
}
// KEX: X25519
func (kexX25519) generate(c *Conn, groupId CurveID) ([]byte, keyShare, error) {
var scalar, public [x25519SharedSecretSz]byte
if _, err := io.ReadFull(c.config.rand(), scalar[:]); err != nil {
return nil, keyShare{}, err
}
curve25519.ScalarBaseMult(&public, &scalar)
return scalar[:], keyShare{group: X25519, data: public[:]}, nil
}
func (kexX25519) derive(c *Conn, ks keyShare, secretKey []byte) []byte {
var theirPublic, sharedKey, scalar [x25519SharedSecretSz]byte
if len(ks.data) != x25519SharedSecretSz {
return nil
}
copy(theirPublic[:], ks.data)
copy(scalar[:], secretKey)
curve25519.ScalarMult(&sharedKey, &scalar, &theirPublic)
return sharedKey[:]
}
// KEX: SIDH/503
func (kexSidhP503) generate(c *Conn, groupId CurveID) ([]byte, keyShare, error) {
var variant, _ = getSidhKeyVariant(c.isClient)
var prvKey = sidh.NewPrivateKey(sidh.FP_503, variant)
if prvKey.Generate(c.config.rand()) != nil {
return nil, keyShare{}, errors.New("tls: private SIDH key generation failed")
}
pubKey := prvKey.GeneratePublicKey()
return prvKey.Export(), keyShare{group: sidhP503, data: pubKey.Export()}, nil
}
func (kexSidhP503) derive(c *Conn, ks keyShare, key []byte) []byte {
var prvVariant, pubVariant = getSidhKeyVariant(c.isClient)
var prvKeySize = P503PrvKeySz
if len(ks.data) != P503PubKeySz || len(key) != prvKeySize {
return nil
}
prvKey := sidh.NewPrivateKey(sidh.FP_503, prvVariant)
pubKey := sidh.NewPublicKey(sidh.FP_503, pubVariant)
if err := prvKey.Import(key); err != nil {
return nil
}
if err := pubKey.Import(ks.data); err != nil {
return nil
}
// Never fails
sharedKey, _ := sidh.DeriveSecret(prvKey, pubKey)
return sharedKey
}
// KEX Hybrid SIDH/503-X25519
func (kexHybridSidhP503X25519) generate(c *Conn, groupId CurveID) (private []byte, ks keyShare, err error) {
var pubHybrid [SidhP503Curve25519PubKeySz]byte
var prvHybrid [SidhP503Curve25519PrvKeySz]byte
// Generate ephemeral key for classic x25519
private, ks, err = dhKexStrat[X25519].generate(c, groupId)
if err != nil {
return
}
copy(prvHybrid[:], private)
copy(pubHybrid[:], ks.data)
// Generate PQ ephemeral key for SIDH
private, ks, err = dhKexStrat[sidhP503].generate(c, groupId)
if err != nil {
return
}
copy(prvHybrid[x25519SharedSecretSz:], private)
copy(pubHybrid[x25519SharedSecretSz:], ks.data)
return prvHybrid[:], keyShare{group: HybridSidhP503Curve25519, data: pubHybrid[:]}, nil
}
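// Layout note: the private key and key-share data built above are plain
// concatenations, with the X25519 component in the first x25519SharedSecretSz
// (32) bytes and the SIDH/p503 component following it. derive below splits its
// inputs at the same offset, so the hybrid shared secret is likewise the
// X25519 secret followed by the SIDH secret.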
func (kexHybridSidhP503X25519) derive(c *Conn, ks keyShare, key []byte) []byte {
var sharedKey [SidhP503Curve25519SharedKeySz]byte
var ret []byte
var tmpKs keyShare
// Key agreement for classic
tmpKs.group = X25519
tmpKs.data = ks.data[:x25519SharedSecretSz]
ret = dhKexStrat[X25519].derive(c, tmpKs, key[:x25519SharedSecretSz])
if ret == nil {
return nil
}
copy(sharedKey[:], ret)
// Key agreement for PQ
tmpKs.group = sidhP503
tmpKs.data = ks.data[x25519SharedSecretSz:]
ret = dhKexStrat[sidhP503].derive(c, tmpKs, key[x25519SharedSecretSz:])
if ret == nil {
return nil
}
copy(sharedKey[x25519SharedSecretSz:], ret)
return sharedKey[:]
}
| ["\"TLSDEBUG\"", "\"TLSDEBUG\""] | [] | ["TLSDEBUG"] | [] | ["TLSDEBUG"] | go | 1 | 0 | |
ui/app.go | package ui
import (
"html/template"
"io"
"net/http"
"net/url"
"os"
"path"
"sort"
"strconv"
"github.com/labstack/echo"
"github.com/sirupsen/logrus"
"github.com/wookesh/gohist/diff"
"github.com/wookesh/gohist/objects"
)
type handler struct {
history *objects.History
repoName string
}
type Template struct {
templates *template.Template
}
func (t *Template) Render(w io.Writer, name string, data interface{}, c echo.Context) error {
return t.templates.ExecuteTemplate(w, name, data)
}
type Link struct {
Name string
First string
Len int
Total int
Deleted bool
}
type ListViewData struct {
RepoName string
Links Links
Stats map[string]interface{}
ChartsData map[string]objects.ChartData
}
type Links []Link
func (l Links) Len() int { return len(l) }
func (l Links) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l Links) Less(i, j int) bool { return l[i].Name < l[j].Name }
func (h *handler) List(c echo.Context) error {
onlyChangedStr := c.QueryParam("only_changed")
onlyChanged, err := strconv.ParseBool(onlyChangedStr)
if err != nil {
onlyChanged = false
}
listData := &ListViewData{RepoName: h.repoName, Stats: h.history.Stats(), ChartsData: h.history.ChartsData()}
for fName, fHistory := range h.history.Data {
if !onlyChanged || (onlyChanged && (len(fHistory.Elements) > 1 || fHistory.LifeTime == 1)) {
listData.Links = append(listData.Links,
Link{
Name: fName,
First: fHistory.First.Commit.Hash.String(),
Len: fHistory.VersionsCount(),
Total: fHistory.LifeTime,
Deleted: fHistory.Deleted,
})
}
}
sort.Sort(listData.Links)
return c.Render(http.StatusOK, "list.html", listData)
}
type DiffView struct {
Name string
History *objects.FunctionHistory
LeftDiff diff.Coloring
RightDiff diff.Coloring
First, Last string
}
func (h *handler) Get(c echo.Context) error {
funcName := c.Param("name")
funcName, err := url.QueryUnescape(funcName)
if err != nil {
return c.HTML(http.StatusNotFound, "NOT FOUND")
}
f, ok := h.history.Data[funcName]
if !ok {
return c.HTML(http.StatusNotFound, "NOT FOUND")
}
pos := c.QueryParam("pos")
cmp := c.QueryParam("cmp")
useLCS := c.QueryParam("lcs")
if _, ok := f.Elements[pos]; pos == "" || !ok {
pos = f.First.Commit.Hash.String()
}
element := f.Elements[pos]
if _, ok := element.Parent[cmp]; cmp == "" || !ok {
for sha := range element.Parent { // get random
cmp = sha
break
}
}
comparedElement := f.Elements[cmp]
var left, right diff.Coloring
switch pos {
case f.First.Commit.Hash.String():
right = diff.Diff(nil, element.Func, diff.ModeNew)
default:
if useLCS == "yes" {
left = diff.LCS(comparedElement.Text, element.Text, comparedElement.Offset, diff.ModeOld)
right = diff.LCS(comparedElement.Text, element.Text, element.Offset, diff.ModeNew)
} else {
left = diff.Diff(comparedElement.Func, element.Func, diff.ModeOld)
right = diff.Diff(element.Func, comparedElement.Func, diff.ModeNew)
}
}
diffView := &DiffView{
Name: funcName,
History: f,
LeftDiff: left,
RightDiff: right,
Last: f.Last.Commit.Hash.String(),
First: f.First.Commit.Hash.String(),
}
data := map[string]interface{}{"pos": pos, "diffView": diffView, "cmp": cmp, "lcs": useLCS}
return c.Render(http.StatusOK, "diff.html", data)
}
func Run(history *objects.History, repoName, port string) {
handler := handler{history: history, repoName: repoName}
funcMap := template.FuncMap{
"next": func(i int64) int64 {
return i + 1
},
"prev": func(i int64) int64 {
return i - 1
},
"prev_int": func(i int) int {
return i - 1
},
"color": color,
"modifications": func(a, b int, deleted bool) string {
if deleted || b == 0 {
return "dark"
}
stability := 1.0 - float64(a)/float64(b)
if stability >= 0.8 {
return "success"
} else if stability >= 0.5 {
return "warning"
} else {
return "danger"
}
},
"escape": func(s string) string {
return url.QueryEscape(s)
},
}
rootPath := path.Join(os.Getenv("GOPATH"), "src", "github.com", "wookesh", "gohist")
t := &Template{
templates: template.Must(template.New("sites").Funcs(funcMap).ParseGlob(path.Join(rootPath, "ui/views/*.html"))),
}
e := echo.New()
e.HideBanner = true
e.Renderer = t
e.GET("/", handler.List)
e.GET("/:name/", handler.Get)
e.Static("/static", path.Join(rootPath, "ui/static"))
logrus.Infoln("GoHist:", "started web server")
if err := e.Start("0.0.0.0:" + port); err != nil {
logrus.Fatalln(err)
}
}
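// color wraps the regions of s described by coloring in <span> tags with the
// corresponding diff color. Positions in coloring are absolute offsets into
// the original source text, while s starts at offset within that text, which
// is why the loop compares them against i+offset.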
func color(s string, coloring diff.Coloring, offset int) template.HTML {
if len(coloring) == 0 {
return template.HTML(s)
}
logrus.Debugln("color:", coloring, offset)
current := 0
var hasColoring bool
var result string
logrus.Debugln("color:", "next coloring:", current, coloring[current])
for i := 0; i < len(s); i++ {
if current < len(coloring) {
if !hasColoring && int(coloring[current].Pos) <= i+offset {
logrus.Debugln("color:", "changing color:", toColor(coloring[current].Color), i+offset)
hasColoring = true
result += `<span style="color: ` + toColor(coloring[current].Color) + `;">`
}
if hasColoring && int(coloring[current].End) < i+offset {
logrus.Debugln("color:", "removing color:", i+offset)
result += `</span>`
if current < len(coloring) {
current++
logrus.Debugln("color:", "next coloring:", current)
}
if current < len(coloring) && int(coloring[current].Pos) <= i+offset {
logrus.Debugln("color:", "changing color:", toColor(coloring[current].Color), i+offset)
result += `<span style="color: ` + toColor(coloring[current].Color) + `;">`
} else {
hasColoring = false
}
}
}
if s[i] == '<' {
result += `<span>` + string(s[i]) + `</span>` // TODO: I dunno how to frontend, find better solution later
} else {
result += string(s[i])
}
}
if hasColoring {
result += `</span>`
}
return template.HTML(result)
}
func toColor(c diff.Color) string {
switch c {
case diff.ColorSame:
return "white"
case diff.ColorNew:
return "green"
case diff.ColorRemoved:
return "red"
case diff.ColorSimilar:
return "lightblue"
default:
return "white"
}
}
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
lib/elasticsearch.go | package mpelasticsearch
import (
"crypto/tls"
"encoding/json"
"errors"
"flag"
"fmt"
"net/http"
"os"
"strings"
mp "github.com/mackerelio/go-mackerel-plugin"
"github.com/mackerelio/golib/logging"
)
var logger = logging.GetLogger("metrics.plugin.odfe")
var metricPlace = map[string][]string{
"http_opened": {"http", "total_opened"},
"total_indexing_index": {"indices", "indexing", "index_total"},
"total_indexing_delete": {"indices", "indexing", "delete_total"},
"total_get": {"indices", "get", "total"},
"total_search_query": {"indices", "search", "query_total"},
"total_search_fetch": {"indices", "search", "fetch_total"},
"total_merges": {"indices", "merges", "total"},
"total_refresh": {"indices", "refresh", "total"},
"total_flush": {"indices", "flush", "total"},
"total_warmer": {"indices", "warmer", "total"},
"total_percolate": {"indices", "percolate", "total"},
"total_suggest": {"indices", "suggest", "total"},
"docs_count": {"indices", "docs", "count"},
"docs_deleted": {"indices", "docs", "deleted"},
"fielddata_size": {"indices", "fielddata", "memory_size_in_bytes"},
"filter_cache_size": {"indices", "filter_cache", "memory_size_in_bytes"},
"segments_size": {"indices", "segments", "memory_in_bytes"},
"segments_index_writer_size": {"indices", "segments", "index_writer_memory_in_bytes"},
"segments_version_map_size": {"indices", "segments", "version_map_memory_in_bytes"},
"segments_fixed_bit_set_size": {"indices", "segments", "fixed_bit_set_memory_in_bytes"},
"evictions_fielddata": {"indices", "fielddata", "evictions"},
"evictions_filter_cache": {"indices", "filter_cache", "evictions"},
"heap_used": {"jvm", "mem", "heap_used_in_bytes"},
"heap_max": {"jvm", "mem", "heap_max_in_bytes"},
"threads_generic": {"thread_pool", "generic", "threads"},
"threads_index": {"thread_pool", "index", "threads"},
"threads_snapshot_data": {"thread_pool", "snapshot_data", "threads"},
"threads_get": {"thread_pool", "get", "threads"},
"threads_bench": {"thread_pool", "bench", "threads"},
"threads_snapshot": {"thread_pool", "snapshot", "threads"},
"threads_merge": {"thread_pool", "merge", "threads"},
"threads_suggest": {"thread_pool", "suggest", "threads"},
"threads_bulk": {"thread_pool", "bulk", "threads"},
"threads_optimize": {"thread_pool", "optimize", "threads"},
"threads_warmer": {"thread_pool", "warmer", "threads"},
"threads_flush": {"thread_pool", "flush", "threads"},
"threads_search": {"thread_pool", "search", "threads"},
"threads_percolate": {"thread_pool", "percolate", "threads"},
"threads_refresh": {"thread_pool", "refresh", "threads"},
"threads_management": {"thread_pool", "management", "threads"},
"threads_fetch_shard_started": {"thread_pool", "fetch_shard_started", "threads"},
"threads_fetch_shard_store": {"thread_pool", "fetch_shard_store", "threads"},
"threads_listener": {"thread_pool", "listener", "threads"},
"count_rx": {"transport", "rx_count"},
"count_tx": {"transport", "tx_count"},
"open_file_descriptors": {"process", "open_file_descriptors"},
// Added for ODFE
"query_cache_size": {"indices", "query_cache", "memory_size_in_bytes"},
"request_cache_size": {"indices", "request_cache", "memory_size_in_bytes"},
"threads_analyze": {"thread_pool", "analyze", "threads"},
"threads_force_merge": {"thread_pool", "force_merge", "threads"},
"threads_opendistro_monitor_runner": {"thread_pool", "opendistro_monitor_runner", "threads"},
"threads_search_throttled": {"thread_pool", "search_throttled", "threads"},
"threads_sql-worker": {"thread_pool", "sql-worker", "threads"},
}
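// getFloatValue walks the nested JSON document s along keys and returns the
// float64 found at the end of the path. For example, the metricPlace entry
// {"jvm", "mem", "heap_used_in_bytes"} resolves s["jvm"]["mem"]["heap_used_in_bytes"].
// It returns an error if an intermediate value is not an object or the leaf is
// not a float64.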
func getFloatValue(s map[string]interface{}, keys []string) (float64, error) {
var val float64
sm := s
for i, k := range keys {
if i+1 < len(keys) {
switch sm[k].(type) {
case map[string]interface{}:
sm = sm[k].(map[string]interface{})
default:
return 0, errors.New("Cannot handle as a hash")
}
} else {
switch sm[k].(type) {
case float64:
val = sm[k].(float64)
default:
return 0, errors.New("Not float64")
}
}
}
return val, nil
}
// ElasticsearchPlugin mackerel plugin for Elasticsearch
type ElasticsearchPlugin struct {
URI string
Prefix string
LabelPrefix string
User string
Password string
Secure bool
}
// FetchMetrics interface for mackerelplugin
func (p ElasticsearchPlugin) FetchMetrics() (map[string]float64, error) {
req, err := http.NewRequest(http.MethodGet, p.URI+"/_nodes/_local/stats", nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", "mackerel-plugin-elasticsearch")
if p.User != "" {
req.SetBasicAuth(p.User, p.Password)
}
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: !p.Secure},
}
client := http.Client{Transport: tr}
resp, err := client.Do(req)
if err != nil {
logger.Errorf("Request to elasticsearch failed: %s", err)
return nil, err
}
defer resp.Body.Close()
stat := make(map[string]float64)
decoder := json.NewDecoder(resp.Body)
var s map[string]interface{}
err = decoder.Decode(&s)
if err != nil {
return nil, err
}
nodes := s["nodes"].(map[string]interface{})
n := ""
for k := range nodes {
if n != "" {
return nil, errors.New("Multiple node found")
}
n = k
}
node := nodes[n].(map[string]interface{})
for k, v := range metricPlace {
val, err := getFloatValue(node, v)
if err != nil {
logger.Infof("Failed to find '%s': %s", k, err)
continue
}
stat[k] = val
}
return stat, nil
}
// GraphDefinition interface for mackerelplugin
func (p ElasticsearchPlugin) GraphDefinition() map[string]mp.Graphs {
var graphdef = map[string]mp.Graphs{
p.Prefix + ".http": {
Label: (p.LabelPrefix + " HTTP"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "http_opened", Label: "Opened", Diff: true},
},
},
p.Prefix + ".indices": {
Label: (p.LabelPrefix + " Indices"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "total_indexing_index", Label: "Indexing-Index", Diff: true, Stacked: true},
{Name: "total_indexing_delete", Label: "Indexing-Delete", Diff: true, Stacked: true},
{Name: "total_get", Label: "Get", Diff: true, Stacked: true},
{Name: "total_search_query", Label: "Search-Query", Diff: true, Stacked: true},
{Name: "total_search_fetch", Label: "Search-fetch", Diff: true, Stacked: true},
{Name: "total_merges", Label: "Merges", Diff: true, Stacked: true},
{Name: "total_refresh", Label: "Refresh", Diff: true, Stacked: true},
{Name: "total_flush", Label: "Flush", Diff: true, Stacked: true},
{Name: "total_warmer", Label: "Warmer", Diff: true, Stacked: true},
{Name: "total_percolate", Label: "Percolate", Diff: true, Stacked: true},
{Name: "total_suggest", Label: "Suggest", Diff: true, Stacked: true},
},
},
p.Prefix + ".indices.docs": {
Label: (p.LabelPrefix + " Indices Docs"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "docs_count", Label: "Count", Stacked: true},
{Name: "docs_deleted", Label: "Deleted", Stacked: true},
},
},
p.Prefix + ".indices.memory_size": {
Label: (p.LabelPrefix + " Indices Memory Size"),
Unit: "bytes",
Metrics: []mp.Metrics{
{Name: "fielddata_size", Label: "Fielddata", Stacked: true},
{Name: "filter_cache_size", Label: "Filter Cache", Stacked: true},
{Name: "segments_size", Label: "Lucene Segments", Stacked: true},
{Name: "segments_index_writer_size", Label: "Lucene Segments Index Writer", Stacked: true},
{Name: "segments_version_map_size", Label: "Lucene Segments Version Map", Stacked: true},
{Name: "segments_fixed_bit_set_size", Label: "Lucene Segments Fixed Bit Set", Stacked: true},
// Added for ODFE
{Name: "query_cache_size", Label: "Query Cache", Stacked: true},
{Name: "request_cache_size", Label: "Request Cache", Stacked: true},
},
},
p.Prefix + ".indices.evictions": {
Label: (p.LabelPrefix + " Indices Evictions"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "evictions_fielddata", Label: "Fielddata", Diff: true},
{Name: "evictions_filter_cache", Label: "Filter Cache", Diff: true},
},
},
p.Prefix + ".jvm.heap": {
Label: (p.LabelPrefix + " JVM Heap Mem"),
Unit: "bytes",
Metrics: []mp.Metrics{
{Name: "heap_used", Label: "Used"},
{Name: "heap_max", Label: "Max"},
},
},
p.Prefix + ".thread_pool.threads": {
Label: (p.LabelPrefix + " Thread-Pool Threads"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "threads_generic", Label: "Generic", Stacked: true},
{Name: "threads_index", Label: "Index", Stacked: true},
{Name: "threads_snapshot_data", Label: "Snapshot Data", Stacked: true},
{Name: "threads_get", Label: "Get", Stacked: true},
{Name: "threads_bench", Label: "Bench", Stacked: true},
{Name: "threads_snapshot", Label: "Snapshot", Stacked: true},
{Name: "threads_merge", Label: "Merge", Stacked: true},
{Name: "threads_suggest", Label: "Suggest", Stacked: true},
{Name: "threads_bulk", Label: "Bulk", Stacked: true},
{Name: "threads_optimize", Label: "Optimize", Stacked: true},
{Name: "threads_warmer", Label: "Warmer", Stacked: true},
{Name: "threads_flush", Label: "Flush", Stacked: true},
{Name: "threads_search", Label: "Search", Stacked: true},
{Name: "threads_percolate", Label: "Percolate", Stacked: true},
{Name: "threads_refresh", Label: "Refresh", Stacked: true},
{Name: "threads_management", Label: "Management", Stacked: true},
{Name: "threads_fetch_shard_started", Label: "Fetch Shard Started", Stacked: true},
{Name: "threads_fetch_shard_store", Label: "Fetch Shard Store", Stacked: true},
{Name: "threads_listener", Label: "Listener", Stacked: true},
// Added for ODFE
{Name: "threads_analyze", Label: "Analyze", Stacked: true},
{Name: "threads_force_merge", Label: "Force Merge", Stacked: true},
{Name: "threads_opendistro_monitor_runner", Label: "Monitor Runner", Stacked: true},
{Name: "threads_search_throttled", Label: "Search Throttled", Stacked: true},
{Name: "threads_sql-worker", Label: "SQL Worker", Stacked: true},
},
},
p.Prefix + ".transport.count": {
Label: (p.LabelPrefix + " Transport Count"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "count_rx", Label: "TX", Diff: true},
{Name: "count_tx", Label: "RX", Diff: true},
},
},
p.Prefix + ".process": {
Label: (p.LabelPrefix + " Process"),
Unit: "integer",
Metrics: []mp.Metrics{
{Name: "open_file_descriptors", Label: "Open File Descriptors"},
},
},
}
return graphdef
}
// Do the plugin
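//
// Usage sketch (the binary name and host below are illustrative; the flags and
// environment variables are the ones defined in this function):
//
//   ES_USER=admin ES_PASSWORD=secret \
//     mackerel-plugin-odfe -scheme https -host es.example.com -port 9200 -secure
//
// ES_USER and ES_PASSWORD only supply defaults; explicit -user and -password
// flags override them.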
func Do() {
optScheme := flag.String("scheme", "http", "Scheme")
optHost := flag.String("host", "localhost", "Host")
optPort := flag.String("port", "9200", "Port")
optSecure := flag.Bool("secure", false, "Enable TLS certificate check")
defUser := os.Getenv("ES_USER")
optUser := flag.String("user", defUser, "BasicAuth user")
defPassword := os.Getenv("ES_PASSWORD")
optPassword := flag.String("password", defPassword, "BasicAuth password")
optPrefix := flag.String("metric-key-prefix", "elasticsearch", "Metric key prefix")
optLabelPrefix := flag.String("metric-label-prefix", "", "Metric Label prefix")
optTempfile := flag.String("tempfile", "", "Temp file name")
flag.Parse()
var elasticsearch ElasticsearchPlugin
elasticsearch.URI = fmt.Sprintf("%s://%s:%s", *optScheme, *optHost, *optPort)
elasticsearch.Prefix = *optPrefix
elasticsearch.User = *optUser
elasticsearch.Password = *optPassword
elasticsearch.Secure = *optSecure
if *optLabelPrefix == "" {
elasticsearch.LabelPrefix = strings.Title(*optPrefix)
} else {
elasticsearch.LabelPrefix = *optLabelPrefix
}
helper := mp.NewMackerelPlugin(elasticsearch)
if *optTempfile != "" {
helper.Tempfile = *optTempfile
} else {
helper.SetTempfileByBasename(fmt.Sprintf("mackerel-plugin-odfe-%s-%s", *optHost, *optPort))
}
helper.Run()
}
| ["\"ES_USER\"", "\"ES_PASSWORD\""] | [] | ["ES_PASSWORD", "ES_USER"] | [] | ["ES_PASSWORD", "ES_USER"] | go | 2 | 0 | |
src/toil/test/src/fileStoreTest.py | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import errno
import filecmp
import inspect
import logging
import os
import stat
import random
import signal
import time
from abc import ABCMeta
from struct import pack, unpack
from uuid import uuid4
import pytest
from toil.common import Toil
from toil.fileStores import FileID
from toil.fileStores.cachingFileStore import (CacheUnbalancedError,
IllegalDeletionCacheError)
from toil.job import Job
from toil.jobStores.abstractJobStore import NoSuchFileException
from toil.leader import FailedJobsException
from toil.realtimeLogger import RealtimeLogger
from toil.test import ToilTest, needs_aws_ec2, needs_google, slow, travis_test
# Some tests take too long on the AWS jobstore and are unsuitable for CI. They can
# be run during manual tests by setting this to False.
testingIsAutomatic = True
logger = logging.getLogger(__name__)
class hidden:
"""
Hiding the abstract test classes from the unittest loader so they can be inherited in different
test suites for the different job stores.
"""
class AbstractFileStoreTest(ToilTest, metaclass=ABCMeta):
"""
An abstract base class for testing the various general functions described in
:class:toil.fileStores.abstractFileStore.AbstractFileStore
"""
# This is overwritten in the inheriting classes
jobStoreType = None
def _getTestJobStore(self):
if self.jobStoreType == 'file':
return self._getTestJobStorePath()
elif self.jobStoreType == 'aws':
return 'aws:{}:cache-tests-{}'.format(self.awsRegion(), str(uuid4()))
elif self.jobStoreType == 'google':
projectID = os.getenv('TOIL_GOOGLE_PROJECTID')
return 'google:{}:cache-tests-{}'.format(projectID, str(uuid4()))
else:
raise RuntimeError('Illegal job store type.')
def setUp(self):
super(hidden.AbstractFileStoreTest, self).setUp()
self.work_dir = self._createTempDir()
self.options = Job.Runner.getDefaultOptions(self._getTestJobStore())
self.options.logLevel = 'DEBUG'
self.options.realTimeLogging = True
self.options.workDir = self.work_dir
self.options.clean = 'always'
self.options.logFile = os.path.join(self.work_dir, 'logFile')
self.tmp_dir = self._createTempDir()
def create_file(self, content, executable=False):
file_path = f'{self.tmp_dir}/{uuid4()}'
with open(file_path, 'w') as f:
f.write(content)
if executable:
# Add file owner execute permissions
os.chmod(file_path, os.stat(file_path).st_mode | stat.S_IXUSR)
return file_path
@staticmethod
def _uselessFunc(job):
"""
I do nothing. Don't judge me.
"""
return None
# Sanity test
@travis_test
def testToilIsNotBroken(self):
"""
Runs a simple DAG to test whether any features other than caching were broken.
"""
A = Job.wrapJobFn(self._uselessFunc)
B = Job.wrapJobFn(self._uselessFunc)
C = Job.wrapJobFn(self._uselessFunc)
D = Job.wrapJobFn(self._uselessFunc)
A.addChild(B)
A.addChild(C)
B.addChild(D)
C.addChild(D)
Job.Runner.startToil(A, self.options)
@slow
def testFileStoreLogging(self):
"""
Ensure that the names of files downloaded by a job are reported in the logs of a
failing Toil run.
"""
class WatchingHandler(logging.Handler):
"""
A logging handler that watches for a certain substring and
trips a flag if it appears.
"""
def __init__(self, match: str):
super().__init__()
self.match = match
self.seen = False
def emit(self, record):
if self.match in record.getMessage():
self.seen = True
handler = WatchingHandler("cats.txt")
logging.getLogger().addHandler(handler)
F = Job.wrapJobFn(self._accessAndFail,
disk='100M')
try:
Job.Runner.startToil(F, self.options)
except FailedJobsException:
# We expect this.
pass
logging.getLogger().removeHandler(handler)
assert handler.seen, "Downloaded file name not found in logs of failing Toil run"
@staticmethod
def _accessAndFail(job):
with job.fileStore.writeGlobalFileStream() as (writable, file_id):
writable.write(b'Cats')
localPath = os.path.join(job.fileStore.getLocalTempDir(), 'cats.txt')
job.fileStore.readGlobalFile(file_id, localPath)
with job.fileStore.readGlobalFileStream(file_id) as readable:
pass
raise RuntimeError("I do not like this file")
# Test filestore operations. This is a slightly less intense version of the cache specific
# test `testReturnFileSizes`
@slow
def testFileStoreOperations(self):
"""
Write a couple of files to the jobstore. Delete a couple of them. Read back written
and locally deleted files.
"""
workdir = self._createTempDir(purpose='nonLocalDir')
F = Job.wrapJobFn(self._testFileStoreOperations,
nonLocalDir=workdir,
numIters=30, disk='2G')
Job.Runner.startToil(F, self.options)
@staticmethod
def _testFileStoreOperations(job, nonLocalDir, numIters=100):
"""
Aux function for testFileStoreOperations. Conducts numIters operations.
"""
work_dir = job.fileStore.getLocalTempDir()
writtenFiles = {} # fsID: (size, isLocal)
localFileIDs = set()
# Add one file for the sake of having something in the job store
writeFileSize = random.randint(0, 30)
cls = hidden.AbstractNonCachingFileStoreTest
fsId, _ = cls._writeFileToJobStore(job, isLocalFile=True, nonLocalDir=nonLocalDir,
fileMB=writeFileSize)
# Fill in the size of the local file we just made
writtenFiles[fsId] = writeFileSize
# Remember it actually should be local
localFileIDs.add(fsId)
logger.info('Now have local file: %s', fsId)
i = 0
while i <= numIters:
randVal = random.random()
if randVal < 0.33: # Write
writeFileSize = random.randint(0, 30)
isLocalFile = True if random.random() <= 0.5 else False
fsID, _ = cls._writeFileToJobStore(job, isLocalFile=isLocalFile,
nonLocalDir=nonLocalDir,
fileMB=writeFileSize)
writtenFiles[fsID] = writeFileSize
if isLocalFile:
localFileIDs.add(fsID)
logger.info('Wrote %s file of size %d MB: %s', 'local' if isLocalFile else 'non-local', writeFileSize, fsID)
else:
if len(writtenFiles) == 0:
continue
else:
fsID, rdelFileSize = random.choice(list(writtenFiles.items()))
rdelRandVal = random.random()
if randVal < 0.66: # Read
mutable = True if random.random() <= 0.5 else False
cache = True if random.random() <= 0.5 else False
job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, str(uuid4())]),
cache=cache, mutable=mutable)
localFileIDs.add(fsID)
logger.info('Read %s %s local copy of: %s', 'mutable' if mutable else 'immutable', 'cached' if cache else 'uncached', fsID)
else: # Delete
if rdelRandVal <= 0.5: # Local Delete
if fsID not in localFileIDs:
# Make sure trying to deleteLocalFile this file fails properly
try:
job.fileStore.deleteLocalFile(fsID)
except OSError as e:
if e.errno != errno.ENOENT:
# This is supposed to produce an
# ENOENT. If it doesn't something is
# broken.
raise
logger.info('Correctly fail to local-delete non-local file: %s', fsID)
else:
assert False, f"Was able to delete non-local file {fsID}"
else:
logger.info('Delete local file: %s', fsID)
job.fileStore.deleteLocalFile(fsID)
else: # Global Delete
job.fileStore.deleteGlobalFile(fsID)
writtenFiles.pop(fsID)
if fsID in localFileIDs:
localFileIDs.remove(fsID)
logger.info('No longer have file: %s', fsID)
i += 1
def testWriteReadGlobalFilePermissions(self):
"""
Ensures that uploaded files preserve their file permissions when they
are downloaded again. This function checks that a written executable file
maintains its executability after being read.
"""
for executable in True, False:
for disable_caching in True, False:
with self.subTest(f'Testing readwrite file permissions\n'
f'[executable: {executable}]\n'
f'[disable_caching: {disable_caching}]\n'):
self.options.disableCaching = disable_caching
read_write_job = Job.wrapJobFn(self._testWriteReadGlobalFilePermissions, executable=executable)
Job.Runner.startToil(read_write_job, self.options)
@staticmethod
def _testWriteReadGlobalFilePermissions(job, executable):
srcFile = job.fileStore.getLocalTempFile()
with open(srcFile, 'w') as f:
f.write('Hello')
if executable:
os.chmod(srcFile, os.stat(srcFile).st_mode | stat.S_IXUSR)
# Initial file owner execute permissions
initialPermissions = os.stat(srcFile).st_mode & stat.S_IXUSR
fileID = job.fileStore.writeGlobalFile(srcFile)
for mutable in True, False:
for symlink in True, False:
dstFile = job.fileStore.getLocalTempFileName()
job.fileStore.readGlobalFile(fileID, userPath=dstFile, mutable=mutable, symlink=symlink)
# Current file owner execute permissions
currentPermissions = os.stat(dstFile).st_mode & stat.S_IXUSR
assert initialPermissions == currentPermissions, f'{initialPermissions} != {currentPermissions}'
def testWriteExportFileCompatibility(self):
"""
Ensures that files created in a job preserve their executable permissions
when they are exported from the leader.
"""
for executable in True, False:
export_file_job = Job.wrapJobFn(self._testWriteExportFileCompatibility, executable=executable)
with Toil(self.options) as toil:
initialPermissions, fileID = toil.start(export_file_job)
dstFile = os.path.join(self._createTempDir(), str(uuid4()))
toil.exportFile(fileID, 'file://' + dstFile)
currentPermissions = os.stat(dstFile).st_mode & stat.S_IXUSR
assert initialPermissions == currentPermissions, f'{initialPermissions} != {currentPermissions}'
@staticmethod
def _testWriteExportFileCompatibility(job, executable):
srcFile = job.fileStore.getLocalTempFile()
with open(srcFile, 'w') as f:
f.write('Hello')
if executable:
os.chmod(srcFile, os.stat(srcFile).st_mode | stat.S_IXUSR)
initialPermissions = os.stat(srcFile).st_mode & stat.S_IXUSR
fileID = job.fileStore.writeGlobalFile(srcFile)
return initialPermissions, fileID
def testImportReadFileCompatibility(self):
"""
Ensures that files imported to the leader preserve their executable permissions
when they are read by the fileStore.
"""
with Toil(self.options) as toil:
for executable in True, False:
file_path = self.create_file(content='Hello', executable=executable)
initial_permissions = os.stat(file_path).st_mode & stat.S_IXUSR
file_id = toil.importFile(f'file://{file_path}')
for mutable in True, False:
for symlink in True, False:
with self.subTest(f'Now testing readGlobalFileWith: mutable={mutable} symlink={symlink}'):
A = Job.wrapJobFn(self._testImportReadFileCompatibility,
fileID=file_id,
initialPermissions=initial_permissions,
mutable=mutable,
symlink=symlink)
toil.start(A)
@staticmethod
def _testImportReadFileCompatibility(job, fileID, initialPermissions, mutable, symlink):
dstFile = job.fileStore.readGlobalFile(fileID, mutable=mutable, symlink=symlink)
currentPermissions = os.stat(dstFile).st_mode & stat.S_IXUSR
assert initialPermissions == currentPermissions
def testReadWriteFileStreamTextMode(self):
"""
Checks if text mode is compatible with file streams.
"""
with Toil(self.options) as toil:
A = Job.wrapJobFn(self._testReadWriteFileStreamTextMode)
toil.start(A)
@staticmethod
def _testReadWriteFileStreamTextMode(job):
with job.fileStore.writeGlobalFileStream(encoding='utf-8') as (stream, fileID):
stream.write('foo')
job.fileStore.readGlobalFileStream(fileID)
with job.fileStore.readGlobalFileStream(fileID, encoding='utf-8') as stream2:
assert 'foo' == stream2.read()
@staticmethod
def _writeFileToJobStore(job, isLocalFile, nonLocalDir=None, fileMB=1):
"""
This function creates a file and writes it to the jobstore.
:param bool isLocalFile: Is the file local(T) or Non-Local(F)?
:param str nonLocalDir: A dir to write the file to. If unspecified, a local directory
is created.
:param int fileMB: Size of the created file in MB
"""
if isLocalFile:
work_dir = job.fileStore.getLocalTempDir()
else:
assert nonLocalDir is not None
work_dir = nonLocalDir
with open(os.path.join(work_dir, str(uuid4())), 'wb') as testFile:
testFile.write(os.urandom(fileMB * 1024 * 1024))
return job.fileStore.writeGlobalFile(testFile.name), testFile
class AbstractNonCachingFileStoreTest(AbstractFileStoreTest, metaclass=ABCMeta):
"""
Abstract tests for the various functions in
:class:toil.fileStores.nonCachingFileStore.NonCachingFileStore. These
tests are general enough that they can also be used for
:class:toil.fileStores.CachingFileStore.
"""
def setUp(self):
super(hidden.AbstractNonCachingFileStoreTest, self).setUp()
self.options.disableCaching = True
class AbstractCachingFileStoreTest(AbstractFileStoreTest, metaclass=ABCMeta):
"""
Abstract tests for the various cache-related functions in
:class:toil.fileStores.cachingFileStore.CachingFileStore.
"""
def setUp(self):
super(hidden.AbstractCachingFileStoreTest, self).setUp()
self.options.disableCaching = False
@slow
def testExtremeCacheSetup(self):
"""
Try to create the cache with a bad worker active and then have 10 child jobs try to run in
the chain. This tests whether the cache is created properly even when the job crashes
randomly.
"""
if testingIsAutomatic and self.jobStoreType != 'file':
self.skipTest("To save time")
self.options.retryCount = 20
self.options.badWorker = 0.5
self.options.badWorkerFailInterval = 0.1
for test in range(0, 20):
E = Job.wrapJobFn(self._uselessFunc)
F = Job.wrapJobFn(self._uselessFunc)
jobs = {}
for i in range(0, 10):
jobs[i] = Job.wrapJobFn(self._uselessFunc)
E.addChild(jobs[i])
jobs[i].addChild(F)
Job.Runner.startToil(E, self.options)
@slow
def testCacheEvictionPartialEvict(self):
"""
Ensure the cache eviction happens as expected. Two files (20MB and 30MB) are written
sequentially into the job store in separate jobs. The cache max is force set to 50MB.
A Third Job requests 10MB of disk requiring eviction of the 1st file. Ensure that the
behavior is as expected.
"""
self._testValidityOfCacheEvictTest()
# Explicitly set clean to always so even the failed cases get cleaned (This will
# overwrite the value set in setUp if it is ever changed in the future)
self.options.clean = 'always'
self._testCacheEviction(file1MB=20, file2MB=30, diskRequestMB=10)
@slow
def testCacheEvictionTotalEvict(self):
"""
Ensure the cache eviction happens as expected. Two files (20MB and 30MB) are written
sequentially into the job store in separate jobs. The cache max is force set to 50MB.
A third job requests 30MB of disk, requiring eviction of both files. Ensure that the
behavior is as expected.
"""
self._testValidityOfCacheEvictTest()
# Explicitly set clean to always so even the failed cases get cleaned (This will
# overwrite the value set in setUp if it is ever changed in the future)
self.options.clean = 'always'
self._testCacheEviction(file1MB=20, file2MB=30, diskRequestMB=30)
@slow
def testCacheEvictionFailCase(self):
"""
Ensure the cache eviction happens as expected. Two files (20MB and 30MB) are written
sequentially into the job store in separate jobs. The cache max is force set to 50MB.
A third job requests 60MB of disk, which cannot be satisfied even after evicting
both files, so the run is expected to fail.
"""
self._testValidityOfCacheEvictTest()
# Explicitly set clean to always so even the failed cases get cleaned (This will
# overwrite the value set in setUp if it is ever changed in the future)
self.options.clean = 'always'
self._testCacheEviction(file1MB=20, file2MB=30, diskRequestMB=60)
def _testValidityOfCacheEvictTest(self):
# If the job store and cache are on the same file system, file
# sizes are accounted for by the job store and are not reflected in
# the cache hence this test is redundant (caching will be free).
if not self.options.jobStore.startswith(('aws', 'google')):
workDirDev = os.stat(self.options.workDir).st_dev
jobStoreDev = os.stat(os.path.dirname(self.options.jobStore)).st_dev
if workDirDev == jobStoreDev:
self.skipTest('Job store and working directory are on the same filesystem.')
def _testCacheEviction(self, file1MB, file2MB, diskRequestMB):
"""
Ensure the cache eviction happens as expected. Two files (20MB and 30MB) are written
sequentially into the job store in separate jobs. The cache max is force set to 50MB.
A Third Job requests either 10, 30 or 60MB -- requiring eviction of 1 file, both files,
or results in an error due to lack of space, respectively. Ensure that the behavior is
as expected.
"""
self.options.retryCount = 0
if diskRequestMB > 50:
# This can be a non-int as it will never reach _probeJobReqs
expectedResult = 'Fail'
else:
expectedResult = 50 - file1MB if diskRequestMB <= file1MB else 0
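# Worked through for the three callers above (cache cap forced to 50MB, files of
# 20MB and 30MB already cached):
#   diskRequestMB=10 -> evicting the 20MB file frees enough space, so 30MB stays cached
#   diskRequestMB=30 -> both files must be evicted, so 0MB stays cached
#   diskRequestMB=60 -> exceeds the 50MB cap outright, so the run is expected to fail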
try:
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=True,
fileMB=file1MB)
# Sleep for 1 second after writing the first file so that their ctimes are
# guaranteed to be distinct for the purpose of this test.
B = Job.wrapJobFn(self._sleepy, timeToSleep=1)
C = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=True,
fileMB=file2MB)
D = Job.wrapJobFn(self._adjustCacheLimit, newTotalMB=50, disk='0Mi')
E = Job.wrapJobFn(self._uselessFunc, disk=''.join([str(diskRequestMB), 'Mi']))
# Set it to > 2GB such that the cleanup jobs don't die in the non-fail cases
F = Job.wrapJobFn(self._adjustCacheLimit, newTotalMB=5000, disk='10Mi')
G = Job.wrapJobFn(self._probeJobReqs, sigmaJob=100, cached=expectedResult,
disk='100Mi')
A.addChild(B)
B.addChild(C)
C.addChild(D)
D.addChild(E)
E.addChild(F)
F.addChild(G)
Job.Runner.startToil(A, self.options)
except FailedJobsException as err:
with open(self.options.logFile) as f:
logContents = f.read()
if CacheUnbalancedError.message in logContents:
self.assertEqual(expectedResult, 'Fail')
else:
self.fail('Toil did not raise the expected CacheUnbalancedError but failed for some other reason')
@staticmethod
def _writeFileToJobStoreWithAsserts(job, isLocalFile, nonLocalDir=None, fileMB=1, expectAsyncUpload=True):
"""
This function creates a file and writes it to the jobstore.
:param bool isLocalFile: Is the file local(T) (i.e. in the file
store managed temp dir) or Non-Local(F)?
Non-local files should not be cached.
:param str nonLocalDir: A dir to write the file to. If unspecified, a local directory
is created.
:param int fileMB: Size of the created file in MB
:param bool expectAsyncUpload: Whether we expect the upload to hit
the job store later(T) or immediately(F)
"""
cls = hidden.AbstractNonCachingFileStoreTest
fsID, testFile = cls._writeFileToJobStore(job, isLocalFile, nonLocalDir, fileMB)
actual = os.stat(testFile.name).st_nlink
# If the caching is free, the job store must have hard links to
# everything the file store has.
expectJobStoreLink = job.fileStore.cachingIsFree()
# How many links ought this file to have?
expected = 1
if isLocalFile:
# We expect a hard link into the cache and not a copy
expected += 1
if expectJobStoreLink and not expectAsyncUpload:
# We also expect a link in the job store
expected += 1
assert actual == expected, 'Should have %d links. Got %d.' % (expected, actual)
logger.info('Uploaded %s with %d links', fsID, actual)
if not isLocalFile:
# Make sure it isn't cached if we don't want it to be
assert not job.fileStore.fileIsCached(fsID), "File uploaded from non-local-temp directory %s should not be cached" % nonLocalDir
return fsID
@staticmethod
def _sleepy(job, timeToSleep):
"""
I'm waiting for prince charming... but only for timeToSleep seconds.
:param int timeToSleep: Time in seconds
"""
time.sleep(timeToSleep)
@staticmethod
def _adjustCacheLimit(job, newTotalMB):
"""
This function tells the cache to adopt a new "total" value =
newTotalMB, changing the maximum cache disk space allowed for the
run.
:param int newTotalMB: New total cache disk space limit in MB.
"""
# Convert to bytes and pass on to the actual cache
job.fileStore.adjustCacheLimit(float(newTotalMB * 1024 * 1024))
@staticmethod
def _probeJobReqs(job, total=None, cached=None, sigmaJob=None):
"""
Probes the cacheLockFile to ensure the values for total, cached and sigmaJob are as expected.
Can also specify combinations of the requirements if desired.
:param int total: Expected Total Space available for caching in MB.
:param int cached: Expected Total size of files in the cache in MB.
:param int sigmaJob: Expected sum of job requirements in MB.
"""
RealtimeLogger.info('Probing job requirements')
valueDict = locals()
assert (total or cached or sigmaJob)
# Work out which function to call for which value
toCall = {'total': job.fileStore.getCacheLimit,
'cached': job.fileStore.getCacheUsed,
'sigmaJob': job.fileStore.getCacheExtraJobSpace}
for value in ('total', 'cached', 'sigmaJob'):
# If the value wasn't provided, it is None and should be ignored
if valueDict[value] is None:
continue
RealtimeLogger.info('Probing cache state: %s', value)
expectedBytes = valueDict[value] * 1024 * 1024
cacheInfoBytes = toCall[value]()
RealtimeLogger.info('Got %d for %s; expected %d', cacheInfoBytes, value, expectedBytes)
assert cacheInfoBytes == expectedBytes, 'Testing %s: Expected ' % value + \
f'{expectedBytes} but got {cacheInfoBytes}.'
@slow
def testAsyncWriteWithCaching(self):
"""
Ensure the Async Writing of files happens as expected. The first Job forcefully
modifies the cache size to 1GB. The second asks for 1GB of disk and writes a 900MB
file into cache then rewrites it to the job store triggering an async write since the
two unique jobstore IDs point to the same local file. Also, the second write is not
cached since the first was written to cache, and there "isn't enough space" to cache the
second. Immediately assert that the second write isn't cached, and is being
asynchronously written to the job store.
Attempting to get the file from the jobstore should not fail.
"""
self.options.retryCount = 0
self.options.logLevel = 'DEBUG'
A = Job.wrapJobFn(self._adjustCacheLimit, newTotalMB=1024, disk='1G')
B = Job.wrapJobFn(self._doubleWriteFileToJobStore, fileMB=850, disk='900M')
C = Job.wrapJobFn(self._readFromJobStoreWithoutAssertions, fsID=B.rv(), disk='1G')
# Set it to > 2GB such that the cleanup jobs don't die.
D = Job.wrapJobFn(self._adjustCacheLimit, newTotalMB=5000, disk='1G')
A.addChild(B)
B.addChild(C)
C.addChild(D)
Job.Runner.startToil(A, self.options)
@staticmethod
def _doubleWriteFileToJobStore(job, fileMB):
"""
Write a local file to job store, then write it again. The second should trigger an
async write.
:param job: job
:param fileMB: File Size
:return: Job store file ID for second written file
"""
job.fileStore.logToMaster('Double writing a file into job store')
work_dir = job.fileStore.getLocalTempDir()
with open(os.path.join(work_dir, str(uuid4())), 'wb') as testFile:
testFile.write(os.urandom(fileMB * 1024 * 1024))
job.fileStore.logToMaster('Writing copy 1 and discarding ID')
job.fileStore.writeGlobalFile(testFile.name)
job.fileStore.logToMaster('Writing copy 2 and saving ID')
fsID = job.fileStore.writeGlobalFile(testFile.name)
job.fileStore.logToMaster(f'Copy 2 ID: {fsID}')
hidden.AbstractCachingFileStoreTest._readFromJobStoreWithoutAssertions(job, fsID)
job.fileStore.logToMaster('Writing copy 3 and returning ID')
return job.fileStore.writeGlobalFile(testFile.name)
@staticmethod
def _readFromJobStoreWithoutAssertions(job, fsID):
"""
Reads a file from the job store. That will be all, thank you.
:param job: job
:param fsID: Job store file ID for the read file
:return: None
"""
job.fileStore.logToMaster('Reading the written file')
job.fileStore.readGlobalFile(fsID)
# writeGlobalFile tests
@travis_test
def testWriteNonLocalFileToJobStore(self):
"""
Write a file not in localTempDir to the job store. Such a file should not be cached.
Ensure the file is not cached.
"""
workdir = self._createTempDir(purpose='nonLocalDir')
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=False,
nonLocalDir=workdir)
Job.Runner.startToil(A, self.options)
@travis_test
def testWriteLocalFileToJobStore(self):
"""
Write a file from the localTempDir to the job store. Such a file will be cached by
default. Ensure the file is cached.
"""
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=True)
Job.Runner.startToil(A, self.options)
# readGlobalFile tests
@travis_test
def testReadCacheMissFileFromJobStoreWithoutCachingReadFile(self):
"""
Read a file from the file store that does not have a corresponding cached copy. Do not
cache the read file. Ensure the number of links on the file are appropriate.
"""
self._testCacheMissFunction(cacheReadFile=False)
@travis_test
def testReadCacheMissFileFromJobStoreWithCachingReadFile(self):
"""
Read a file from the file store that does not have a corresponding cached copy. Cache
the read file. Ensure the number of links on the file are appropriate.
"""
self._testCacheMissFunction(cacheReadFile=True)
def _testCacheMissFunction(self, cacheReadFile):
"""
This is the function that actually does what the 2 cache miss functions want.
:param cacheReadFile: Does the read file need to be cached(T) or not(F)
"""
workdir = self._createTempDir(purpose='nonLocalDir')
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=False,
nonLocalDir=workdir)
B = Job.wrapJobFn(self._readFromJobStore, isCachedFile=False,
cacheReadFile=cacheReadFile, fsID=A.rv())
A.addChild(B)
Job.Runner.startToil(A, self.options)
@staticmethod
def _readFromJobStore(job, isCachedFile, cacheReadFile, fsID, isTest=True):
"""
Read a file from the filestore. If the file was cached, ensure it was hard linked
correctly. If it wasn't, ensure it was put into cache.
Note that we may see hard links when we don't expect it based on
caching, because immutable reads from the FileJobStore can be
fulfilled by hardlinks. We only do immutable reads.
:param bool isCachedFile: Flag. Was the read file read from cache(T)? If so, we look for a hard link.
:param bool cacheReadFile: Should the file that is read be cached(T)?
:param str fsID: job store file ID
:param bool isTest: Is this being run as a test(T) or an accessory to another test(F)?
"""
work_dir = job.fileStore.getLocalTempDir()
wantHardLink = False
if isCachedFile:
outfile = job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, 'temp']),
mutable=False)
wantHardLink = True
else:
if cacheReadFile:
outfile = job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, 'temp']),
cache=True, mutable=False)
wantHardLink = True
else:
assert not job.fileStore.fileIsCached(fsID), "File mistakenly cached before read"
outfile = job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, 'temp']),
cache=False, mutable=False)
assert not job.fileStore.fileIsCached(fsID), "File mistakenly cached after read"
wantHardLink = False
if isTest:
actual = os.stat(outfile).st_nlink
if wantHardLink:
assert actual > 1, 'Should have multiple links for file that was %s and %s. Got %i.' % ('cached' if isCachedFile else 'not cached',
'saved' if cacheReadFile else 'not saved', actual)
# We need to accept hard links even if we don't want them,
# because we may get them straight from the FileJobStore since
# we asked for immutable reads.
return None
else:
return outfile
@travis_test
def testReadCacheHitFileFromJobStore(self):
"""
Read a file from the file store that has a corresponding cached copy. Ensure the number
of links on the file are appropriate.
"""
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=True)
B = Job.wrapJobFn(self._readFromJobStore, isCachedFile=True, cacheReadFile=None,
fsID=A.rv())
A.addChild(B)
Job.Runner.startToil(A, self.options)
@slow
def testMultipleJobsReadSameCacheHitGlobalFile(self):
"""
Write a local file to the job store (hence adding a copy to cache), then have 10 jobs
read it. Assert cached file size never goes up, assert unused job
required disk space is always:
(a multiple of job reqs) - (number of current file readers * filesize).
At the end, assert the cache shows unused job-required space = 0.
"""
self._testMultipleJobsReadGlobalFileFunction(cacheHit=True)
@slow
def testMultipleJobsReadSameCacheMissGlobalFile(self):
"""
Write a non-local file to the job store (hence no cached copy), then have 10 jobs read
it. Assert cached file size never goes up, assert unused job
required disk space is always:
(a multiple of job reqs) - (number of current file readers * filesize).
At the end, assert the cache shows unused job-required space = 0.
"""
self._testMultipleJobsReadGlobalFileFunction(cacheHit=False)
def _testMultipleJobsReadGlobalFileFunction(self, cacheHit):
"""
This function does what the two Multiple File reading tests want to do
:param bool cacheHit: Is the test for the CacheHit case(T) or cacheMiss case(F)
"""
dirPurpose = 'tempWriteDir' if cacheHit else 'nonLocalDir'
workdir = self._createTempDir(purpose=dirPurpose)
with open(os.path.join(workdir, 'test'), 'w') as x:
x.write(str(0))
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=cacheHit,
nonLocalDir=workdir,
fileMB=256)
B = Job.wrapJobFn(self._probeJobReqs, sigmaJob=100, disk='100Mi')
jobs = {}
for i in range(0, 10):
jobs[i] = Job.wrapJobFn(self._multipleFileReader, diskMB=1024, fsID=A.rv(),
maxWriteFile=os.path.abspath(x.name), disk='1Gi',
memory='10Mi', cores=1)
A.addChild(jobs[i])
jobs[i].addChild(B)
Job.Runner.startToil(A, self.options)
with open(x.name) as y:
assert int(y.read()) > 2
@staticmethod
def _multipleFileReader(job, diskMB, fsID, maxWriteFile):
"""
Read a file from the job store immutably and explicitly ask to have it in the cache.
If caching files is free, assert used cache space is zero, else
assert it is equal to the read file.
Also assert that the sum of job reqs + (number of readers of file * filesize) is
an integer multiple of the disk requirements provided to this job.
:param int diskMB: disk requirements provided to the job
:param str fsID: job store file ID
:param str maxWriteFile: path to file where the max number of concurrent readers of
file will be written
"""
work_dir = job.fileStore.getLocalTempDir()
outfile = job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, 'temp']), cache=True,
mutable=False)
diskBytes = diskMB * 1024 * 1024
fileStats = os.stat(outfile)
fileSize = fileStats.st_size
currentReaders = job.fileStore.getFileReaderCount(fsID)
extraJobSpace = job.fileStore.getCacheExtraJobSpace()
usedCache = job.fileStore.getCacheUsed()
logger.info('Extra job space: %s', str(extraJobSpace))
logger.info('Current file readers: %s', str(currentReaders))
logger.info('File size: %s', str(fileSize))
logger.info('Job disk bytes: %s', str(diskBytes))
logger.info('Used cache: %s', str(usedCache))
with open(maxWriteFile, 'r+') as x:
prev_max = int(x.read())
x.seek(0)
x.truncate()
x.write(str(max(prev_max, currentReaders)))
if job.fileStore.cachingIsFree():
# No space should be used when caching is free
assert usedCache == 0.0
else:
# The right amount of space should be used otherwise
assert usedCache == fileSize
# Make sure that there's no over-usage of job requirements
assert ((extraJobSpace + currentReaders * fileSize) %
diskBytes) == 0.0
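# Illustrative arithmetic for the assert above: if, as this test expects, each of
# three concurrent readers has reserved diskMB=1024 and been charged 256MB for the
# cached file, then extraJobSpace is 3 * (1024 - 256)MB and adding back 3 * 256MB
# gives 3 * 1024MB, an exact multiple of diskBytes.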
# Sleep so there are no race conditions where a job ends before another can get a hold of
# the file
time.sleep(3)
@staticmethod
def _writeExportGlobalFile(job):
fileName = os.path.join(job.fileStore.getLocalTempDir(), 'testfile')
with open(fileName, 'wb') as f:
f.write(os.urandom(1024 * 30000)) # 30 Mb
outputFile = os.path.join(job.fileStore.getLocalTempDir(), 'exportedFile')
job.fileStore.export_file(job.fileStore.writeGlobalFile(fileName), 'File://' + outputFile)
if not filecmp.cmp(fileName, outputFile):
logger.warning('Source file: %s', str(os.stat(fileName)))
logger.warning('Destination file: %s', str(os.stat(outputFile)))
raise RuntimeError(f"File {fileName} did not properly get copied to {outputFile}")
@slow
def testFileStoreExportFile(self):
# Tests that files written to job store can be immediately exported
# motivated by https://github.com/BD2KGenomics/toil/issues/1469
root = Job.wrapJobFn(self._writeExportGlobalFile)
Job.Runner.startToil(root, self.options)
# Testing for the return of file sizes to the sigma job pool.
@slow
def testReturnFileSizes(self):
"""
Write a couple of files to the jobstore. Delete a couple of them.
Read back written and locally deleted files. Ensure that after
every step the cache is in a valid state.
"""
workdir = self._createTempDir(purpose='nonLocalDir')
F = Job.wrapJobFn(self._returnFileTestFn,
jobDisk=2 * 1024 * 1024 * 1024,
initialCachedSize=0,
nonLocalDir=workdir,
disk='2Gi')
Job.Runner.startToil(F, self.options)
@slow
def testReturnFileSizesWithBadWorker(self):
"""
Write a couple of files to the jobstore. Delete a couple of them.
Read back written and locally deleted files. Ensure that after
every step the cache is in a valid state.
"""
self.options.retryCount = 20
self.options.badWorker = 0.5
self.options.badWorkerFailInterval = 0.1
workdir = self._createTempDir(purpose='nonLocalDir')
F = Job.wrapJobFn(self._returnFileTestFn,
jobDisk=2 * 1024 * 1024 * 1024,
initialCachedSize=0,
nonLocalDir=workdir,
numIters=30, disk='2Gi')
Job.Runner.startToil(F, self.options)
@staticmethod
def _returnFileTestFn(job, jobDisk, initialCachedSize, nonLocalDir, numIters=100):
"""
Aux function for jobCacheTest.testReturnFileSizes. Conducts numIters operations and ensures
the cache has the right amount of data in it at all times.
Track the cache calculations even though they won't be used in the FileJobStore.
Assumes nothing is evicted from the cache.
:param float jobDisk: The value of disk passed to this job.
"""
cached = initialCachedSize
RealtimeLogger.info('Expecting %d bytes cached initially', cached)
work_dir = job.fileStore.getLocalTempDir()
writtenFiles = {} # fsID: (size, isLocal)
# fsid: local/mutable/immutable for all operations that should make local files as tracked by the FileStore
localFileIDs = collections.defaultdict(list)
# Add one file for the sake of having something in the job store
writeFileSize = random.randint(0, 30)
jobDisk -= writeFileSize * 1024 * 1024
# We keep jobDisk in sync with the amount of free space the job
# still has that the file store doesn't know it has used.
cls = hidden.AbstractCachingFileStoreTest
fsId = cls._writeFileToJobStoreWithAsserts(job, isLocalFile=True, fileMB=writeFileSize)
writtenFiles[fsId] = writeFileSize
if job.fileStore.fileIsCached(list(writtenFiles.keys())[0]):
cached += writeFileSize * 1024 * 1024
RealtimeLogger.info('Expecting %d bytes cached because file of %d MB is cached', cached, writeFileSize)
else:
RealtimeLogger.info('Expecting %d bytes cached because file of %d MB is not cached', cached, writeFileSize)
localFileIDs[list(writtenFiles.keys())[0]].append('local')
RealtimeLogger.info('Checking for %d bytes cached', cached)
cls._requirementsConcur(job, jobDisk, cached)
i = 0
while i <= numIters:
randVal = random.random()
if randVal < 0.33: # Write
RealtimeLogger.info('Writing a file')
writeFileSize = random.randint(0, 30)
if random.random() <= 0.5: # Write a local file
RealtimeLogger.info('Writing a local file of %d MB', writeFileSize)
fsID = cls._writeFileToJobStoreWithAsserts(job, isLocalFile=True,
fileMB=writeFileSize)
RealtimeLogger.info('Wrote local file: %s', fsID)
writtenFiles[fsID] = writeFileSize
localFileIDs[fsID].append('local')
jobDisk -= writeFileSize * 1024 * 1024
if job.fileStore.fileIsCached(fsID):
cached += writeFileSize * 1024 * 1024
RealtimeLogger.info('Expecting %d bytes cached because file of %d MB is cached', cached, writeFileSize)
else:
RealtimeLogger.info('Expecting %d bytes cached because file of %d MB is not cached', cached, writeFileSize)
else: # Write a non-local file
RealtimeLogger.info('Writing a non-local file of %d MB', writeFileSize)
fsID = cls._writeFileToJobStoreWithAsserts(job, isLocalFile=False,
nonLocalDir=nonLocalDir,
fileMB=writeFileSize)
RealtimeLogger.info('Wrote non-local file: %s', fsID)
writtenFiles[fsID] = writeFileSize
# Don't record in localFileIDs because we're not local
# No change to the job since there was no caching
RealtimeLogger.info('Checking for %d bytes cached', cached)
cls._requirementsConcur(job, jobDisk, cached)
else:
if len(writtenFiles) == 0:
continue
else:
fsID, rdelFileSize = random.choice(list(writtenFiles.items()))
rdelRandVal = random.random()
fileWasCached = job.fileStore.fileIsCached(fsID)
if randVal < 0.66: # Read
RealtimeLogger.info('Reading a file with size %d and previous cache status %s: %s', rdelFileSize, str(fileWasCached), fsID)
if rdelRandVal <= 0.5: # Read as mutable, uncached
RealtimeLogger.info('Reading as mutable and uncached; should still have %d bytes cached', cached)
job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, str(uuid4())]),
mutable=True, cache=False)
localFileIDs[fsID].append('mutable')
# No change because the file wasn't cached
else: # Read as immutable
RealtimeLogger.info('Reading as immutable and cacheable')
job.fileStore.readGlobalFile(fsID, '/'.join([work_dir, str(uuid4())]),
mutable=False, cache=True)
localFileIDs[fsID].append('immutable')
jobDisk -= rdelFileSize * 1024 * 1024
if not fileWasCached:
if job.fileStore.fileIsCached(fsID):
RealtimeLogger.info('File was not cached before and is now. Should have %d bytes cached', cached)
cached += rdelFileSize * 1024 * 1024
else:
RealtimeLogger.info('File was not cached before and still is not now. '
'Should still have %d bytes cached', cached)
else:
RealtimeLogger.info('File was cached before. Should still have %d bytes cached', cached)
cls._requirementsConcur(job, jobDisk, cached)
else: # Delete
if rdelRandVal <= 0.5: # Local Delete
if fsID not in list(localFileIDs.keys()):
continue
RealtimeLogger.info('Deleting a file locally with history %s: %s', localFileIDs[fsID], fsID)
job.fileStore.deleteLocalFile(fsID)
else: # Global Delete
RealtimeLogger.info('Deleting a file globally: %s', fsID)
job.fileStore.deleteGlobalFile(fsID)
try:
job.fileStore.readGlobalFile(fsID)
except FileNotFoundError as err:
pass
except:
raise RuntimeError('Got wrong error type for read of deleted file')
else:
raise RuntimeError('Able to read deleted file')
writtenFiles.pop(fsID)
if fsID in list(localFileIDs.keys()):
for lFID in localFileIDs[fsID]:
if lFID != 'mutable':
jobDisk += rdelFileSize * 1024 * 1024
localFileIDs.pop(fsID)
if fileWasCached:
if not job.fileStore.fileIsCached(fsID):
cached -= rdelFileSize * 1024 * 1024
RealtimeLogger.info('File was cached before and is not now. Should have %d bytes cached', cached)
else:
RealtimeLogger.info('File was cached before and still is cached now. '
'Should still have %d bytes cached', cached)
else:
RealtimeLogger.info('File was not cached before deletion. Should still have %d bytes cached', cached)
cls._requirementsConcur(job, jobDisk, cached)
i += 1
return jobDisk, cached
@staticmethod
def _requirementsConcur(job, jobDisk, cached):
"""
Assert the values for job disk and total cached file sizes tracked
by the file store are equal to the values we expect.
"""
used = job.fileStore.getCacheUsed()
if not job.fileStore.cachingIsFree():
RealtimeLogger.info('Caching is not free; %d bytes are used and %d bytes are expected', used, cached)
assert used == cached, 'Cache should have %d bytes used, but actually has %d bytes used' % (cached, used)
else:
RealtimeLogger.info('Caching is free; %d bytes are used and %d bytes would be expected if caching were not free', used, cached)
assert used == 0, 'Cache should have nothing in it, but actually has %d bytes used' % used
jobUnused = job.fileStore.getCacheUnusedJobRequirement()
assert jobUnused == jobDisk, 'Job should have %d bytes of disk for non-FileStore use but the FileStore reports %d' % (jobDisk, jobUnused)
# Testing the resumability of a failed worker
@slow
def testControlledFailedWorkerRetry(self):
"""
Conduct a couple of job store operations. Then die. Ensure that the restarted job is
tracking values in the cache state file appropriately.
"""
workdir = self._createTempDir(purpose='nonLocalDir')
self.options.retryCount = 1
jobDiskBytes = 2 * 1024 * 1024 * 1024
F = Job.wrapJobFn(self._controlledFailTestFn, jobDisk=jobDiskBytes,
testDir=workdir,
disk=jobDiskBytes)
G = Job.wrapJobFn(self._probeJobReqs, sigmaJob=100, disk='100Mi')
F.addChild(G)
Job.Runner.startToil(F, self.options)
@staticmethod
def _controlledFailTestFn(job, jobDisk, testDir):
"""
This is the aux function for the controlled failed worker test. It does a couple of
cache operations, fails, then checks whether the new worker starts with the expected
value, and whether it computes cache statistics correctly.
:param float jobDisk: Disk space supplied for this job
:param str testDir: Testing directory
"""
# Make sure we actually have the disk size we are supposed to
job.fileStore.logToMaster('Job is running with %d bytes of disk, %d requested' % (job.disk, jobDisk))
assert job.disk == jobDisk, 'Job was scheduled with %d bytes but requested %d' % (job.disk, jobDisk)
cls = hidden.AbstractCachingFileStoreTest
if os.path.exists(os.path.join(testDir, 'testfile.test')):
with open(os.path.join(testDir, 'testfile.test'), 'rb') as fH:
cached = unpack('d', fH.read())[0]
RealtimeLogger.info('Loaded expected cache size of %d from testfile.test', cached)
cls._requirementsConcur(job, jobDisk, cached)
cls._returnFileTestFn(job, jobDisk, cached, testDir, 20)
else:
RealtimeLogger.info('Expecting cache size of 0 because testfile.test is absent')
modifiedJobReqs, cached = cls._returnFileTestFn(job, jobDisk, 0, testDir, 20)
with open(os.path.join(testDir, 'testfile.test'), 'wb') as fH:
fH.write(pack('d', cached))
RealtimeLogger.info('Wrote cache size of %d to testfile.test', cached)
os.kill(os.getpid(), signal.SIGKILL)
@slow
def testRemoveLocalMutablyReadFile(self):
"""
If a mutably read file is deleted by the user, it is ok.
"""
self._deleteLocallyReadFilesFn(readAsMutable=True)
@slow
def testRemoveLocalImmutablyReadFile(self):
"""
If an immutably read file is deleted by the user, it is not ok.
"""
self._deleteLocallyReadFilesFn(readAsMutable=False)
def _deleteLocallyReadFilesFn(self, readAsMutable):
self.options.retryCount = 0
A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=True, memory='10M')
B = Job.wrapJobFn(self._removeReadFileFn, A.rv(), readAsMutable=readAsMutable,
memory='20M')
A.addChild(B)
Job.Runner.startToil(A, self.options)
@staticmethod
def _removeReadFileFn(job, fileToDelete, readAsMutable):
"""
Accept a file. Run os.remove on it. Then attempt to delete it locally. This will raise
an error for files read immutably.
Then write a new file to the jobstore and try to do the same. This should always raise
an error
:param fileToDelete: File written to the job store that is tracked by the cache
"""
work_dir = job.fileStore.getLocalTempDir()
# Are we processing the read file or the written file?
        processingReadFile = True
# Read in the file
outfile = job.fileStore.readGlobalFile(fileToDelete, os.path.join(work_dir, 'temp'),
mutable=readAsMutable)
tempfile = os.path.join(work_dir, 'tmp.tmp')
        # The first time we run this loop, processingReadFile is True and fileToDelete is the
        # file read from the job store. The second time, processingReadFile is False and
# fileToDelete is one that was just written in to the job store. Ensure the correct
# behaviour is seen in both conditions.
while True:
os.rename(outfile, tempfile)
try:
job.fileStore.deleteLocalFile(fileToDelete)
except IllegalDeletionCacheError:
job.fileStore.logToMaster('Detected a deleted file %s.' % fileToDelete)
os.rename(tempfile, outfile)
else:
# If we are processing the write test, or if we are testing the immutably read
# file, we should not reach here.
                assert processingReadFile and readAsMutable
            if processingReadFile:
                processingReadFile = False
# Write a file
with open(os.path.join(work_dir, str(uuid4())), 'wb') as testFile:
testFile.write(os.urandom(1 * 1024 * 1024))
fileToDelete = job.fileStore.writeGlobalFile(testFile.name)
outfile = testFile.name
else:
break
@travis_test
def testDeleteLocalFile(self):
"""
Test the deletion capabilities of deleteLocalFile
"""
self.options.retryCount = 0
workdir = self._createTempDir(purpose='nonLocalDir')
A = Job.wrapJobFn(self._deleteLocalFileFn, nonLocalDir=workdir)
Job.Runner.startToil(A, self.options)
@staticmethod
def _deleteLocalFileFn(job, nonLocalDir):
"""
Test deleteLocalFile on a local write, non-local write, read, mutable read, and bogus
jobstore IDs.
"""
work_dir = job.fileStore.getLocalTempDir()
# Write local file
with open(os.path.join(work_dir, str(uuid4())), 'wb') as localFile:
localFile.write(os.urandom(1 * 1024 * 1024))
localFsID = job.fileStore.writeGlobalFile(localFile.name)
# write Non-Local File
with open(os.path.join(nonLocalDir, str(uuid4())), 'wb') as nonLocalFile:
nonLocalFile.write(os.urandom(1 * 1024 * 1024))
nonLocalFsID = job.fileStore.writeGlobalFile(nonLocalFile.name)
# Delete fsid of local file. The file should be deleted
job.fileStore.deleteLocalFile(localFsID)
assert not os.path.exists(localFile.name)
# Delete fsid of non-local file. It should fail and the file should persist
try:
job.fileStore.deleteLocalFile(nonLocalFsID)
except OSError as e:
if e.errno != errno.ENOENT:
# This is supposed to produce an
# ENOENT. If it doesn't something is
# broken.
raise
else:
assert False, "Error should have been raised"
assert os.path.exists(nonLocalFile.name)
# Read back one file and then delete it
readBackFile1 = job.fileStore.readGlobalFile(localFsID)
job.fileStore.deleteLocalFile(localFsID)
assert not os.path.exists(readBackFile1)
# Read back one file with 2 different names and then delete it. Assert both get deleted
readBackFile1 = job.fileStore.readGlobalFile(localFsID)
readBackFile2 = job.fileStore.readGlobalFile(localFsID)
job.fileStore.deleteLocalFile(localFsID)
assert not os.path.exists(readBackFile1)
assert not os.path.exists(readBackFile2)
# Try to get a non-FileID that doesn't exist.
try:
job.fileStore.readGlobalFile('bogus')
except NoSuchFileException:
# TODO: We would like to require TypeError, but for Cactus
# support we have to accept non-FileIDs.
pass
else:
raise RuntimeError("Managed to get a file from a non-FileID")
# Try to get a FileID for something that doesn't exist
try:
job.fileStore.readGlobalFile(FileID('bogus', 4096))
except NoSuchFileException:
pass
else:
raise RuntimeError("Managed to read a non-existent file")
@travis_test
def testSimultaneousReadsUncachedStream(self):
"""
Test many simultaneous read attempts on a file created via a stream
directly to the job store.
"""
self.options.retryCount = 0
self.options.disableChaining = True
# Make a file
parent = Job.wrapJobFn(self._createUncachedFileStream)
# Now make a bunch of children fight over it
for i in range(30):
parent.addChildJobFn(self._readFileWithDelay, parent.rv())
Job.Runner.startToil(parent, self.options)
@staticmethod
def _createUncachedFileStream(job):
"""
Create and return a FileID for a non-cached file written via a stream.
"""
messageBytes = b'This is a test file\n'
with job.fileStore.jobStore.write_file_stream() as (out, idString):
# Write directly to the job store so the caching file store doesn't even see it.
# TODO: If we ever change how the caching file store does its IDs we will have to change this.
out.write(messageBytes)
# Now make a file ID
fileID = FileID(idString, len(messageBytes))
return fileID
@staticmethod
def _readFileWithDelay(job, fileID, cores=0.1, memory=50 * 1024 * 1024, disk=50 * 1024 * 1024):
"""
Read a file from the CachingFileStore with a delay imposed on the download.
Should create contention.
Has low requirements so we can run a lot of copies at once.
"""
# Make sure the file store delays
# Delay needs to be longer than the timeout for sqlite locking in the file store.
job.fileStore.forceDownloadDelay = 120
readStart = datetime.datetime.now()
logger.debug('Begin read at %s', str(readStart))
localPath = job.fileStore.readGlobalFile(fileID, cache=True, mutable=True)
readEnd = datetime.datetime.now()
logger.debug('End read at %s: took %f seconds', str(readEnd), (readEnd - readStart).total_seconds())
with open(localPath, 'rb') as fh:
text = fh.read().decode('utf-8').strip()
logger.debug('Got file contents: %s', text)
class NonCachingFileStoreTestWithFileJobStore(hidden.AbstractNonCachingFileStoreTest):
jobStoreType = 'file'
@pytest.mark.timeout(1000)
class CachingFileStoreTestWithFileJobStore(hidden.AbstractCachingFileStoreTest):
jobStoreType = 'file'
@needs_aws_ec2
class NonCachingFileStoreTestWithAwsJobStore(hidden.AbstractNonCachingFileStoreTest):
jobStoreType = 'aws'
@slow
@needs_aws_ec2
@pytest.mark.timeout(1000)
class CachingFileStoreTestWithAwsJobStore(hidden.AbstractCachingFileStoreTest):
jobStoreType = 'aws'
@needs_google
class NonCachingFileStoreTestWithGoogleJobStore(hidden.AbstractNonCachingFileStoreTest):
jobStoreType = 'google'
@slow
@needs_google
@pytest.mark.timeout(1000)
class CachingFileStoreTestWithGoogleJobStore(hidden.AbstractCachingFileStoreTest):
jobStoreType = 'google'
def _exportStaticMethodAsGlobalFunctions(cls):
"""
Define utility functions because Toil can't pickle static methods. Note that this relies on
the convention that the first argument of a job function is named 'job'.
"""
for name, kind, clazz, value in inspect.classify_class_attrs(cls):
if kind == 'static method' and name != '__new__': # __new__ became static in 3.7
method = value.__func__
args = inspect.getfullargspec(method).args
if args and args[0] == 'job':
globals()[name] = method
_exportStaticMethodAsGlobalFunctions(hidden.AbstractFileStoreTest)
_exportStaticMethodAsGlobalFunctions(hidden.AbstractCachingFileStoreTest)
_exportStaticMethodAsGlobalFunctions(hidden.AbstractNonCachingFileStoreTest)
| []
| []
| [
"TOIL_GOOGLE_PROJECTID"
]
| [] | ["TOIL_GOOGLE_PROJECTID"] | python | 1 | 0 | |
test-container/src/test/java/io/strimzi/test/container/StrimziKafkaContainerTest.java | /*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.test.container;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Test;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.MatcherAssert.assertThat;
public class StrimziKafkaContainerTest {
private static final Logger LOGGER = LogManager.getLogger(StrimziKafkaContainerTest.class);
private StrimziKafkaContainer systemUnderTest;
@Test
void testAtLeastOneVersionKafkaIsPresent() {
assumeDocker();
systemUnderTest = StrimziKafkaContainer.create(1);
LOGGER.info("Verifying that at least one kafka version is present.");
assertThat(StrimziKafkaContainer.getSupportedKafkaVersions(), is(not(nullValue())));
systemUnderTest.stop();
}
private void assumeDocker() {
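        // Skip the test unless the configured container runtime is plain Docker (or unset).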
Assumptions.assumeTrue(System.getenv("DOCKER_CMD") == null || "docker".equals(System.getenv("DOCKER_CMD")));
}
@Test
void testVersions() {
assumeDocker();
systemUnderTest = StrimziKafkaContainer.create(1);
List<String> supportedKafkaVersions = new ArrayList<>();
// Read Kafka versions
try (BufferedReader bufferedReader = new BufferedReader(new FileReader("src/main/resources/kafka-versions.txt"))) {
String kafkaVersion;
while ((kafkaVersion = bufferedReader.readLine()) != null) {
supportedKafkaVersions.add(kafkaVersion);
}
} catch (IOException e) {
e.printStackTrace();
}
// sort kafka version from low to high
Collections.sort(supportedKafkaVersions);
LOGGER.info("This is all supported Kafka versions {}", supportedKafkaVersions.toString());
assertThat(supportedKafkaVersions, is(StrimziKafkaContainer.getSupportedKafkaVersions()));
LOGGER.info("Verifying that {} is latest kafka version", supportedKafkaVersions.get(supportedKafkaVersions.size() - 1));
assertThat(supportedKafkaVersions.get(supportedKafkaVersions.size() - 1), is(StrimziKafkaContainer.getLatestKafkaVersion()));
// Read Strimzi version
String strimziVersion = null;
try (BufferedReader bufferedReader = new BufferedReader(new FileReader("src/main/resources/strimzi-version.txt"))) {
strimziVersion = bufferedReader.readLine();
} catch (IOException e) {
e.printStackTrace();
}
LOGGER.info("Asserting Strimzi version: {}", strimziVersion);
assertThat(strimziVersion, is(StrimziKafkaContainer.getStrimziVersion()));
systemUnderTest.stop();
}
@Test
void testStartContainerWithEmptyConfiguration() {
assumeDocker();
systemUnderTest = StrimziKafkaContainer.create(1);
systemUnderTest.start();
assertThat(systemUnderTest.getBootstrapServers(), is("PLAINTEXT://localhost:" + systemUnderTest.getMappedPort(9092)));
}
@Test
void testStartContainerWithSomeConfiguration() {
assumeDocker();
Map<String, String> kafkaConfiguration = new HashMap<>();
kafkaConfiguration.put("log.cleaner.enable", "false");
kafkaConfiguration.put("log.cleaner.backoff.ms", "1000");
kafkaConfiguration.put("ssl.enabled.protocols", "TLSv1");
kafkaConfiguration.put("log.index.interval.bytes", "2048");
systemUnderTest = StrimziKafkaContainer.createWithAdditionalConfiguration(1, kafkaConfiguration);
systemUnderTest.start();
String logsFromKafka = systemUnderTest.getLogs();
assertThat(logsFromKafka, containsString("log.cleaner.enable = false"));
assertThat(logsFromKafka, containsString("log.cleaner.backoff.ms = 1000"));
assertThat(logsFromKafka, containsString("ssl.enabled.protocols = [TLSv1]"));
assertThat(logsFromKafka, containsString("log.index.interval.bytes = 2048"));
systemUnderTest.stop();
}
}
| [
"\"DOCKER_CMD\"",
"\"DOCKER_CMD\""
]
| []
| [
"DOCKER_CMD"
]
| [] | ["DOCKER_CMD"] | java | 1 | 0 | |
docs/conf.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
"""
Sphinx documentation builder
"""
import os
# Set env flag so that we can doc functions that may otherwise not be loaded
# see for example interactive visualizations in qiskit.visualization.
os.environ['QISKIT_DOCS'] = 'TRUE'
# -- Project information -----------------------------------------------------
project = 'Qiskit ODE Solvers'
copyright = '2021, Qiskit Development Team' # pylint: disable=redefined-builtin
author = 'Qiskit Development Team'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.0.1b1'
rst_prolog = """
.. raw:: html
<br><br><br>
.. |version| replace:: {0}
""".format(release)
nbsphinx_prolog = """
{% set docname = env.doc2path(env.docname, base=None) %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. raw:: html
<br><br><br>
.. note::
Run interactively in jupyter notebook.
"""
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'jupyter_sphinx',
'sphinx_autodoc_typehints',
'reno.sphinxext',
'sphinx_panels',
'sphinx.ext.intersphinx',
'nbsphinx',
]
html_static_path = ['_static']
templates_path = ['_templates']
html_css_files = ['style.css', 'custom.css', 'gallery.css']
nbsphinx_timeout = 360
nbsphinx_execute = os.getenv('QISKIT_DOCS_BUILD_TUTORIALS', 'never')
nbsphinx_widgets_path = ''
exclude_patterns = ['_build', '**.ipynb_checkpoints']
nbsphinx_thumbnails = {
}
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
autosummary_generate = True
# -----------------------------------------------------------------------------
# Autodoc
# -----------------------------------------------------------------------------
autodoc_default_options = {
'inherited-members': None,
}
# If true, figures, tables and code-blocks are automatically numbered if they
# have a caption.
numfig = True
# A dictionary mapping 'figure', 'table', 'code-block' and 'section' to
# strings that are used for format of figure numbers. As a special character,
# %s will be replaced to figure number.
numfig_format = {
'table': 'Table %s'
}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'colorful'
# A boolean that decides whether module names are prepended to all object names
# (for object types where a “module” of some kind is defined), e.g. for
# py:function directives.
add_module_names = False
# A list of prefixes that are ignored for sorting the Python module index
# (e.g., if this is set to ['foo.'], then foo.bar is shown under B, not F).
# This can be handy if you document a project that consists of a single
# package. Works only for the HTML builder currently.
modindex_common_prefix = ['qiskit_experiments.']
# -- Configuration for extlinks extension ------------------------------------
# Refer to https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'qiskit_sphinx_theme' # use the theme in subdir 'theme'
#html_sidebars = {'**': ['globaltoc.html']}
html_last_updated_fmt = '%Y/%m/%d'
html_theme_options = {
'logo_only': True,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': True,
}
autoclass_content = 'both'
intersphinx_mapping = {'matplotlib': ('https://matplotlib.org/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None)}
| []
| []
| [
"QISKIT_DOCS_BUILD_TUTORIALS",
"QISKIT_DOCS"
]
| [] | ["QISKIT_DOCS_BUILD_TUTORIALS", "QISKIT_DOCS"] | python | 2 | 0 | |
src/main/java/io/link4pay/util/Environment.java | package io.link4pay.util;
import java.util.Arrays;
/**
* Indicates the environment of the Link4pay Gateway with which to interact.
*/
public class Environment {
/** For Link4pay internal development. */
public static final Environment DEVELOPMENT = new Environment(developmentBaseURL() + ":" + developmentPort(), "http://auth.venmo.dev:9292", new String[]{}, "development");
public static final Environment QA = new Environment("https://gateway.qa.braintreepayments.com:443", "https://auth.qa.venmo.com", new String[]{"ssl/api_link4pay_com.ca.crt", "ssl/payments_braintreeapi_com.ca.crt"}, "qa");
/** For production. */
public static final Environment PRODUCTION = new Environment("https://api.link4pay.com:443", "https://auth.venmo.com", new String[]{"ssl/api_link4pay_com.ca.crt", "ssl/payments_braintreeapi_com.ca.crt"}, "production");
/** For merchants to use during their development and testing. */
public static final Environment SANDBOX = new Environment("https://api.sandbox.link4pay.com:443", "https://auth.sandbox.venmo.com", new String[]{"ssl/api_link4pay_com.ca.crt", "ssl/payments_braintreeapi_com.ca.crt"}, "sandbox");
private String environmentName;
public final String baseURL;
public final String authURL;
public final String[] certificateFilenames;
public Environment(String baseURL, String authURL, String[] certificateFilenames, String environmentName) {
this.baseURL = baseURL;
this.authURL = authURL;
this.certificateFilenames = Arrays.copyOf(certificateFilenames, certificateFilenames.length);
this.environmentName = environmentName;
}
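    /**
     * Maps an environment name such as "sandbox" or "production" to the matching
     * constant; "integration" is treated as an alias for the development environment.
     */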
public static Environment parseEnvironment(String environment) {
if (environment.equals("development") || environment.equals("integration")) {
return DEVELOPMENT;
} else if (environment.equals("qa")) {
return QA;
} else if (environment.equals("sandbox")) {
return SANDBOX;
} else if (environment.equals("production")) {
return PRODUCTION;
} else {
throw new IllegalArgumentException("Unknown environment: " + environment);
}
}
private static String developmentBaseURL() {
if (System.getenv().get("GATEWAY_BASE_URL") != null) {
return System.getenv().get("GATEWAY_BASE_URL");
} else {
return "http://localhost";
}
}
public static String developmentPort() {
if (System.getenv().get("GATEWAY_PORT") != null) {
return System.getenv().get("GATEWAY_PORT");
} else {
return "3000";
}
}
public String getEnvironmentName() {
return this.environmentName;
}
public String toString() {
return getEnvironmentName();
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
router/router.go | package router
import (
"farcai-go/app/api"
"farcai-go/middleware"
"os"
"github.com/gogf/gf/frame/g"
"github.com/gogf/gf/net/ghttp"
)
func init() {
s := g.Server()
s.Group("/api/v1", func(group *ghttp.RouterGroup) {
group.Middleware(middleware.MiddlewareCors)
		// Registration: only exposed in non-production environments
		if os.Getenv("ENV_MODE") != "PROD" {
			group.POST("/signup", api.User.Register)
		}
		// Log in
		group.POST("/login", api.User.Login)
		// Search posts
		group.GET("/post/search", api.Post.SearchPost)
		// Back up the database
		group.GET("/backup/:secret", api.Assets.BackupDB)
		// Authentication middleware
		group.Middleware(middleware.MiddlewareAuth)
		// Get user info
		group.GET("/user-info", api.User.GetUserInfo)
		// Add a category
		group.POST("/category", api.Category.AddCategory)
		// Get the list of categories
		group.GET("/category", api.Category.GetCategorys)
		// Add a post
		group.POST("/post", api.Post.AddPost)
		// Update a post
		group.PUT("/post", api.Post.UpdatePost)
		// Get a post by ID
		group.GET("/post/:id", api.Post.GetPost)
		// Delete a post
		group.DELETE("/post/:id", api.Post.DeletePost)
		// File upload credentials (COS)
		group.GET("/credentials/cos", api.Assets.COSCredentials)
})
	// Home page
	s.BindHandler("/", api.Html.Home)
	// Post detail
	s.BindHandler("/p/:id", api.Html.Detail)
	// Editor page
	s.BindHandler("/writing", api.Html.Writing)
	// Login page
	s.BindHandler("/login", api.Html.Login)
	// Archive
	s.BindHandler("/pigeonhole", api.Html.Pigeonhole)
	// Categories
	s.BindHandler("/c/:cid", api.Html.Category)
	// Custom pages
	s.BindHandler("/:custom", api.Html.CustomPage)
}
| [
"\"ENV_MODE\""
]
| []
| [
"ENV_MODE"
]
| [] | ["ENV_MODE"] | go | 1 | 0 | |
shikshastudio/shikshastudio/wsgi.py | """
WSGI config for shikshastudio project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shikshastudio.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
demo/monitor_metrics.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ==============================================================================
# Copyright and license info is available in the LICENSE file included with
# the Server Deployment Package (SDP), and also available online:
# https://swarm.workshop.perforce.com/projects/perforce-software-sdp/view/main/LICENSE
# ------------------------------------------------------------------------------
"""
NAME:
monitor_metrics.py
DESCRIPTION:
This script monitors locks and other Perforce server metrics for use with Prometheus.
Assumes it is wrapped by a simple bash script monitor_wrapper.sh
    that configures the SDP environment or equivalent environment variables.
"""
# Python 2.7/3.3 compatibility.
from __future__ import print_function
import sys
import os
import textwrap
import argparse
import logging
import re
import subprocess
import datetime
import json
python3 = (sys.version_info[0] >= 3)
LOGGER_NAME = 'monitor_metrics'
logger = logging.getLogger(LOGGER_NAME)
metrics_root = "/var/lib/prometheus/node-exporter/"
metrics_file = "locks.prom"
script_name = os.path.basename(os.path.splitext(__file__)[0])
LOGDIR = os.getenv('LOGS', '/var/log/perforce/')
DEFAULT_LOG_FILE = "log-%s.log" % script_name
if os.path.exists(LOGDIR):
DEFAULT_LOG_FILE = os.path.join(LOGDIR, "%s.log" % script_name)
DEFAULT_VERBOSITY = 'DEBUG'
LOGGER_NAME = 'monitor_metrics'
class MonitorMetrics:
"""Metric counts"""
def __init__(self):
self.dbReadLocks = 0
self.dbWriteLocks = 0
self.clientEntityReadLocks = 0
self.clientEntityWriteLocks = 0
self.metaReadLocks = 0
self.metaWriteLocks = 0
self.replicaReadLocks = 0
self.replicaWriteLocks = 0
self.blockedCommands = 0
self.p4root = "/data/perforce/Perforce"
self.msgs = []
class P4Monitor(object):
"""See module doc string for details"""
def __init__(self, *args, **kwargs):
self.parse_args(__doc__, args)
self.now = datetime.datetime.now()
self.sdpinst_label = ""
self.serverid_label = ""
if self.options.sdp_instance:
self.sdpinst_label = ',sdpinst="%s"' % self.options.sdp_instance
with open("/%s/server.id" % self.p4root, "r") as f:
self.serverid_label = 'serverid="%s"' % f.read().rstrip()
def parse_args(self, doc, args):
"""Common parsing and setting up of args"""
desc = textwrap.dedent(doc)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=desc,
epilog="Copyright (c) 2020 Perforce Software, Inc."
)
self.add_parse_args(parser)
self.options = parser.parse_args(args=args)
self.init_logger()
self.logger.debug("Command Line Options: %s\n" % self.options)
def add_parse_args(self, parser, default_log_file=None, default_verbosity=None):
"""Default trigger arguments - common to all triggers
:param default_verbosity:
:param default_log_file:
:param parser:
"""
if not default_log_file:
default_log_file = DEFAULT_LOG_FILE
if not default_verbosity:
default_verbosity = DEFAULT_VERBOSITY
parser.add_argument('-p', '--p4port', default=None,
help="Perforce server port. Default: $P4PORT")
parser.add_argument('-u', '--p4user', default=None, help="Perforce user. Default: $P4USER")
parser.add_argument('-L', '--log', default=default_log_file, help="Default: " + default_log_file)
parser.add_argument('--no-sdp', action='store_true', default=False, help="Whether this is SDP instance or not")
parser.add_argument('-i', '--sdp-instance', help="SDP instance")
parser.add_argument('-m', '--metrics-root', default=metrics_root, help="Metrics directory to use. Default: " + metrics_root)
parser.add_argument('-v', '--verbosity',
nargs='?',
const="INFO",
default=default_verbosity,
choices=('DEBUG', 'WARNING', 'INFO', 'ERROR', 'FATAL'),
help="Output verbosity level. Default is: " + default_verbosity)
def init_logger(self, logger_name=None):
if not logger_name:
logger_name = LOGGER_NAME
self.logger = logging.getLogger(logger_name)
self.logger.setLevel(self.options.verbosity)
logformat = '%(levelname)s %(asctime)s %(filename)s %(lineno)d: %(message)s'
logging.basicConfig(format=logformat, filename=self.options.log, level=self.options.verbosity)
formatter = logging.Formatter('%(message)s')
ch = logging.StreamHandler(sys.stderr)
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
self.logger.addHandler(ch)
def run_cmd(self, cmd, get_output=True, timeout=35, stop_on_error=True):
"Run cmd logging input and output"
output = ""
try:
self.logger.debug("Running: %s" % cmd)
if get_output:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=True)
if python3:
output, err = p.communicate(timeout=timeout)
else:
output, err = p.communicate()
rc = p.returncode
self.logger.debug("Output:\n%s" % output)
else:
if python3:
result = subprocess.check_call(cmd, stderr=subprocess.STDOUT, shell=True, timeout=timeout)
else:
result = subprocess.check_call(cmd, stderr=subprocess.STDOUT, shell=True)
self.logger.debug('Result: %d' % result)
except subprocess.CalledProcessError as e:
self.logger.debug("Output: %s" % e.output)
if stop_on_error:
msg = 'Failed cmd: %d %s' % (e.returncode, str(e))
self.logger.debug(msg)
except Exception as e:
self.logger.debug("Output: %s" % output)
if stop_on_error:
msg = 'Failed cmd: %s' % str(e)
self.logger.debug(msg)
return output
def parseMonitorData(self, mondata):
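        # Expects lines of the form "<pid> <runstate> <user> <elapsed> <cmd> [args]",
        # matching the -F format string passed to 'p4 monitor show' in run().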
        reProc = re.compile(r"(\d+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s*(.*)$")
pids = {}
for line in mondata.split("\n"):
m = reProc.search(line)
if m:
pid = m.group(1)
runstate = m.group(2)
user = m.group(3)
elapsed = m.group(4)
cmd = m.group(5)
args = None
if len(m.groups()) == 6:
args = m.group(6)
pids[pid] = (user, cmd, args)
return pids
# Old versions of lslocks can't return json so we parse text
# For now assume no spaces in file paths or this won't work!
# COMMAND PID TYPE SIZE MODE M START END PATH BLOCKER
# (unknown) -1 OFDLCK 0B WRITE 0 0 0 /etc/hosts
# (unknown) -1 OFDLCK 0B READ 0 0 0
# p4d 107 FLOCK 16K READ* 0 0 0 /path/db.config 105
# p4d 105 FLOCK 16K WRITE 0 0 0 /path/db.config
# p4d 105 FLOCK 16K WRITE 0 0 0 /path/db.configh
def parseTextLockInfo(self, lockdata):
jlock = {'locks': []}
for line in lockdata.split("\n"):
parts = line.split()
if len(parts) < 9:
if line != "":
self.logger.warning("Failed to parse: %s" % line)
continue
if parts[0] == "COMMAND" or parts[3] == "START":
continue
lockinfo = {"command": parts[0], "pid": parts[1],
"type": parts[2], "size": parts[3],
"mode": parts[4], "m": parts[5],
"start": parts[6], "end": parts[7],
"path": parts[8], "blocker": None}
if len(parts) == 10:
lockinfo["blocker"] = parts[9]
jlock['locks'].append(lockinfo)
self.logger.debug("parsed TextLockInfo: %s" % str(jlock))
return json.dumps(jlock)
# lslocks output in JSON format:
# {"command": "p4d", "pid": "2502", "type": "FLOCK", "size": "17B",
# "mode": "READ", "m": "0", "start": "0", "end": "0",
# "path": "/p4/1/root/server.locks/clientEntity/10,d/robomerge-main-ts",
# "blocker": null}
def findLocks(self, lockdata, mondata):
"Finds appropriate locks by parsing data"
pids = self.parseMonitorData(mondata)
metrics = MonitorMetrics()
try:
jlock = json.loads(lockdata)
except Exception as e:
self.logger.warning("Failed to load json: %s", str(e))
jlock = []
locks = []
if 'locks' not in jlock:
return metrics
for j in jlock['locks']:
if "p4d" not in j["command"]:
continue
if "clientEntity" in j["path"]:
if j["mode"] == "READ":
metrics.clientEntityReadLocks += 1
elif j["mode"] == "WRITE":
metrics.clientEntityWriteLocks += 1
cmd = user = args = ""
pid = j["pid"]
mode = j["mode"]
path = j["path"]
if j["pid"] in pids:
user, cmd, args = pids[j["pid"]]
if "server.locks/meta" in j["path"]:
if j["mode"] == "READ":
metrics.metaReadLocks += 1
elif j["mode"] == "WRITE":
metrics.metaWriteLocks += 1
if "/db." in j["path"]:
if j["mode"] == "READ":
metrics.dbReadLocks += 1
if j["mode"] == "WRITE":
metrics.dbWriteLocks += 1
if j["blocker"]:
metrics.blockedCommands += 1
buser, bcmd, bargs = "unknown", "unknown", "unknown"
bpid = j["blocker"]
if bpid in pids:
buser, bcmd, bargs = pids[bpid]
msg = "pid %s, user %s, cmd %s, table %s, blocked by pid %s, user %s, cmd %s, args %s" % (
pid, user, cmd, path, bpid, buser, bcmd, bargs)
metrics.msgs.append(msg)
return metrics
def metricsHeader(self, name, help, type):
lines = []
lines.append("# HELP %s %s" % (name, help))
lines.append("# TYPE %s %s" % (name, type))
return lines
def formatMetrics(self, metrics):
lines = []
name = "p4_locks_db_read"
lines.extend(self.metricsHeader(name, "Database read locks", "gauge"))
lines.append("%s{%s%s} %s" % (name, self.serverid_label, self.sdpinst_label, metrics.dbReadLocks))
name = "p4_locks_db_write"
lines.extend(self.metricsHeader(name, "Database write locks", "gauge"))
lines.append("%s{%s%s} %s" % (name, self.serverid_label, self.sdpinst_label, metrics.dbWriteLocks))
name = "p4_locks_cliententity_read"
lines.extend(self.metricsHeader(name, "clientEntity read locks", "gauge"))
lines.append("%s{%s%s} %s" % (name, self.serverid_label, self.sdpinst_label, metrics.clientEntityReadLocks))
name = "p4_locks_cliententity_write"
lines.extend(self.metricsHeader(name, "clientEntity write locks", "gauge"))
lines.append("%s{%s%s} %s" % (name, self.serverid_label, self.sdpinst_label, metrics.clientEntityWriteLocks))
name = "p4_locks_meta_read"
lines.extend(self.metricsHeader(name, "meta db read locks", "gauge"))
lines.append("%s{%s%s} %s" % (name, self.serverid_label, self.sdpinst_label, metrics.metaReadLocks))
name = "p4_locks_meta_write"
lines.extend(self.metricsHeader(name, "meta db write locks", "gauge"))
lines.append("%s{%s%s} %s" % (name, self.serverid_label, self.sdpinst_label, metrics.metaWriteLocks))
name = "p4_locks_replica_read"
lines.extend(self.metricsHeader(name, "replica read locks", "gauge"))
lines.append("%s{%s%s} %s" % (name, self.serverid_label, self.sdpinst_label, metrics.replicaReadLocks))
name = "p4_locks_replica_write"
lines.extend(self.metricsHeader(name, "replica write locks", "gauge"))
lines.append("%s{%s%s} %s" % (name, self.serverid_label, self.sdpinst_label, metrics.replicaWriteLocks))
name = "p4_locks_cmds_blocked"
lines.extend(self.metricsHeader(name, "cmds blocked by locks", "gauge"))
lines.append("%s{%s%s} %s" % (name, self.serverid_label, self.sdpinst_label, metrics.blockedCommands))
return lines
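    # For reference, the lines built above follow the node_exporter textfile format,
    # e.g. (label values depend on server.id and the --sdp-instance option):
    #   # HELP p4_locks_db_read Database read locks
    #   # TYPE p4_locks_db_read gauge
    #   p4_locks_db_read{serverid="master.1",sdpinst="1"} 0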
def writeMetrics(self, lines):
fname = os.path.join(self.options.metrics_root, metrics_file)
self.logger.debug("Writing to metrics file: %s", fname)
self.logger.debug("Metrics: %s\n", "\n".join(lines))
tmpfname = fname + ".tmp"
with open(tmpfname, "w") as f:
f.write("\n".join(lines))
f.write("\n")
os.rename(tmpfname, fname)
def formatLog(self, metrics):
prefix = self.now.strftime("%Y-%m-%d %H:%M:%S")
lines = []
if not metrics.msgs:
lines.append("%s no blocked commands" % prefix)
else:
for m in metrics.msgs:
lines.append("%s %s" % (prefix, m))
return lines
def writeLog(self, lines):
with open(self.options.log, "a") as f:
f.write("\n".join(lines))
f.write("\n")
def getLslocksVer(self, ver):
# lslocks from util-linux 2.23.2
try:
return ver.split()[-1]
        except Exception:
return "1.0"
def run(self):
"""Runs script"""
p4cmd = "%s -u %s -p %s" % (os.environ["P4BIN"], os.environ["P4USER"], os.environ["P4PORT"])
verdata = self.run_cmd("lslocks -V")
locksver = self.getLslocksVer(verdata)
lockcmd = "lslocks -o +BLOCKER"
# If lslocks can't return JSON we parse it into JSON ourselves
if locksver > "2.26":
lockcmd += " -J"
lockdata = self.run_cmd(lockcmd)
else:
lockdata = self.run_cmd(lockcmd)
lockdata = self.parseTextLockInfo(lockdata)
mondata = self.run_cmd('{0} -F "%id% %runstate% %user% %elapsed% %function% %args%" monitor show -al'.format(p4cmd))
metrics = self.findLocks(lockdata, mondata)
self.writeLog(self.formatLog(metrics))
self.writeMetrics(self.formatMetrics(metrics))
if __name__ == '__main__':
""" Main Program"""
obj = P4Monitor(*sys.argv[1:])
obj.run()
| []
| []
| [
"P4PORT",
"LOGS",
"P4USER",
"P4BIN"
]
| [] | ["P4PORT", "LOGS", "P4USER", "P4BIN"] | python | 4 | 0 | |
unopartylib/lib/blocks.py | """
Initialise database.
Sieve blockchain for Unoparty transactions, and add them to the database.
"""
import os
import time
import binascii
import struct
import decimal
D = decimal.Decimal
import logging
logger = logging.getLogger(__name__)
import collections
import platform
import apsw
import csv
import copy
import http
import bitcoin as bitcoinlib
from bitcoin.core.script import CScriptInvalidError
from unopartylib.lib import config
from unopartylib.lib import exceptions
from unopartylib.lib import util
from unopartylib.lib import check
from unopartylib.lib import script
from unopartylib.lib import backend
from unopartylib.lib import log
from unopartylib.lib import database
from unopartylib.lib import message_type
from unopartylib.lib import arc4
from unopartylib.lib.transaction_helper import p2sh_encoding
from .messages import (send, order, btcpay, issuance, broadcast, bet, dividend, burn, cancel, rps, rpsresolve, destroy, sweep, dispenser)
from .messages.versions import enhanced_send, mpma
from .kickstart.blocks_parser import BlockchainParser, ChainstateParser
from .kickstart.utils import ib2h
from .exceptions import DecodeError, BTCOnlyError
# Order matters for FOREIGN KEY constraints.
TABLES = ['credits', 'debits', 'messages'] + \
['bet_match_resolutions', 'order_match_expirations', 'order_matches',
'order_expirations', 'orders', 'bet_match_expirations', 'bet_matches',
'bet_expirations', 'bets', 'broadcasts', 'btcpays', 'burns',
'cancels', 'dividends', 'issuances', 'sends',
'rps_match_expirations', 'rps_expirations', 'rpsresolves',
'rps_matches', 'rps',
'destructions', 'assets', 'addresses', 'sweeps', 'dispensers', 'dispenses']
# Compose list of tables tracked by undolog
UNDOLOG_TABLES = copy.copy(TABLES)
UNDOLOG_TABLES.remove('messages')
UNDOLOG_TABLES += ['balances']
CURR_DIR = os.path.dirname(os.path.realpath(__file__))
with open(CURR_DIR + '/../mainnet_burns.csv', 'r') as f:
mainnet_burns_reader = csv.DictReader(f)
MAINNET_BURNS = {}
for line in mainnet_burns_reader:
MAINNET_BURNS[line['tx_hash']] = line
def parse_tx(db, tx):
"""Parse the transaction, return True for success."""
cursor = db.cursor()
try:
with db:
# Only one source and one destination allowed for now.
if len(tx['source'].split('-')) > 1:
return
if tx['destination']:
if len(tx['destination'].split('-')) > 1:
return
# Burns.
if tx['destination'] == config.UNSPENDABLE:
burn.parse(db, tx, MAINNET_BURNS)
return
if len(tx['data']) > 1:
try:
message_type_id, message = message_type.unpack(tx['data'], tx['block_index'])
except struct.error: # Deterministically raised.
message_type_id = None
message = None
else:
message_type_id = None
message = None
# Protocol change.
rps_enabled = tx['block_index'] >= 308500 or config.TESTNET or config.REGTEST
if message_type_id == send.ID:
send.parse(db, tx, message)
elif message_type_id == enhanced_send.ID and util.enabled('enhanced_sends', block_index=tx['block_index']):
enhanced_send.parse(db, tx, message)
elif message_type_id == mpma.ID and util.enabled('mpma_sends', block_index=tx['block_index']):
mpma.parse(db, tx, message)
elif message_type_id == order.ID:
order.parse(db, tx, message)
elif message_type_id == btcpay.ID:
btcpay.parse(db, tx, message)
elif message_type_id == issuance.ID:
issuance.parse(db, tx, message, message_type_id)
elif message_type_id == issuance.SUBASSET_ID and util.enabled('subassets', block_index=tx['block_index']):
issuance.parse(db, tx, message, message_type_id)
elif message_type_id == broadcast.ID:
broadcast.parse(db, tx, message)
elif message_type_id == bet.ID:
bet.parse(db, tx, message)
elif message_type_id == dividend.ID:
dividend.parse(db, tx, message)
elif message_type_id == cancel.ID:
cancel.parse(db, tx, message)
elif message_type_id == rps.ID and rps_enabled:
rps.parse(db, tx, message)
elif message_type_id == rpsresolve.ID and rps_enabled:
rpsresolve.parse(db, tx, message)
elif message_type_id == destroy.ID and util.enabled('destroy_reactivated', block_index=tx['block_index']):
destroy.parse(db, tx, message)
elif message_type_id == sweep.ID and util.enabled('sweep_send', block_index=tx['block_index']):
sweep.parse(db, tx, message)
elif message_type_id == dispenser.ID and util.enabled('dispensers', block_index=tx['block_index']):
dispenser.parse(db, tx, message)
elif message_type_id == dispenser.DISPENSE_ID and util.enabled('dispensers', block_index=tx['block_index']):
dispenser.dispense(db, tx)
else:
cursor.execute('''UPDATE transactions \
SET supported=? \
WHERE tx_hash=?''',
(False, tx['tx_hash']))
if tx['block_index'] != config.MEMPOOL_BLOCK_INDEX:
logger.info('Unsupported transaction: hash {}; data {}'.format(tx['tx_hash'], tx['data']))
cursor.close()
return False
# NOTE: for debugging (check asset conservation after every `N` transactions).
# if not tx['tx_index'] % N:
# check.asset_conservation(db)
return True
except Exception as e:
raise exceptions.ParseTransactionError("%s" % e)
finally:
cursor.close()
def parse_block(db, block_index, block_time,
previous_ledger_hash=None, ledger_hash=None,
previous_txlist_hash=None, txlist_hash=None,
previous_messages_hash=None):
"""Parse the block, return hash of new ledger, txlist and messages.
The unused arguments `ledger_hash` and `txlist_hash` are for the test suite.
"""
undolog_cursor = db.cursor()
#remove the row tracer and exec tracer on this cursor, so we don't utilize them with undolog operations...
undolog_cursor.setexectrace(None)
undolog_cursor.setrowtrace(None)
util.BLOCK_LEDGER = []
database.BLOCK_MESSAGES = []
assert block_index == util.CURRENT_BLOCK_INDEX
# Remove undolog records for any block older than we should be tracking
undolog_oldest_block_index = block_index - config.UNDOLOG_MAX_PAST_BLOCKS
first_undo_index = list(undolog_cursor.execute('''SELECT first_undo_index FROM undolog_block WHERE block_index == ?''',
(undolog_oldest_block_index,)))
if len(first_undo_index) == 1 and first_undo_index[0] is not None:
undolog_cursor.execute('''DELETE FROM undolog WHERE undo_index < ?''', (first_undo_index[0][0],))
undolog_cursor.execute('''DELETE FROM undolog_block WHERE block_index < ?''',
(undolog_oldest_block_index,))
# Set undolog barrier for this block
if block_index != config.BLOCK_FIRST:
undolog_cursor.execute('''INSERT OR REPLACE INTO undolog_block(block_index, first_undo_index)
SELECT ?, seq+1 FROM SQLITE_SEQUENCE WHERE name='undolog' ''', (block_index,))
else:
undolog_cursor.execute('''INSERT OR REPLACE INTO undolog_block(block_index, first_undo_index)
VALUES(?,?)''', (block_index, 1,))
undolog_cursor.close()
# Expire orders, bets and rps.
order.expire(db, block_index)
bet.expire(db, block_index, block_time)
rps.expire(db, block_index)
# Parse transactions, sorting them by type.
cursor = db.cursor()
cursor.execute('''SELECT * FROM transactions \
WHERE block_index=? ORDER BY tx_index''',
(block_index,))
txlist = []
for tx in list(cursor):
try:
parse_tx(db, tx)
txlist.append('{}{}{}{}{}{}'.format(tx['tx_hash'], tx['source'], tx['destination'],
tx['btc_amount'], tx['fee'],
binascii.hexlify(tx['data']).decode('UTF-8')))
except exceptions.ParseTransactionError as e:
            logger.warning('ParseTransactionError for tx %s: %s' % (tx['tx_hash'], e))
raise e
#pass
cursor.close()
# Calculate consensus hashes.
new_txlist_hash, found_txlist_hash = check.consensus_hash(db, 'txlist_hash', previous_txlist_hash, txlist)
new_ledger_hash, found_ledger_hash = check.consensus_hash(db, 'ledger_hash', previous_ledger_hash, util.BLOCK_LEDGER)
new_messages_hash, found_messages_hash = check.consensus_hash(db, 'messages_hash', previous_messages_hash, database.BLOCK_MESSAGES)
return new_ledger_hash, new_txlist_hash, new_messages_hash, found_messages_hash
def initialise(db):
"""Initialise data, create and populate the database."""
cursor = db.cursor()
# Blocks
cursor.execute('''CREATE TABLE IF NOT EXISTS blocks(
block_index INTEGER UNIQUE,
block_hash TEXT UNIQUE,
block_time INTEGER,
previous_block_hash TEXT UNIQUE,
difficulty INTEGER,
PRIMARY KEY (block_index, block_hash))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON blocks (block_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
index_hash_idx ON blocks (block_index, block_hash)
''')
# SQLite can’t do `ALTER TABLE IF COLUMN NOT EXISTS`.
columns = [column['name'] for column in cursor.execute('''PRAGMA table_info(blocks)''')]
if 'ledger_hash' not in columns:
cursor.execute('''ALTER TABLE blocks ADD COLUMN ledger_hash TEXT''')
if 'txlist_hash' not in columns:
cursor.execute('''ALTER TABLE blocks ADD COLUMN txlist_hash TEXT''')
if 'messages_hash' not in columns:
cursor.execute('''ALTER TABLE blocks ADD COLUMN messages_hash TEXT''')
if 'previous_block_hash' not in columns:
cursor.execute('''ALTER TABLE blocks ADD COLUMN previous_block_hash TEXT''')
if 'difficulty' not in columns:
cursor.execute('''ALTER TABLE blocks ADD COLUMN difficulty TEXT''')
# Check that first block in DB is BLOCK_FIRST.
cursor.execute('''SELECT * from blocks ORDER BY block_index''')
blocks = list(cursor)
if len(blocks):
if blocks[0]['block_index'] != config.BLOCK_FIRST:
raise exceptions.DatabaseError('First block in database is not block {}.'.format(config.BLOCK_FIRST))
# Transactions
cursor.execute('''CREATE TABLE IF NOT EXISTS transactions(
tx_index INTEGER UNIQUE,
tx_hash TEXT UNIQUE,
block_index INTEGER,
block_hash TEXT,
block_time INTEGER,
source TEXT,
destination TEXT,
btc_amount INTEGER,
fee INTEGER,
data BLOB,
supported BOOL DEFAULT 1,
FOREIGN KEY (block_index, block_hash) REFERENCES blocks(block_index, block_hash),
PRIMARY KEY (tx_index, tx_hash, block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON transactions (block_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
tx_index_idx ON transactions (tx_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
tx_hash_idx ON transactions (tx_hash)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
index_index_idx ON transactions (block_index, tx_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
index_hash_index_idx ON transactions (tx_index, tx_hash, block_index)
''')
# Purge database of blocks, transactions from before BLOCK_FIRST.
cursor.execute('''DELETE FROM blocks WHERE block_index < ?''', (config.BLOCK_FIRST,))
cursor.execute('''DELETE FROM transactions WHERE block_index < ?''', (config.BLOCK_FIRST,))
# (Valid) debits
cursor.execute('''CREATE TABLE IF NOT EXISTS debits(
block_index INTEGER,
address TEXT,
asset TEXT,
quantity INTEGER,
action TEXT,
event TEXT,
FOREIGN KEY (block_index) REFERENCES blocks(block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
address_idx ON debits (address)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
asset_idx ON debits (asset)
''')
# (Valid) credits
cursor.execute('''CREATE TABLE IF NOT EXISTS credits(
block_index INTEGER,
address TEXT,
asset TEXT,
quantity INTEGER,
calling_function TEXT,
event TEXT,
FOREIGN KEY (block_index) REFERENCES blocks(block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
address_idx ON credits (address)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
asset_idx ON credits (asset)
''')
# Balances
cursor.execute('''CREATE TABLE IF NOT EXISTS balances(
address TEXT,
asset TEXT,
quantity INTEGER)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
address_asset_idx ON balances (address, asset)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
address_idx ON balances (address)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
asset_idx ON balances (asset)
''')
# Assets
# TODO: Store more asset info here?!
cursor.execute('''CREATE TABLE IF NOT EXISTS assets(
asset_id TEXT UNIQUE,
asset_name TEXT UNIQUE,
block_index INTEGER,
asset_longname TEXT)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
name_idx ON assets (asset_name)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
id_idx ON assets (asset_id)
''')
# Add asset_longname for sub-assets
# SQLite can’t do `ALTER TABLE IF COLUMN NOT EXISTS`.
columns = [column['name'] for column in cursor.execute('''PRAGMA table_info(assets)''')]
if 'asset_longname' not in columns:
cursor.execute('''ALTER TABLE assets ADD COLUMN asset_longname TEXT''')
cursor.execute('''CREATE UNIQUE INDEX IF NOT EXISTS asset_longname_idx ON assets(asset_longname)''')
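    # Seed the built-in assets (UNO and XUP) if they are not present yet.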
cursor.execute('''SELECT * FROM assets WHERE asset_name = ?''', ('UNO',))
if not list(cursor):
cursor.execute('''INSERT INTO assets VALUES (?,?,?,?)''', ('0', 'UNO', None, None))
cursor.execute('''INSERT INTO assets VALUES (?,?,?,?)''', ('1', 'XUP', None, None))
# Addresses
# Leaving this here because in the future this could work for other things besides broadcast
cursor.execute('''CREATE TABLE IF NOT EXISTS addresses(
address TEXT UNIQUE,
options INTEGER,
block_index INTEGER)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
addresses_idx ON addresses (address)
''')
# Consolidated
send.initialise(db)
destroy.initialise(db)
order.initialise(db)
btcpay.initialise(db)
issuance.initialise(db)
broadcast.initialise(db)
bet.initialise(db)
dividend.initialise(db)
burn.initialise(db)
cancel.initialise(db)
rps.initialise(db)
rpsresolve.initialise(db)
sweep.initialise(db)
dispenser.initialise(db)
# Messages
cursor.execute('''CREATE TABLE IF NOT EXISTS messages(
message_index INTEGER PRIMARY KEY,
block_index INTEGER,
command TEXT,
category TEXT,
bindings TEXT,
timestamp INTEGER)
''')
# TODO: FOREIGN KEY (block_index) REFERENCES blocks(block_index) DEFERRABLE INITIALLY DEFERRED)
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON messages (block_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_message_index_idx ON messages (block_index, message_index)
''')
# Create undolog tables
cursor.execute('''CREATE TABLE IF NOT EXISTS undolog(
undo_index INTEGER PRIMARY KEY AUTOINCREMENT,
sql TEXT)
''')
cursor.execute('''CREATE TABLE IF NOT EXISTS undolog_block(
block_index INTEGER PRIMARY KEY,
first_undo_index INTEGER)
''')
# Create undolog triggers for all tables in TABLES list, plus the 'balances' table
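    # Each trigger records the inverse SQL statement in the undolog so a block can be
    # rolled back, e.g. an INSERT trigger stores the DELETE that would remove the new row.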
for table in UNDOLOG_TABLES:
columns = [column['name'] for column in cursor.execute('''PRAGMA table_info({})'''.format(table))]
cursor.execute('''CREATE TRIGGER IF NOT EXISTS _{}_insert AFTER INSERT ON {} BEGIN
INSERT INTO undolog VALUES(NULL, 'DELETE FROM {} WHERE rowid='||new.rowid);
END;
'''.format(table, table, table))
columns_parts = ["{}='||quote(old.{})||'".format(c, c) for c in columns]
cursor.execute('''CREATE TRIGGER IF NOT EXISTS _{}_update AFTER UPDATE ON {} BEGIN
INSERT INTO undolog VALUES(NULL, 'UPDATE {} SET {} WHERE rowid='||old.rowid);
END;
'''.format(table, table, table, ','.join(columns_parts)))
columns_parts = ["'||quote(old.{})||'".format(c) for c in columns]
cursor.execute('''CREATE TRIGGER IF NOT EXISTS _{}_delete BEFORE DELETE ON {} BEGIN
INSERT INTO undolog VALUES(NULL, 'INSERT INTO {}(rowid,{}) VALUES('||old.rowid||',{})');
END;
'''.format(table, table, table, ','.join(columns), ','.join(columns_parts)))
# Drop undolog tables on messages table if they exist (fix for adding them in 9.52.0)
for trigger_type in ('insert', 'update', 'delete'):
cursor.execute("DROP TRIGGER IF EXISTS _messages_{}".format(trigger_type))
# Mempool messages
# NOTE: `status`, 'block_index` are removed from bindings.
cursor.execute('''DROP TABLE IF EXISTS mempool''')
cursor.execute('''CREATE TABLE mempool(
tx_hash TEXT,
command TEXT,
category TEXT,
bindings TEXT,
timestamp INTEGER)
''')
cursor.close()
def get_tx_info(tx_hex, block_parser=None, block_index=None, db=None):
"""Get the transaction info. Returns normalized None data for DecodeError and BTCOnlyError."""
try:
return _get_tx_info(tx_hex, block_parser, block_index)
except DecodeError as e:
return b'', None, None, None, None, None
except BTCOnlyError as e:
# NOTE: For debugging, logger.debug('Could not decode: ' + str(e))
if util.enabled('dispensers', block_index):
try:
return b'', None, None, None, None, _get_swap_tx(e.decodedTx, block_parser, block_index, db=db)
except: # (DecodeError, backend.indexd.BackendRPCError) as e:
return b'', None, None, None, None, None
else:
return b'', None, None, None, None, None
def _get_swap_tx(decoded_tx, block_parser=None, block_index=None, db=None):
def get_pubkeyhash(scriptpubkey):
asm = script.get_asm(scriptpubkey)
if len(asm) != 5 or asm[0] != 'OP_DUP' or asm[1] != 'OP_HASH160' or asm[3] != 'OP_EQUALVERIFY' or asm[4] != 'OP_CHECKSIG':
return False
return asm[2]
def get_address(scriptpubkey):
if util.enabled('correct_segwit_txids') and scriptpubkey.is_witness_v0_keyhash():
pubkey = scriptpubkey[2:]
address = str(bitcoinlib.bech32.CBech32Data.from_bytes(0, pubkey))
return address
else:
pubkeyhash = get_pubkeyhash(scriptpubkey)
if not pubkeyhash:
return False
pubkeyhash = binascii.hexlify(pubkeyhash).decode('utf-8')
address = script.base58_check_encode(pubkeyhash, config.ADDRESSVERSION)
# Test decoding of address.
if address != config.UNSPENDABLE and binascii.unhexlify(bytes(pubkeyhash, 'utf-8')) != script.base58_check_decode(address, config.ADDRESSVERSION):
return False
return address
outputs = []
    check_sources = db is None # If we didn't get passed a database cursor, assume we have to check for dispenser
for vout in decoded_tx.vout:
address = get_address(vout.scriptPubKey)
destination = None
btc_amount = None
if address:
destination = address
btc_amount = vout.nValue
elif util.enabled('hotfix_dispensers_with_non_p2pkh'):
asm = script.get_asm(vout.scriptPubKey)
if asm[-1] == 'OP_CHECKSIG':
destination, new_data = decode_checksig(asm, decoded_tx)
elif asm[-1] == 'OP_CHECKMULTISIG':
destination, new_data = decode_checkmultisig(asm, decoded_tx)
elif asm[0] == 'OP_HASH160' and asm[-1] == 'OP_EQUAL' and len(asm) == 3:
destination, new_data = decode_scripthash(asm)
elif asm[0] == 'OP_RETURN':
pass #Just ignore.
elif util.enabled('segwit_support') and asm[0] == 0:
# Segwit output
destination, new_data = decode_p2w(vout.scriptPubKey)
else:
logger.error('unrecognised scriptPubkey. Just ignore this: ' + str(asm))
if destination and not new_data:
                btc_amount = vout.nValue
else:
logger.error('cannot parse destination address or new_data found: ' + str(asm))
        if db is not None and dispenser.is_dispensable(db, destination, btc_amount):
check_sources = True
outputs.append((destination, btc_amount))
# Collect all (unique) source addresses.
# if we haven't found them yet
sources = []
if check_sources:
for vin in decoded_tx.vin[:]: # Loop through inputs.
if block_parser:
vin_tx = block_parser.read_raw_transaction(ib2h(vin.prevout.hash))
vin_ctx = backend.deserialize(vin_tx['__data__'])
else:
vin_tx = backend.getrawtransaction(ib2h(vin.prevout.hash)) # TODO: Biggest penalty on parsing is here
vin_ctx = backend.deserialize(vin_tx)
vout = vin_ctx.vout[vin.prevout.n]
asm = script.get_asm(vout.scriptPubKey)
if asm[-1] == 'OP_CHECKSIG':
new_source, new_data = decode_checksig(asm, decoded_tx)
if new_data or not new_source:
raise DecodeError('data in source')
elif asm[-1] == 'OP_CHECKMULTISIG':
new_source, new_data = decode_checkmultisig(asm, decoded_tx)
if new_data or not new_source:
raise DecodeError('data in source')
elif asm[0] == 'OP_HASH160' and asm[-1] == 'OP_EQUAL' and len(asm) == 3:
new_source, new_data = decode_scripthash(asm)
if new_data or not new_source:
raise DecodeError('data in source')
elif util.enabled('segwit_support') and asm[0] == 0:
# Segwit output
# Get the full transaction data for this input transaction.
new_source, new_data = decode_p2w(vout.scriptPubKey)
else:
raise DecodeError('unrecognised source type')
            # Old behaviour: append every input to sources, which results in invalid addresses.
            # New behaviour: the first source found is the source; the rest can be anything (to fund the TX, for example).
if not (util.enabled('first_input_is_source') and len(sources)):
# Collect unique sources.
if new_source not in sources:
sources.append(new_source)
return (sources, outputs)
def _get_tx_info(tx_hex, block_parser=None, block_index=None, p2sh_is_segwit=False):
"""Get the transaction info. Calls one of two subfunctions depending on signature type."""
if not block_index:
block_index = util.CURRENT_BLOCK_INDEX
if util.enabled('p2sh_addresses', block_index=block_index): # Protocol change.
return get_tx_info3(tx_hex, block_parser=block_parser, p2sh_is_segwit=p2sh_is_segwit)
elif util.enabled('multisig_addresses', block_index=block_index): # Protocol change.
return get_tx_info2(tx_hex, block_parser=block_parser)
else:
return get_tx_info1(tx_hex, block_index, block_parser=block_parser)
def get_tx_info1(tx_hex, block_index, block_parser=None):
"""Get singlesig transaction info.
The destination, if it exists, always comes before the data output; the
change, if it exists, always comes after.
"""
ctx = backend.deserialize(tx_hex)
def get_pubkeyhash(scriptpubkey):
asm = script.get_asm(scriptpubkey)
if len(asm) != 5 or asm[0] != 'OP_DUP' or asm[1] != 'OP_HASH160' or asm[3] != 'OP_EQUALVERIFY' or asm[4] != 'OP_CHECKSIG':
return False
return asm[2]
def get_address(scriptpubkey):
pubkeyhash = get_pubkeyhash(scriptpubkey)
if not pubkeyhash:
return False
pubkeyhash = binascii.hexlify(pubkeyhash).decode('utf-8')
address = script.base58_check_encode(pubkeyhash, config.ADDRESSVERSION)
# Test decoding of address.
if address != config.UNSPENDABLE and binascii.unhexlify(bytes(pubkeyhash, 'utf-8')) != script.base58_check_decode(address, config.ADDRESSVERSION):
return False
return address
# Fee is the input values minus output values.
fee = 0
# Get destination output and data output.
destination, btc_amount, data = None, None, b''
pubkeyhash_encoding = False
for vout in ctx.vout:
fee -= vout.nValue
# Sum data chunks to get data. (Can mix OP_RETURN and multi-sig.)
asm = script.get_asm(vout.scriptPubKey)
if len(asm) == 2 and asm[0] == 'OP_RETURN': # OP_RETURN
if type(asm[1]) != bytes:
continue
data_chunk = asm[1]
data += data_chunk
elif len(asm) == 5 and asm[0] == 1 and asm[3] == 2 and asm[4] == 'OP_CHECKMULTISIG': # Multi-sig
if type(asm[2]) != bytes:
continue
data_pubkey = asm[2]
data_chunk_length = data_pubkey[0] # No ord() necessary.
data_chunk = data_pubkey[1:data_chunk_length + 1]
data += data_chunk
elif len(asm) == 5 and (block_index >= 293000 or config.TESTNET or config.REGTEST): # Protocol change.
# Be strict.
pubkeyhash = get_pubkeyhash(vout.scriptPubKey)
if not pubkeyhash:
continue
if ctx.is_coinbase():
raise DecodeError('coinbase transaction')
obj1 = arc4.init_arc4(ctx.vin[0].prevout.hash[::-1])
data_pubkey = obj1.decrypt(pubkeyhash)
if data_pubkey[1:9] == config.PREFIX or pubkeyhash_encoding:
pubkeyhash_encoding = True
data_chunk_length = data_pubkey[0] # No ord() necessary.
data_chunk = data_pubkey[1:data_chunk_length + 1]
if data_chunk[-8:] == config.PREFIX:
data += data_chunk[:-8]
break
else:
data += data_chunk
# Destination is the first output before the data.
if not destination and not btc_amount and not data:
address = get_address(vout.scriptPubKey)
if address:
destination = address
btc_amount = vout.nValue
# Check for, and strip away, prefix (except for burns).
if destination == config.UNSPENDABLE:
pass
elif data[:len(config.PREFIX)] == config.PREFIX:
data = data[len(config.PREFIX):]
else:
raise DecodeError('no prefix')
# Only look for source if data were found or destination is UNSPENDABLE, for speed.
if not data and destination != config.UNSPENDABLE:
raise BTCOnlyError('no data and not unspendable')
# Collect all possible source addresses; ignore coinbase transactions and anything but the simplest Pay‐to‐PubkeyHash inputs.
source_list = []
for vin in ctx.vin[:]: # Loop through input transactions.
if vin.prevout.is_null():
raise DecodeError('coinbase transaction')
# Get the full transaction data for this input transaction.
if block_parser:
vin_tx = block_parser.read_raw_transaction(ib2h(vin.prevout.hash))
vin_ctx = backend.deserialize(vin_tx['__data__'])
else:
vin_tx = backend.getrawtransaction(ib2h(vin.prevout.hash))
vin_ctx = backend.deserialize(vin_tx)
vout = vin_ctx.vout[vin.prevout.n]
fee += vout.nValue
address = get_address(vout.scriptPubKey)
if not address:
raise DecodeError('invalid scriptpubkey')
else:
source_list.append(address)
# Require that all possible source addresses be the same.
if all(x == source_list[0] for x in source_list):
source = source_list[0]
else:
source = None
return source, destination, btc_amount, fee, data, None
def get_tx_info3(tx_hex, block_parser=None, p2sh_is_segwit=False):
return get_tx_info2(tx_hex, block_parser=block_parser, p2sh_support=True, p2sh_is_segwit=p2sh_is_segwit)
def arc4_decrypt(cyphertext, ctx):
'''Un‐obfuscate. Initialise key once per attempt.'''
key = arc4.init_arc4(ctx.vin[0].prevout.hash[::-1])
return key.decrypt(cyphertext)
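# Illustrative sketch (not part of the original code path): ARC4 is a symmetric
# stream cipher, so a fresh keystream derived from the first input's reversed
# prevout hash both obfuscates and de-obfuscates a chunk. Assuming `ctx` is any
# deserialized transaction with at least one input:
def _arc4_roundtrip_example(ctx, payload):
    obfuscated = arc4.init_arc4(ctx.vin[0].prevout.hash[::-1]).decrypt(payload)
    return arc4_decrypt(obfuscated, ctx) == payload  # always True for the same ctx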
def get_opreturn(asm):
if len(asm) == 2 and asm[0] == 'OP_RETURN':
pubkeyhash = asm[1]
if type(pubkeyhash) == bytes:
return pubkeyhash
raise DecodeError('invalid OP_RETURN')
def decode_opreturn(asm, ctx):
chunk = get_opreturn(asm)
chunk = arc4_decrypt(chunk, ctx)
if chunk[:len(config.PREFIX)] == config.PREFIX: # Data
destination, data = None, chunk[len(config.PREFIX):]
else:
raise DecodeError('unrecognised OP_RETURN output')
return destination, data
def decode_checksig(asm, ctx):
pubkeyhash = script.get_checksig(asm)
chunk = arc4_decrypt(pubkeyhash, ctx)
if chunk[1:len(config.PREFIX) + 1] == config.PREFIX: # Data
# Padding byte in each output (instead of just in the last one) so that encoding methods may be mixed. Also, it’s just not very much data.
chunk_length = chunk[0]
chunk = chunk[1:chunk_length + 1]
destination, data = None, chunk[len(config.PREFIX):]
else: # Destination
pubkeyhash = binascii.hexlify(pubkeyhash).decode('utf-8')
destination, data = script.base58_check_encode(pubkeyhash, config.ADDRESSVERSION), None
return destination, data
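# Illustrative sketch (assumption inferred from the parsing above; the encoder
# itself is not shown in this file): a data-carrying P2PKH output holds, after
# ARC4 de-obfuscation, the bytes
#   [length byte][PREFIX][payload ...]
# so decode_checksig() strips the length byte and the prefix and returns only
# the payload. A chunk for a given payload would look like:
def _build_checksig_chunk_example(payload):
    return bytes([len(config.PREFIX) + len(payload)]) + config.PREFIX + payload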
def decode_scripthash(asm):
destination = script.base58_check_encode(binascii.hexlify(asm[1]).decode('utf-8'), config.P2SH_ADDRESSVERSION)
return destination, None
def decode_checkmultisig(asm, ctx):
pubkeys, signatures_required = script.get_checkmultisig(asm)
chunk = b''
for pubkey in pubkeys[:-1]: # (No data in last pubkey.)
chunk += pubkey[1:-1] # Skip sign byte and nonce byte.
chunk = arc4_decrypt(chunk, ctx)
if chunk[1:len(config.PREFIX) + 1] == config.PREFIX: # Data
# Padding byte in each output (instead of just in the last one) so that encoding methods may be mixed. Also, it’s just not very much data.
chunk_length = chunk[0]
chunk = chunk[1:chunk_length + 1]
destination, data = None, chunk[len(config.PREFIX):]
else: # Destination
pubkeyhashes = [script.pubkey_to_pubkeyhash(pubkey) for pubkey in pubkeys]
destination, data = script.construct_array(signatures_required, pubkeyhashes, len(pubkeyhashes)), None
return destination, data
def decode_p2w(script_pubkey):
bech32 = bitcoinlib.bech32.CBech32Data.from_bytes(0, script_pubkey[2:22])
return str(bech32), None
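# Note (illustrative): a version-0 P2WPKH scriptPubKey is 22 bytes,
#   OP_0 (0x00) + push-20 (0x14) + <20-byte key hash>,
# which is why decode_p2w() slices script_pubkey[2:22] before bech32-encoding
# the program with witness version 0. For example, given a 20-byte hash `h`:
#   decode_p2w(b'\x00\x14' + h)  # -> ('<bech32 address>', None)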
def get_tx_info2(tx_hex, block_parser=None, p2sh_support=False, p2sh_is_segwit=False):
"""Get multisig transaction info.
    The destinations, if they exist, always come before the data output; the
change, if it exists, always comes after.
"""
# Decode transaction binary.
ctx = backend.deserialize(tx_hex)
# Ignore coinbase transactions.
if ctx.is_coinbase():
raise DecodeError('coinbase transaction')
# Get destinations and data outputs.
destinations, btc_amount, fee, data = [], 0, 0, b''
for vout in ctx.vout:
# Fee is the input values minus output values.
output_value = vout.nValue
fee -= output_value
# Ignore transactions with invalid script.
try:
asm = script.get_asm(vout.scriptPubKey)
except CScriptInvalidError as e:
raise DecodeError(e)
if asm[0] == 'OP_RETURN':
new_destination, new_data = decode_opreturn(asm, ctx)
elif asm[-1] == 'OP_CHECKSIG':
new_destination, new_data = decode_checksig(asm, ctx)
elif asm[-1] == 'OP_CHECKMULTISIG':
try:
new_destination, new_data = decode_checkmultisig(asm, ctx)
except:
raise DecodeError('unrecognised output type')
elif p2sh_support and asm[0] == 'OP_HASH160' and asm[-1] == 'OP_EQUAL' and len(asm) == 3:
new_destination, new_data = decode_scripthash(asm)
elif util.enabled('segwit_support') and asm[0] == 0:
# Segwit Vout, second param is redeemScript
#redeemScript = asm[1]
#new_destination, new_data = None, None
continue
else:
raise DecodeError('unrecognised output type')
assert not (new_destination and new_data)
        assert new_destination is not None or new_data is not None  # `decode_*()` should never return `None, None`.
if util.enabled('null_data_check'):
if new_data == []:
raise DecodeError('new destination is `None`')
# All destinations come before all data.
if not data and not new_data and destinations != [config.UNSPENDABLE,]:
destinations.append(new_destination)
btc_amount += output_value
else:
if new_destination: # Change.
break
else: # Data.
data += new_data
# source can be determined by parsing the p2sh_data transaction
# or from the first spent output
sources = []
# P2SH encoding signalling
p2sh_encoding_source = None
if util.enabled('p2sh_encoding') and data == b'P2SH':
data = b''
for vin in ctx.vin:
# Ignore transactions with invalid script.
try:
asm = script.get_asm(vin.scriptSig)
except CScriptInvalidError as e:
raise DecodeError(e)
new_source, new_destination, new_data = p2sh_encoding.decode_p2sh_input(asm, p2sh_is_segwit=p2sh_is_segwit)
# this could be a p2sh source address with no encoded data
if new_data is None:
                continue
if new_source is not None:
if p2sh_encoding_source is not None and new_source != p2sh_encoding_source:
# this p2sh data input has a bad source address
raise DecodeError('inconsistent p2sh inputs')
p2sh_encoding_source = new_source
assert not new_destination
data += new_data
# Only look for source if data were found or destination is `UNSPENDABLE`,
# for speed.
if not data and destinations != [config.UNSPENDABLE,]:
raise BTCOnlyError('no data and not unspendable', ctx)
# Collect all (unique) source addresses.
# if we haven't found them yet
for vin in ctx.vin[:]: # Loop through inputs.
# Get the full transaction data for this input transaction.
if block_parser:
vin_tx = block_parser.read_raw_transaction(ib2h(vin.prevout.hash))
vin_ctx = backend.deserialize(vin_tx['__data__'])
else:
vin_tx = backend.getrawtransaction(ib2h(vin.prevout.hash))
vin_ctx = backend.deserialize(vin_tx)
vout = vin_ctx.vout[vin.prevout.n]
fee += vout.nValue
asm = script.get_asm(vout.scriptPubKey)
if asm[-1] == 'OP_CHECKSIG':
new_source, new_data = decode_checksig(asm, ctx)
if new_data or not new_source:
raise DecodeError('data in source')
elif asm[-1] == 'OP_CHECKMULTISIG':
new_source, new_data = decode_checkmultisig(asm, ctx)
if new_data or not new_source:
raise DecodeError('data in source')
elif p2sh_support and asm[0] == 'OP_HASH160' and asm[-1] == 'OP_EQUAL' and len(asm) == 3:
new_source, new_data = decode_scripthash(asm)
if new_data or not new_source:
raise DecodeError('data in source')
elif util.enabled('segwit_support') and asm[0] == 0:
# Segwit output
new_source, new_data = decode_p2w(vout.scriptPubKey)
else:
raise DecodeError('unrecognised source type')
        # Old behaviour: append every input to sources, which results in invalid addresses.
        # New behaviour: the first source found is the source; the rest can be anything (to fund the TX, for example).
if not (util.enabled('first_input_is_source') and len(sources)):
# Collect unique sources.
if new_source not in sources:
sources.append(new_source)
# use the source from the p2sh data source
if p2sh_encoding_source is not None:
sources = p2sh_encoding_source
else:
sources = '-'.join(sources)
destinations = '-'.join(destinations)
return sources, destinations, btc_amount, round(fee), data, None
def reinitialise(db, block_index=None):
"""Drop all predefined tables and initialise the database once again."""
cursor = db.cursor()
# Delete all of the results of parsing (including the undolog)
for table in TABLES + ['balances', 'undolog', 'undolog_block']:
cursor.execute('''DROP TABLE IF EXISTS {}'''.format(table))
# Create missing tables
initialise(db)
# clean consensus hashes if first block hash doesn't match with checkpoint.
if config.TESTNET:
checkpoints = check.CHECKPOINTS_TESTNET
elif config.REGTEST:
checkpoints = check.CHECKPOINTS_REGTEST
else:
checkpoints = check.CHECKPOINTS_MAINNET
columns = [column['name'] for column in cursor.execute('''PRAGMA table_info(blocks)''')]
for field in ['ledger_hash', 'txlist_hash']:
if field in columns:
sql = '''SELECT {} FROM blocks WHERE block_index = ?'''.format(field)
first_block = list(cursor.execute(sql, (config.BLOCK_FIRST,)))
if first_block:
first_hash = first_block[0][field]
if first_hash != checkpoints[config.BLOCK_FIRST][field]:
logger.info('First hash changed. Cleaning {}.'.format(field))
cursor.execute('''UPDATE blocks SET {} = NULL'''.format(field))
# For rollbacks, just delete new blocks and then reparse what’s left.
if block_index:
cursor.execute('''DELETE FROM transactions WHERE block_index > ?''', (block_index,))
cursor.execute('''DELETE FROM blocks WHERE block_index > ?''', (block_index,))
elif config.TESTNET or config.REGTEST: # block_index NOT specified and we are running testnet
# just blow away the consensus hashes with a full testnet reparse, as we could activate
# new features retroactively, which could otherwise lead to ConsensusError exceptions being raised.
logger.info("Testnet/regtest full reparse detected: Clearing all consensus hashes before performing reparse.")
cursor.execute('''UPDATE blocks SET ledger_hash = NULL, txlist_hash = NULL, messages_hash = NULL''')
cursor.close()
def reparse(db, block_index=None, quiet=False):
"""Reparse all transactions (atomically). If block_index is set, rollback
to the end of that block.
"""
def reparse_from_undolog(db, block_index, quiet):
"""speedy reparse method that utilizes the undolog.
if fails, fallback to the full reparse method"""
if not block_index:
return False # Can't reparse from undolog
undolog_cursor = db.cursor()
undolog_cursor.setexectrace(None)
undolog_cursor.setrowtrace(None)
def get_block_index_for_undo_index(undo_indexes, undo_index):
for block_index, first_undo_index in undo_indexes.items(): #in order
if undo_index < first_undo_index:
return block_index - 1
else:
return next(reversed(undo_indexes)) #the last inserted block_index
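        # Worked example (illustrative): with undo_indexes {500: 10, 501: 17, 502: 25},
        # undo_index 20 is smaller than the first_undo_index of block 502, so the
        # entry belongs to block 501; undo_index 30 is past every first_undo_index,
        # so the for-else returns the last inserted block (502).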
with db:
# Check if we can reparse from the undolog
results = list(undolog_cursor.execute(
'''SELECT block_index, first_undo_index FROM undolog_block WHERE block_index >= ? ORDER BY block_index ASC''', (block_index,)))
undo_indexes = collections.OrderedDict()
for result in results:
undo_indexes[result[0]] = result[1]
undo_start_block_index = block_index + 1
if undo_start_block_index not in undo_indexes:
if block_index in undo_indexes:
# Edge case, should only happen if we're "rolling back" to latest block (e.g. via cmd line)
return True #skip undo
else:
return False # Undolog doesn't go that far back, full reparse required...
# Grab the undolog...
undolog = list(undolog_cursor.execute(
'''SELECT undo_index, sql FROM undolog WHERE undo_index >= ? ORDER BY undo_index DESC''',
(undo_indexes[undo_start_block_index],)))
# Replay the undolog backwards, from the last entry to first_undo_index...
for entry in undolog:
logger.info("Undolog: Block {} (undo_index {}): {}".format(
get_block_index_for_undo_index(undo_indexes, entry[0]), entry[0], entry[1]))
undolog_cursor.execute(entry[1])
# Trim back tx and blocks
undolog_cursor.execute('''DELETE FROM transactions WHERE block_index > ?''', (block_index,))
undolog_cursor.execute('''DELETE FROM blocks WHERE block_index > ?''', (block_index,))
# As well as undolog entries...
undolog_cursor.execute('''DELETE FROM undolog WHERE undo_index >= ?''', (undo_indexes[undo_start_block_index],))
undolog_cursor.execute('''DELETE FROM undolog_block WHERE block_index >= ?''', (undo_start_block_index,))
undolog_cursor.close()
return True
if block_index:
logger.info('Rolling back transactions to block {}.'.format(block_index))
else:
logger.info('Reparsing all transactions.')
check.software_version()
reparse_start = time.time()
# Reparse from the undolog if possible
reparsed = reparse_from_undolog(db, block_index, quiet)
cursor = db.cursor()
if not reparsed:
if block_index:
logger.info("Could not roll back from undolog. Performing full reparse instead...")
if quiet:
root_logger = logging.getLogger()
root_level = logger.getEffectiveLevel()
with db:
reinitialise(db, block_index)
# Reparse all blocks, transactions.
if quiet:
root_logger.setLevel(logging.WARNING)
previous_ledger_hash, previous_txlist_hash, previous_messages_hash = None, None, None
cursor.execute('''SELECT * FROM blocks ORDER BY block_index''')
for block in cursor.fetchall():
util.CURRENT_BLOCK_INDEX = block['block_index']
previous_ledger_hash, previous_txlist_hash, previous_messages_hash, previous_found_messages_hash = parse_block(
db, block['block_index'], block['block_time'],
previous_ledger_hash=previous_ledger_hash,
previous_txlist_hash=previous_txlist_hash,
previous_messages_hash=previous_messages_hash)
if quiet and block['block_index'] % 10 == 0: # every 10 blocks print status
root_logger.setLevel(logging.INFO)
logger.info('Block (re-parse): %s (hashes: L:%s / TX:%s / M:%s%s)' % (
block['block_index'], previous_ledger_hash[-5:], previous_txlist_hash[-5:], previous_messages_hash[-5:],
(' [overwrote %s]' % previous_found_messages_hash) if previous_found_messages_hash and previous_found_messages_hash != previous_messages_hash else ''))
if quiet and block['block_index'] % 10 == 0:
root_logger.setLevel(logging.WARNING)
if quiet:
root_logger.setLevel(root_level)
with db:
# Check for conservation of assets.
check.asset_conservation(db)
# Update database version number.
database.update_version(db)
cursor.close()
reparse_end = time.time()
logger.info("Reparse took {:.3f} minutes.".format((reparse_end - reparse_start) / 60.0))
# on full reparse - vacuum the DB afterwards for better subsequent performance (especially on non-SSDs)
if not block_index:
database.vacuum(db)
def list_tx(db, block_hash, block_index, block_time, tx_hash, tx_index, tx_hex=None):
assert type(tx_hash) == str
cursor = db.cursor()
# Edge case: confirmed tx_hash also in mempool
cursor.execute('''SELECT * FROM transactions WHERE tx_hash = ?''', (tx_hash,))
transactions = list(cursor)
if transactions:
return tx_index
# Get the important details about each transaction.
if tx_hex is None:
tx_hex = backend.getrawtransaction(tx_hash) # TODO: This is the call that is stalling the process the most
source, destination, btc_amount, fee, data, decoded_tx = get_tx_info(tx_hex, db=db)
if not source and decoded_tx and util.enabled('dispensers', block_index):
outputs = decoded_tx[1]
for out in outputs:
if out[0] != decoded_tx[0][0] and dispenser.is_dispensable(db, out[0], out[1]):
source = decoded_tx[0][0]
destination = out[0]
btc_amount = out[1]
fee = 0
data = struct.pack(config.SHORT_TXTYPE_FORMAT, dispenser.DISPENSE_ID)
data += b'\x00'
break # Prevent inspection of further dispenses (only first one is valid)
# For mempool
    if block_hash is None:
block_hash = config.MEMPOOL_BLOCK_HASH
block_index = config.MEMPOOL_BLOCK_INDEX
else:
assert block_index == util.CURRENT_BLOCK_INDEX
if source and (data or destination == config.UNSPENDABLE or decoded_tx):
logger.debug('Saving transaction: {}'.format(tx_hash))
cursor.execute('''INSERT INTO transactions(
tx_index,
tx_hash,
block_index,
block_hash,
block_time,
source,
destination,
btc_amount,
fee,
data) VALUES(?,?,?,?,?,?,?,?,?,?)''',
(tx_index,
tx_hash,
block_index,
block_hash,
block_time,
source,
destination,
btc_amount,
fee,
data)
)
cursor.close()
return tx_index + 1
else:
logger.getChild('list_tx.skip').debug('Skipping transaction: {}'.format(tx_hash))
return tx_index
def kickstart(db, bitcoind_dir):
if bitcoind_dir is None:
if platform.system() == 'Darwin':
bitcoind_dir = os.path.expanduser('~/Library/Application Support/Unobtanium/')
elif platform.system() == 'Windows':
bitcoind_dir = os.path.join(os.environ['APPDATA'], 'Unobtanium')
else:
bitcoind_dir = os.path.expanduser('~/.unobtanium')
if not os.path.isdir(bitcoind_dir):
raise Exception('Unobtanium Core data directory not found at {}. Use --bitcoind-dir parameter.'.format(bitcoind_dir))
cursor = db.cursor()
logger.warning('''Warning:
- Ensure that unobtaniumd is stopped.
- You must reindex unobtaniumd after the initialization is complete (restart with `-reindex=1`)
- The initialization may take a while.''')
if input('Proceed with the initialization? (y/N) : ') != 'y':
return
if config.TESTNET:
first_hash = config.BLOCK_FIRST_TESTNET_HASH
elif config.REGTEST:
first_hash = config.BLOCK_FIRST_REGTEST_HASH
else:
first_hash = config.BLOCK_FIRST_MAINNET_HASH
start_time_total = time.time()
# Get hash of last known block.
chain_parser = ChainstateParser(os.path.join(bitcoind_dir, 'chainstate'))
last_hash = chain_parser.get_last_block_hash()
chain_parser.close()
# Start block parser.
block_parser = BlockchainParser(os.path.join(bitcoind_dir, 'blocks'), os.path.join(bitcoind_dir, 'blocks/index'))
current_hash = last_hash
tx_index = 0
with db:
# Prepare SQLite database. # TODO: Be more specific!
logger.info('Preparing database.')
start_time = time.time()
reinitialise(db, block_index=config.BLOCK_FIRST - 1)
logger.info('Prepared database in {:.3f}s'.format(time.time() - start_time))
# Get blocks and transactions, moving backwards in time.
        while current_hash is not None:
start_time = time.time()
transactions = []
# Get `tx_info`s for transactions in this block.
block = block_parser.read_raw_block(current_hash)
for tx in block['transactions']:
                source, destination, btc_amount, fee, data, _ = get_tx_info(tx['__data__'], block_parser=block_parser, block_index=block['block_index'])
if source and (data or destination == config.UNSPENDABLE):
transactions.append((
tx['tx_hash'], block['block_index'], block['block_hash'], block['block_time'],
source, destination, btc_amount, fee, data
))
logger.info('Valid transaction: {}'.format(tx['tx_hash']))
# Insert block and transactions into database.
cursor.execute('''INSERT INTO blocks(
block_index,
block_hash,
block_time) VALUES(?,?,?)''',
(block['block_index'],
block['block_hash'],
block['block_time']))
if len(transactions):
transactions = list(reversed(transactions))
tx_chunks = [transactions[i:i+90] for i in range(0, len(transactions), 90)]
for tx_chunk in tx_chunks:
sql = '''INSERT INTO transactions
(tx_index, tx_hash, block_index, block_hash, block_time, source, destination, btc_amount, fee, data)
VALUES '''
bindings = ()
bindings_place = []
# negative tx_index from -1 and inverse order for fast reordering # TODO: Can this be clearer?
for tx in tx_chunk:
bindings += (-(tx_index + 1),) + tx
bindings_place.append('''(?,?,?,?,?,?,?,?,?,?)''')
tx_index += 1
sql += ', '.join(bindings_place)
cursor.execute(sql, bindings)
logger.info('Block {} ({}): {}/{} saved in {:.3f}s'.format(
block['block_index'], block['block_hash'],
len(transactions), len(block['transactions']),
time.time() - start_time))
# Get hash of next block.
current_hash = block['hash_prev'] if current_hash != first_hash else None
block_parser.close()
# Reorder all transactions in database.
logger.info('Reordering transactions.')
start_time = time.time()
cursor.execute('''UPDATE transactions SET tx_index = tx_index + ?''', (tx_index,))
logger.info('Reordered transactions in {:.3f}s.'.format(time.time() - start_time))
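        # Worked example (illustrative): blocks are read newest-first and each
        # transaction is inserted with a negative index (-1, -2, ...), so after
        # N insertions the UPDATE above adds N to every tx_index, mapping
        # -N..-1 onto 0..N-1 - i.e. oldest transaction first - without re-sorting.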
# Parse all transactions in database.
reparse(db)
cursor.close()
logger.info('Total duration: {:.3f}s'.format(time.time() - start_time_total))
def last_db_index(db):
cursor = db.cursor()
try:
blocks = list(cursor.execute('''SELECT * FROM blocks WHERE block_index = (SELECT MAX(block_index) from blocks)'''))
try:
return blocks[0]['block_index']
except IndexError:
return 0
except apsw.SQLError:
return 0
def get_next_tx_index(db):
"""Return index of next transaction."""
cursor = db.cursor()
txes = list(cursor.execute('''SELECT * FROM transactions WHERE tx_index = (SELECT MAX(tx_index) from transactions)'''))
if txes:
assert len(txes) == 1
tx_index = txes[0]['tx_index'] + 1
else:
tx_index = 0
cursor.close()
return tx_index
class MempoolError(Exception):
pass
def follow(db):
# Check software version.
check.software_version()
# Initialise.
initialise(db)
# Get index of last block.
if util.CURRENT_BLOCK_INDEX == 0:
logger.warning('New database.')
block_index = config.BLOCK_FIRST
else:
block_index = util.CURRENT_BLOCK_INDEX + 1
# Check database version.
try:
check.database_version(db)
except check.DatabaseVersionError as e:
logger.info(str(e))
# no need to reparse or rollback a new database
if block_index != config.BLOCK_FIRST:
reparse(db, block_index=e.reparse_block_index, quiet=False)
else: #version update was included in reparse(), so don't do it twice
database.update_version(db)
logger.info('Resuming parsing.')
# Get index of last transaction.
tx_index = get_next_tx_index(db)
not_supported = {} # No false positives. Use a dict to allow for O(1) lookups
not_supported_sorted = collections.deque()
    # ^ Entries in form of (block_index, tx_hash), oldest first. Allows for easy removal of past, unnecessary entries
cursor = db.cursor()
# a reorg can happen without the block count increasing, or even for that
# matter, with the block count decreasing. This should only delay
# processing of the new blocks a bit.
while True:
start_time = time.time()
# Get block count.
# If the backend is unreachable and `config.FORCE` is set, just sleep
# and try again repeatedly.
try:
block_count = backend.getblockcount()
except (ConnectionRefusedError, http.client.CannotSendRequest, backend.addrindexrs_uno.BackendRPCError) as e:
if config.FORCE:
time.sleep(config.BACKEND_POLL_INTERVAL)
continue
else:
raise e
# Get new blocks.
if block_index <= block_count:
current_index = block_index
# Backwards check for incorrect blocks due to chain reorganisation, and stop when a common parent is found.
            if block_count - block_index < 100:  # The undolog only saves the last 100 blocks; a reorg deeper than that requires a manual reparse
requires_rollback = False
while True:
if current_index == config.BLOCK_FIRST:
break
logger.debug('Checking that block {} is not an orphan.'.format(current_index))
# Backend parent hash.
current_hash = backend.getblockhash(current_index)
current_cblock = backend.getblock(current_hash)
backend_parent = bitcoinlib.core.b2lx(current_cblock.hashPrevBlock)
# DB parent hash.
blocks = list(cursor.execute('''SELECT * FROM blocks
WHERE block_index = ?''', (current_index - 1,)))
if len(blocks) != 1: # For empty DB.
break
db_parent = blocks[0]['block_hash']
# Compare.
assert type(db_parent) == str
assert type(backend_parent) == str
if db_parent == backend_parent:
break
else:
current_index -= 1
requires_rollback = True
# Rollback for reorganisation.
if requires_rollback:
# Record reorganisation.
logger.warning('Blockchain reorganisation at block {}.'.format(current_index))
log.message(db, block_index, 'reorg', None, {'block_index': current_index})
# Rollback the DB.
reparse(db, block_index=current_index-1, quiet=True)
block_index = current_index
tx_index = get_next_tx_index(db)
continue
# Check version. (Don’t add any blocks to the database while
# running an out‐of‐date client!)
check.software_version()
# Get and parse transactions in this block (atomically).
block_hash = backend.getblockhash(current_index)
block = backend.getblock(block_hash)
previous_block_hash = bitcoinlib.core.b2lx(block.hashPrevBlock)
block_time = block.nTime
txhash_list, raw_transactions = backend.get_tx_list(block)
with db:
util.CURRENT_BLOCK_INDEX = block_index
# List the block.
cursor.execute('''INSERT INTO blocks(
block_index,
block_hash,
block_time,
previous_block_hash,
difficulty) VALUES(?,?,?,?,?)''',
(block_index,
block_hash,
block_time,
previous_block_hash,
block.difficulty)
)
# List the transactions in the block.
for tx_hash in txhash_list:
tx_hex = raw_transactions[tx_hash]
tx_index = list_tx(db, block_hash, block_index, block_time, tx_hash, tx_index, tx_hex)
# Parse the transactions in the block.
new_ledger_hash, new_txlist_hash, new_messages_hash, found_messages_hash = parse_block(db, block_index, block_time)
# When newly caught up, check for conservation of assets.
if block_index == block_count:
if config.CHECK_ASSET_CONSERVATION:
check.asset_conservation(db)
# Remove any non‐supported transactions older than ten blocks.
while len(not_supported_sorted) and not_supported_sorted[0][0] <= block_index - 10:
tx_h = not_supported_sorted.popleft()[1]
del not_supported[tx_h]
logger.info('Block: %s (%ss, hashes: L:%s / TX:%s / M:%s%s)' % (
str(block_index), "{:.2f}".format(time.time() - start_time, 3),
new_ledger_hash[-5:], new_txlist_hash[-5:], new_messages_hash[-5:],
(' [overwrote %s]' % found_messages_hash) if found_messages_hash and found_messages_hash != new_messages_hash else ''))
# Increment block index.
block_count = backend.getblockcount()
block_index += 1
else:
# TODO: add zeromq support here to await TXs and Blocks instead of constantly polling
# Get old mempool.
old_mempool = list(cursor.execute('''SELECT * FROM mempool'''))
old_mempool_hashes = [message['tx_hash'] for message in old_mempool]
if backend.MEMPOOL_CACHE_INITIALIZED is False:
backend.init_mempool_cache()
logger.info("Ready for queries.")
# Fake values for fake block.
curr_time = int(time.time())
mempool_tx_index = tx_index
xcp_mempool = []
raw_mempool = backend.getrawmempool()
            # This is a quick fix to keep unoparty usable in high-mempool situations.
            # However, it makes the mempool unreliable on unoparty; a better, larger
            # fix must be done by changing this whole function into a zmq-driven loop.
if len(raw_mempool) > config.MEMPOOL_TXCOUNT_UPDATE_LIMIT:
continue
# For each transaction in Unobtanium Core mempool, if it’s new, create
# a fake block, a fake transaction, capture the generated messages,
# and then save those messages.
# Every transaction in mempool is parsed independently. (DB is rolled back after each one.)
# We first filter out which transactions we've already parsed before so we can batch fetch their raw data
parse_txs = []
for tx_hash in raw_mempool:
# If already in mempool, copy to new one.
if tx_hash in old_mempool_hashes:
for message in old_mempool:
if message['tx_hash'] == tx_hash:
xcp_mempool.append((tx_hash, message))
# If not a supported XUP transaction, skip.
elif tx_hash in not_supported:
pass
# Else: list, parse and save it.
else:
parse_txs.append(tx_hash)
# fetch raw for all transactions that need to be parsed
# Sometimes the transactions can’t be found: `{'code': -5, 'message': 'No information available about transaction'}`
# - is txindex enabled in Unobtaniumd?
            # - or was there a block found while batch fetching the raw txs
# - or was there a double spend for w/e reason accepted into the mempool (replace-by-fee?)
try:
raw_transactions = backend.getrawtransaction_batch(parse_txs)
except Exception as e:
logger.warning('Failed to fetch raw for mempool TXs, restarting loop; %s', (e, ))
continue # restart the follow loop
for tx_hash in parse_txs:
try:
with db:
# List the fake block.
cursor.execute('''INSERT INTO blocks(
block_index,
block_hash,
block_time) VALUES(?,?,?)''',
(config.MEMPOOL_BLOCK_INDEX,
config.MEMPOOL_BLOCK_HASH,
curr_time)
)
tx_hex = raw_transactions[tx_hash]
if tx_hex is None:
logger.debug('tx_hash %s not found in backend. Not adding to mempool.', (tx_hash, ))
raise MempoolError
mempool_tx_index = list_tx(db, None, block_index, curr_time, tx_hash, tx_index=mempool_tx_index, tx_hex=tx_hex)
# Parse transaction.
cursor.execute('''SELECT * FROM transactions WHERE tx_hash = ?''', (tx_hash,))
transactions = list(cursor)
if transactions:
assert len(transactions) == 1
transaction = transactions[0]
supported = parse_tx(db, transaction)
if not supported:
not_supported[tx_hash] = ''
not_supported_sorted.append((block_index, tx_hash))
else:
# If a transaction hasn’t been added to the
# table `transactions`, then it’s not a
# Unoparty transaction.
not_supported[tx_hash] = ''
not_supported_sorted.append((block_index, tx_hash))
raise MempoolError
# Save transaction and side‐effects in memory.
cursor.execute('''SELECT * FROM messages WHERE block_index = ?''', (config.MEMPOOL_BLOCK_INDEX,))
for message in list(cursor):
xcp_mempool.append((tx_hash, message))
# Rollback.
raise MempoolError
except exceptions.ParseTransactionError as e:
                    logger.warning('ParseTransactionError for tx %s: %s' % (tx_hash, e))
except MempoolError:
pass
# Re‐write mempool messages to database.
with db:
cursor.execute('''DELETE FROM mempool''')
for message in xcp_mempool:
tx_hash, new_message = message
new_message['tx_hash'] = tx_hash
cursor.execute('''INSERT INTO mempool VALUES(:tx_hash, :command, :category, :bindings, :timestamp)''', new_message)
elapsed_time = time.time() - start_time
sleep_time = config.BACKEND_POLL_INTERVAL - elapsed_time if elapsed_time <= config.BACKEND_POLL_INTERVAL else 0
logger.getChild('mempool').debug('Refresh mempool: %s XUP txs seen, out of %s total entries (took %ss, next refresh in %ss)' % (
len(xcp_mempool), len(raw_mempool),
"{:.2f}".format(elapsed_time, 3),
"{:.2f}".format(sleep_time, 3)))
# Wait
db.wal_checkpoint(mode=apsw.SQLITE_CHECKPOINT_PASSIVE)
time.sleep(sleep_time)
cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| []
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | python | 1 | 0 | |
tests/test_client.py | # coding: utf-8
import os
import json
import pytest
import subprocess
import sys
import time
from textwrap import dedent
from sentry_sdk import (
Hub,
Client,
add_breadcrumb,
configure_scope,
capture_message,
capture_exception,
capture_event,
start_transaction,
set_tag,
)
from sentry_sdk.integrations.executing import ExecutingIntegration
from sentry_sdk.transport import Transport
from sentry_sdk._compat import reraise, text_type, PY2
from sentry_sdk.utils import HAS_CHAINED_EXCEPTIONS
from sentry_sdk.serializer import MAX_DATABAG_BREADTH
from sentry_sdk.consts import DEFAULT_MAX_BREADCRUMBS
if PY2:
# Importing ABCs from collections is deprecated, and will stop working in 3.8
# https://github.com/python/cpython/blob/master/Lib/collections/__init__.py#L49
from collections import Mapping
else:
# New in 3.3
# https://docs.python.org/3/library/collections.abc.html
from collections.abc import Mapping
class EventCaptured(Exception):
pass
class _TestTransport(Transport):
def capture_event(self, event):
raise EventCaptured(event)
def test_transport_option(monkeypatch):
if "SENTRY_DSN" in os.environ:
monkeypatch.delenv("SENTRY_DSN")
dsn = "https://[email protected]/123"
dsn2 = "https://[email protected]/124"
assert str(Client(dsn=dsn).dsn) == dsn
assert Client().dsn is None
monkeypatch.setenv("SENTRY_DSN", dsn)
transport = Transport({"dsn": dsn2})
assert text_type(transport.parsed_dsn) == dsn2
assert str(Client(transport=transport).dsn) == dsn
@pytest.mark.parametrize(
"testcase",
[
{
"dsn": "http://[email protected]/123",
"env_http_proxy": None,
"env_https_proxy": None,
"arg_http_proxy": "http://localhost/123",
"arg_https_proxy": None,
"expected_proxy_scheme": "http",
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": None,
"env_https_proxy": None,
"arg_http_proxy": "https://localhost/123",
"arg_https_proxy": None,
"expected_proxy_scheme": "https",
},
{
"dsn": "http://[email protected]/123",
"env_http_proxy": None,
"env_https_proxy": None,
"arg_http_proxy": "http://localhost/123",
"arg_https_proxy": "https://localhost/123",
"expected_proxy_scheme": "http",
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": None,
"env_https_proxy": None,
"arg_http_proxy": "http://localhost/123",
"arg_https_proxy": "https://localhost/123",
"expected_proxy_scheme": "https",
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": None,
"env_https_proxy": None,
"arg_http_proxy": "http://localhost/123",
"arg_https_proxy": None,
"expected_proxy_scheme": "http",
},
{
"dsn": "http://[email protected]/123",
"env_http_proxy": None,
"env_https_proxy": None,
"arg_http_proxy": None,
"arg_https_proxy": None,
"expected_proxy_scheme": None,
},
{
"dsn": "http://[email protected]/123",
"env_http_proxy": "http://localhost/123",
"env_https_proxy": None,
"arg_http_proxy": None,
"arg_https_proxy": None,
"expected_proxy_scheme": "http",
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": None,
"env_https_proxy": "https://localhost/123",
"arg_http_proxy": None,
"arg_https_proxy": None,
"expected_proxy_scheme": "https",
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": "http://localhost/123",
"env_https_proxy": None,
"arg_http_proxy": None,
"arg_https_proxy": None,
"expected_proxy_scheme": "http",
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": "http://localhost/123",
"env_https_proxy": "https://localhost/123",
"arg_http_proxy": "",
"arg_https_proxy": "",
"expected_proxy_scheme": None,
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": "http://localhost/123",
"env_https_proxy": "https://localhost/123",
"arg_http_proxy": None,
"arg_https_proxy": None,
"expected_proxy_scheme": "https",
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": "http://localhost/123",
"env_https_proxy": None,
"arg_http_proxy": None,
"arg_https_proxy": None,
"expected_proxy_scheme": "http",
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": "http://localhost/123",
"env_https_proxy": "https://localhost/123",
"arg_http_proxy": None,
"arg_https_proxy": "",
"expected_proxy_scheme": "http",
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": "http://localhost/123",
"env_https_proxy": "https://localhost/123",
"arg_http_proxy": "",
"arg_https_proxy": None,
"expected_proxy_scheme": "https",
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": None,
"env_https_proxy": "https://localhost/123",
"arg_http_proxy": None,
"arg_https_proxy": "",
"expected_proxy_scheme": None,
},
{
"dsn": "http://[email protected]/123",
"env_http_proxy": "http://localhost/123",
"env_https_proxy": "https://localhost/123",
"arg_http_proxy": None,
"arg_https_proxy": None,
"expected_proxy_scheme": "http",
},
# NO_PROXY testcases
{
"dsn": "http://[email protected]/123",
"env_http_proxy": "http://localhost/123",
"env_https_proxy": None,
"env_no_proxy": "sentry.io,example.com",
"arg_http_proxy": None,
"arg_https_proxy": None,
"expected_proxy_scheme": None,
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": None,
"env_https_proxy": "https://localhost/123",
"env_no_proxy": "example.com,sentry.io",
"arg_http_proxy": None,
"arg_https_proxy": None,
"expected_proxy_scheme": None,
},
{
"dsn": "http://[email protected]/123",
"env_http_proxy": None,
"env_https_proxy": None,
"env_no_proxy": "sentry.io,example.com",
"arg_http_proxy": "http://localhost/123",
"arg_https_proxy": None,
"expected_proxy_scheme": "http",
},
{
"dsn": "https://[email protected]/123",
"env_http_proxy": None,
"env_https_proxy": None,
"env_no_proxy": "sentry.io,example.com",
"arg_http_proxy": None,
"arg_https_proxy": "https://localhost/123",
"expected_proxy_scheme": "https",
},
],
)
def test_proxy(monkeypatch, testcase):
if testcase["env_http_proxy"] is not None:
monkeypatch.setenv("HTTP_PROXY", testcase["env_http_proxy"])
if testcase["env_https_proxy"] is not None:
monkeypatch.setenv("HTTPS_PROXY", testcase["env_https_proxy"])
if testcase.get("env_no_proxy") is not None:
monkeypatch.setenv("NO_PROXY", testcase["env_no_proxy"])
kwargs = {}
if testcase["arg_http_proxy"] is not None:
kwargs["http_proxy"] = testcase["arg_http_proxy"]
if testcase["arg_https_proxy"] is not None:
kwargs["https_proxy"] = testcase["arg_https_proxy"]
client = Client(testcase["dsn"], **kwargs)
if testcase["expected_proxy_scheme"] is None:
assert client.transport._pool.proxy is None
else:
assert client.transport._pool.proxy.scheme == testcase["expected_proxy_scheme"]
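# Note (illustrative summary of the cases above, not an authoritative spec):
# explicit http_proxy/https_proxy arguments take precedence over the
# HTTP_PROXY/HTTPS_PROXY environment variables, an HTTPS proxy is preferred for
# https DSNs when both are available, an empty string disables that source, and
# NO_PROXY suppresses environment-provided proxies for matching hosts.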
def test_simple_transport(sentry_init):
events = []
sentry_init(transport=events.append)
capture_message("Hello World!")
assert events[0]["message"] == "Hello World!"
def test_ignore_errors(sentry_init, capture_events):
class MyDivisionError(ZeroDivisionError):
pass
def raise_it(exc_info):
reraise(*exc_info)
sentry_init(ignore_errors=[ZeroDivisionError], transport=_TestTransport())
Hub.current._capture_internal_exception = raise_it
def e(exc):
try:
raise exc
except Exception:
capture_exception()
e(ZeroDivisionError())
e(MyDivisionError())
pytest.raises(EventCaptured, lambda: e(ValueError()))
def test_with_locals_enabled(sentry_init, capture_events):
sentry_init(with_locals=True)
events = capture_events()
try:
1 / 0
except Exception:
capture_exception()
(event,) = events
assert all(
frame["vars"]
for frame in event["exception"]["values"][0]["stacktrace"]["frames"]
)
def test_with_locals_disabled(sentry_init, capture_events):
sentry_init(with_locals=False)
events = capture_events()
try:
1 / 0
except Exception:
capture_exception()
(event,) = events
assert all(
"vars" not in frame
for frame in event["exception"]["values"][0]["stacktrace"]["frames"]
)
@pytest.mark.parametrize("integrations", [[], [ExecutingIntegration()]])
def test_function_names(sentry_init, capture_events, integrations):
sentry_init(integrations=integrations)
events = capture_events()
def foo():
try:
bar()
except Exception:
capture_exception()
def bar():
1 / 0
foo()
(event,) = events
(thread,) = event["exception"]["values"]
functions = [x["function"] for x in thread["stacktrace"]["frames"]]
if integrations:
assert functions == [
"test_function_names.<locals>.foo",
"test_function_names.<locals>.bar",
]
else:
assert functions == ["foo", "bar"]
def test_attach_stacktrace_enabled(sentry_init, capture_events):
sentry_init(attach_stacktrace=True)
events = capture_events()
def foo():
bar()
def bar():
capture_message("HI")
foo()
(event,) = events
(thread,) = event["threads"]["values"]
functions = [x["function"] for x in thread["stacktrace"]["frames"]]
assert functions[-2:] == ["foo", "bar"]
def test_attach_stacktrace_enabled_no_locals(sentry_init, capture_events):
sentry_init(attach_stacktrace=True, with_locals=False)
events = capture_events()
def foo():
bar()
def bar():
capture_message("HI")
foo()
(event,) = events
(thread,) = event["threads"]["values"]
local_vars = [x.get("vars") for x in thread["stacktrace"]["frames"]]
assert local_vars[-2:] == [None, None]
def test_attach_stacktrace_in_app(sentry_init, capture_events):
sentry_init(attach_stacktrace=True, in_app_exclude=["_pytest"])
events = capture_events()
capture_message("hi")
(event,) = events
(thread,) = event["threads"]["values"]
frames = thread["stacktrace"]["frames"]
pytest_frames = [f for f in frames if f["module"].startswith("_pytest")]
assert pytest_frames
assert all(f["in_app"] is False for f in pytest_frames)
assert any(f["in_app"] for f in frames)
def test_attach_stacktrace_disabled(sentry_init, capture_events):
sentry_init(attach_stacktrace=False)
events = capture_events()
capture_message("HI")
(event,) = events
assert "threads" not in event
def test_capture_event_works(sentry_init):
sentry_init(transport=_TestTransport())
pytest.raises(EventCaptured, lambda: capture_event({}))
pytest.raises(EventCaptured, lambda: capture_event({}))
@pytest.mark.parametrize("num_messages", [10, 20])
def test_atexit(tmpdir, monkeypatch, num_messages):
app = tmpdir.join("app.py")
app.write(
dedent(
"""
import time
from sentry_sdk import init, transport, capture_message
def send_event(self, event):
time.sleep(0.1)
print(event["message"])
transport.HttpTransport._send_event = send_event
init("http://foobar@localhost/123", shutdown_timeout={num_messages})
for _ in range({num_messages}):
capture_message("HI")
""".format(
num_messages=num_messages
)
)
)
start = time.time()
output = subprocess.check_output([sys.executable, str(app)])
end = time.time()
# Each message takes at least 0.1 seconds to process
assert int(end - start) >= num_messages / 10
assert output.count(b"HI") == num_messages
def test_configure_scope_available(sentry_init, request, monkeypatch):
# Test that scope is configured if client is configured
sentry_init()
with configure_scope() as scope:
assert scope is Hub.current.scope
scope.set_tag("foo", "bar")
calls = []
def callback(scope):
calls.append(scope)
scope.set_tag("foo", "bar")
assert configure_scope(callback) is None
assert len(calls) == 1
assert calls[0] is Hub.current.scope
@pytest.mark.tests_internal_exceptions
def test_client_debug_option_enabled(sentry_init, caplog):
sentry_init(debug=True)
Hub.current._capture_internal_exception((ValueError, ValueError("OK"), None))
assert "OK" in caplog.text
@pytest.mark.tests_internal_exceptions
@pytest.mark.parametrize("with_client", (True, False))
def test_client_debug_option_disabled(with_client, sentry_init, caplog):
if with_client:
sentry_init()
Hub.current._capture_internal_exception((ValueError, ValueError("OK"), None))
assert "OK" not in caplog.text
def test_scope_initialized_before_client(sentry_init, capture_events):
"""
This is a consequence of how configure_scope() works. We must
make `configure_scope()` a noop if no client is configured. Even
if the user later configures a client: We don't know that.
"""
with configure_scope() as scope:
scope.set_tag("foo", 42)
sentry_init()
events = capture_events()
capture_message("hi")
(event,) = events
assert "tags" not in event
def test_weird_chars(sentry_init, capture_events):
sentry_init()
events = capture_events()
# fmt: off
capture_message(u"föö".encode("latin1"))
# fmt: on
(event,) = events
assert json.loads(json.dumps(event)) == event
def test_nan(sentry_init, capture_events):
sentry_init()
events = capture_events()
try:
# should_repr_strings=False
set_tag("mynan", float("nan"))
# should_repr_strings=True
nan = float("nan") # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
frames = event["exception"]["values"][0]["stacktrace"]["frames"]
(frame,) = frames
assert frame["vars"]["nan"] == "nan"
assert event["tags"]["mynan"] == "nan"
def test_cyclic_frame_vars(sentry_init, capture_events):
sentry_init()
events = capture_events()
try:
a = {}
a["a"] = a
1 / 0
except Exception:
capture_exception()
(event,) = events
assert event["exception"]["values"][0]["stacktrace"]["frames"][0]["vars"]["a"] == {
"a": "<cyclic>"
}
def test_cyclic_data(sentry_init, capture_events):
sentry_init()
events = capture_events()
with configure_scope() as scope:
data = {}
data["is_cyclic"] = data
other_data = ""
data["not_cyclic"] = other_data
data["not_cyclic2"] = other_data
scope.set_extra("foo", data)
capture_message("hi")
(event,) = events
data = event["extra"]["foo"]
assert data == {"not_cyclic2": "", "not_cyclic": "", "is_cyclic": "<cyclic>"}
def test_databag_depth_stripping(sentry_init, capture_events, benchmark):
sentry_init()
events = capture_events()
value = ["a"]
for _ in range(100000):
value = [value]
@benchmark
def inner():
del events[:]
try:
a = value # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert len(json.dumps(event)) < 10000
def test_databag_string_stripping(sentry_init, capture_events, benchmark):
sentry_init()
events = capture_events()
@benchmark
def inner():
del events[:]
try:
a = "A" * 1000000 # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert len(json.dumps(event)) < 10000
def test_databag_breadth_stripping(sentry_init, capture_events, benchmark):
sentry_init()
events = capture_events()
@benchmark
def inner():
del events[:]
try:
a = ["a"] * 1000000 # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert (
len(event["exception"]["values"][0]["stacktrace"]["frames"][0]["vars"]["a"])
== MAX_DATABAG_BREADTH
)
assert len(json.dumps(event)) < 10000
@pytest.mark.skipif(not HAS_CHAINED_EXCEPTIONS, reason="Only works on 3.3+")
def test_chained_exceptions(sentry_init, capture_events):
sentry_init()
events = capture_events()
try:
try:
raise ValueError()
except Exception:
1 / 0
except Exception:
capture_exception()
(event,) = events
e1, e2 = event["exception"]["values"]
# This is the order all other SDKs send chained exceptions in. Including
# Raven-Python.
assert e1["type"] == "ValueError"
assert e2["type"] == "ZeroDivisionError"
@pytest.mark.tests_internal_exceptions
def test_broken_mapping(sentry_init, capture_events):
sentry_init()
events = capture_events()
class C(Mapping):
def broken(self, *args, **kwargs):
raise Exception("broken")
__getitem__ = broken
__setitem__ = broken
__delitem__ = broken
__iter__ = broken
__len__ = broken
def __repr__(self):
return "broken"
try:
a = C() # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert (
event["exception"]["values"][0]["stacktrace"]["frames"][0]["vars"]["a"]
== "<failed to serialize, use init(debug=True) to see error logs>"
)
def test_mapping_sends_exception(sentry_init, capture_events):
sentry_init()
events = capture_events()
class C(Mapping):
def __iter__(self):
try:
1 / 0
except ZeroDivisionError:
capture_exception()
yield "hi"
def __len__(self):
"""List length"""
return 1
def __getitem__(self, ii):
"""Get a list item"""
if ii == "hi":
return "hi"
raise KeyError()
try:
a = C() # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert event["exception"]["values"][0]["stacktrace"]["frames"][0]["vars"]["a"] == {
"hi": "'hi'"
}
def test_object_sends_exception(sentry_init, capture_events):
sentry_init()
events = capture_events()
class C(object):
def __repr__(self):
try:
1 / 0
except ZeroDivisionError:
capture_exception()
return "hi, i am a repr"
try:
a = C() # noqa
1 / 0
except Exception:
capture_exception()
(event,) = events
assert (
event["exception"]["values"][0]["stacktrace"]["frames"][0]["vars"]["a"]
== "hi, i am a repr"
)
def test_errno_errors(sentry_init, capture_events):
sentry_init()
events = capture_events()
class Foo(Exception):
errno = 69
capture_exception(Foo())
(event,) = events
(exception,) = event["exception"]["values"]
assert exception["mechanism"]["meta"]["errno"]["number"] == 69
def test_non_string_variables(sentry_init, capture_events):
"""There is some extremely terrible code in the wild that
inserts non-strings as variable names into `locals()`."""
sentry_init()
events = capture_events()
try:
locals()[42] = True
1 / 0
except ZeroDivisionError:
capture_exception()
(event,) = events
(exception,) = event["exception"]["values"]
assert exception["type"] == "ZeroDivisionError"
(frame,) = exception["stacktrace"]["frames"]
assert frame["vars"]["42"] == "True"
def test_dict_changed_during_iteration(sentry_init, capture_events):
"""
Some versions of Bottle modify the WSGI environment inside of this __repr__
impl: https://github.com/bottlepy/bottle/blob/0.12.16/bottle.py#L1386
See https://github.com/getsentry/sentry-python/pull/298 for discussion
"""
sentry_init(send_default_pii=True)
events = capture_events()
class TooSmartClass(object):
def __init__(self, environ):
self.environ = environ
def __repr__(self):
if "my_representation" in self.environ:
return self.environ["my_representation"]
self.environ["my_representation"] = "<This is me>"
return self.environ["my_representation"]
try:
environ = {}
environ["a"] = TooSmartClass(environ)
1 / 0
except ZeroDivisionError:
capture_exception()
(event,) = events
(exception,) = event["exception"]["values"]
(frame,) = exception["stacktrace"]["frames"]
assert frame["vars"]["environ"] == {"a": "<This is me>"}
@pytest.mark.parametrize(
"dsn",
[
"http://894b7d594095440f8dfea9b300e6f572@localhost:8000/2",
"http://894b7d594095440f8dfea9b300e6f572@localhost:8000/2",
],
)
def test_init_string_types(dsn, sentry_init):
# Allow unicode strings on Python 3 and both on Python 2 (due to
# unicode_literals)
#
# Supporting bytes on Python 3 is not really wrong but probably would be
# extra code
sentry_init(dsn)
assert (
Hub.current.client.dsn
== "http://894b7d594095440f8dfea9b300e6f572@localhost:8000/2"
)
def test_envelope_types():
"""
Tests for calling the right transport method (capture_event vs
capture_envelope) from the SDK client for different data types.
"""
envelopes = []
events = []
class CustomTransport(Transport):
def capture_envelope(self, envelope):
envelopes.append(envelope)
def capture_event(self, event):
events.append(event)
with Hub(Client(traces_sample_rate=1.0, transport=CustomTransport())):
event_id = capture_message("hello")
# Assert error events get passed in via capture_event
assert not envelopes
event = events.pop()
assert event["event_id"] == event_id
assert "type" not in event
with start_transaction(name="foo"):
pass
# Assert transactions get passed in via capture_envelope
assert not events
envelope = envelopes.pop()
(item,) = envelope.items
assert item.data_category == "transaction"
assert item.headers.get("type") == "transaction"
assert not envelopes
assert not events
@pytest.mark.parametrize(
"sdk_options, expected_breadcrumbs",
[({}, DEFAULT_MAX_BREADCRUMBS), ({"max_breadcrumbs": 50}, 50)],
)
def test_max_breadcrumbs_option(
sentry_init, capture_events, sdk_options, expected_breadcrumbs
):
sentry_init(sdk_options)
events = capture_events()
for _ in range(1231):
add_breadcrumb({"type": "sourdough"})
capture_message("dogs are great")
assert len(events[0]["breadcrumbs"]["values"]) == expected_breadcrumbs
def test_multiple_positional_args(sentry_init):
with pytest.raises(TypeError) as exinfo:
sentry_init(1, None)
assert "Only single positional argument is expected" in str(exinfo.value)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
app/old/repoSetup.py | #!/usr/bin/python3
#region IMPORT
import os
import pathlib
import time
import string
import subprocess
import argparse
import fnmatch
import glob
import sys
import json
import csv
#from tkinter.messagebox import QUESTION
#from msilib.schema import Directory
from itertools import chain, repeat
from pathlib import Path
from os.path import exists
from dotenv import load_dotenv
from pkgutil import iter_modules
from contextlib import nullcontext
#endregion
#region SETUP
env_dict = {}
################################
######## SETUP #################
################################
#### REPO SETUP ####
ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) ##!!
root_var = os.environ['HOU_TOOL_REPO'] = ROOT_DIR
env_dict['HOU_TOOL_REPO']=ROOT_DIR
#print(root_var)
#print("root dir " + ROOT_DIR)
dir_list = os.listdir(ROOT_DIR)
HDA_DIR = os.path.join(ROOT_DIR,"HDA") ##!!
env_dict['HDA_DIR']=HDA_DIR
#print(HDA_DIR)
hda_list = os.listdir(HDA_DIR)
#print(hda_list)
### Houdini Package Dir ###
houdini_package_dir = os.path.join(ROOT_DIR,"packages")
#print(HOUDINI_PACKAGE_DIR)
HOUDINI_PACKAGE_DIR_var = os.environ['HOUDINI_PACKAGE_DIR'] = houdini_package_dir ##!!
env_dict['HOUDINI_PACKAGE_DIR']=houdini_package_dir
### OTL Scan path ###
otl_scan = ""
exclude_directories = set(['backup'])
for d in os.listdir(HDA_DIR):
if not d in exclude_directories:
#print(d)
path = os.path.join(HDA_DIR,d)
#print(path)
otl_scan += str(path+':')
otl_scan = otl_scan[:-1]
#print(otl_scan+'&')
#otl_scan = otl_scan + '&'
otl_scan_var = os.environ['HOUDINI_OTLSCAN_PATH'] = otl_scan ##!!
#print(os.environ['HOUDINI_OTLSCAN_PATH'])
env_dict['HOUDINI_OTLSCAN_PATH']=otl_scan
#### ACES SETUP ####
etc_path = os.path.join(ROOT_DIR,"etc_assets")
ocio_path = os.path.join(etc_path,"OpenColorIO-Configs")
ocio_config_path_dir = os.path.join(ocio_path,"aces_1.0.3")
ocio_config_path = os.path.join(ocio_config_path_dir,"config.ocio") ##!!
#print(ocio_config_path)
blender_ocio_path = os.path.join(etc_path,"Blender-config")
blender_ocio_config = os.path.join(blender_ocio_path,"config.ocio") ##!!
#print(blender_ocio_config)
#endregion
#region MAIN METHODS
########################################
############# MAIN METHODS #############
########################################
#REGION MAIN METHODS
#print( '\n'.join([f'{k}: {v}' for k, v in sorted(os.environ.items())]) ) # list all env vars
def Configure_OCIO(standard):
if standard:
ocio_var = os.environ['OCIO']=ocio_config_path
env_dict['OCIO']=ocio_config_path
env_dict['OCIO_BLENDER']=blender_ocio_config
else:
ocio_var = os.environ['OCIO']=blender_ocio_config
env_dict['OCIO']=blender_ocio_config
### How to use info ###
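# Keep prompting until the reply is exactly 'y' or 'n'; the validated answer is returned as a string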
def user_question(prompt):
answers = {"y","n"}
prompts = chain([prompt], repeat("Please answer with y/n: "))
replies = map(input, prompts)
valid_response = next(filter(answers.__contains__, replies))
print(valid_response)
return valid_response
def Env_Configure():
# OCIO
ocio = answer_pred("Do you want to configure ACES? ")
if ocio:
if answer_pred("Do you want the blender config? "):
Configure_OCIO(False)
else:
Configure_OCIO(True)
#### Write to file so system scripts can use it ####
Write_To_File_Controller()
# System Vars
if answer_pred("Do you want to configure system vars?"):
if answer_pred("do you want to unset system vars? "):
pass
if answer_pred("Do you want to set system vars? "):
#configure_system_vars()
pass
# PRINT TO FILE
# if answer_pred("Do you want to print vars to file? "):
# Write_To_File_Controller()
### Set Up System Variables
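# Note: on Linux/OSX this only marks setenv.sh as executable, on Windows it runs setenv.bat; other platforms just get a message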
def configure_system_vars(set):
write = set
    #### SET System ENVIRONMENT ####
from sys import platform
if platform == "linux" or platform == "linux2":
# linux
print("Congrats! You are on linux!")
val = subprocess.check_call("chmod +x setenv.sh" , shell=True)
pass
elif platform == "darwin":
# OS X
print("You are on OSX")
val = subprocess.check_call("chmod +x setenv.sh" , shell=True)
pass
elif platform == "win32":
# Windows...
print("You are, unfortunately on Windows...")
val = subprocess.check_call("./setenv.bat" , shell=True)
pass
else:
print("I don't know what system you're on...")
pass
#endregion
#region HELPER METHODS
##########################################
########## HELPER METHODS ################
##########################################
def question_predicate(answer):
#print(valid_response)
if answer == "y":
value = True
elif answer == "n":
value = False
return value
def answer_pred(prompt):
answer = user_question(prompt)
value = question_predicate(answer)
return value
def Write_To_File_Controller():
env_path = pathlib.Path("tools.env")
json_path = pathlib.Path("tools.json")
line_to_write = ""
### Write to Env
if env_path.exists():
print("env file exists")
#Write to existing file
Write_To_Env_File(env_path)
else:
print("does not exist")
# Create File
with open('tools.env', 'w') as fp:
pass
Write_To_Env_File(env_path)
#### Write to JSON
if json_path.exists():
print("json exists")
Write_To_Json(json_path)
else:
print("creating json file")
with open("tools.json","w") as fp:
pass
Write_To_Json(json_path)
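# Each entry is written as KEY="value:&" so the system setup scripts can read the values back in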
def Write_To_Env_File(file_path):
print(file_path)
with open(file_path, 'w') as f:
for key, value in env_dict.items():
f.write('%s="%s:&"\n' % (key, value))
def Write_To_Json(file_path):
# f = open(file_path, "w")
# d = json.dumps(env_dict, indent=4)
# json.dump(d,f)
# f.close()
with open(file_path,'w') as fp:
json.dump(env_dict,fp, indent=4,sort_keys=True)
### Convert paths to system ###
def Convert_Path_To_OS():
from sys import platform
if platform == "linux" or platform == "linux2":
# linux
print("Congrats! You are on linux!")
pass
elif platform == "darwin":
# OS X
print("You are on OSX")
pass
elif platform == "win32":
# Windows...
print("You are, unfortunately on Windows...")
pass
else:
print("I don't know what system you're on...")
pass
#endregion
#region EXECUTE
####################################
############ EXECUTE ###############
####################################
def Main():
#init()
Env_Configure()
#print(env_dict)
#Write_To_File_Controller()
Main()
#endregion
# PRINT vals and keys of dictionary
# for key, value in env_dict.items():
# print(key,'=',value)
| []
| []
| [
"HOUDINI_PACKAGE_DIR",
"OCIO",
"HOU_TOOL_REPO",
"HOUDINI_OTLSCAN_PATH"
]
| [] | ["HOUDINI_PACKAGE_DIR", "OCIO", "HOU_TOOL_REPO", "HOUDINI_OTLSCAN_PATH"] | python | 4 | 0 | |
platform/util/testSrc/com/intellij/util/EnvironmentUtilTest.java | // Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.util;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.util.io.IoTestUtil;
import org.junit.Test;
import java.io.File;
import java.util.Arrays;
import java.util.Map;
import static com.intellij.openapi.util.io.IoTestUtil.assumeUnix;
import static org.junit.Assert.*;
public class EnvironmentUtilTest {
@Test(timeout = 30000)
public void map() {
assertNotNull(EnvironmentUtil.getEnvironmentMap());
}
@Test
public void path() {
assertNotNull(EnvironmentUtil.getValue("PATH"));
if (SystemInfo.isWindows) {
assertNotNull(EnvironmentUtil.getValue("Path"));
}
}
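  // testParser consumes NUL-separated KEY=VALUE records, so values containing newlines survive intact.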
@Test
public void parse() {
String text = "V1=single line\0V2=multiple\nlines\0V3=single line\0PWD=?\0";
Map<String, String> map = EnvironmentUtil.testParser(text);
assertEquals("single line", map.get("V1"));
assertEquals("multiple\nlines", map.get("V2"));
assertEquals("single line", map.get("V3"));
if (System.getenv().containsKey("PWD")) {
assertEquals(System.getenv("PWD"), map.get("PWD"));
assertEquals(4, map.size());
}
else {
assertEquals(3, map.size());
}
}
@Test(timeout = 30000)
public void load() {
assumeUnix();
Map<String, String> env = EnvironmentUtil.testLoader();
assertTrue(env.size() >= System.getenv().size() / 2);
}
@Test(timeout = 30000)
public void loadingBatEnv() throws Exception {
IoTestUtil.assumeWindows();
File file = FileUtil.createTempFile("test", ".bat", true);
FileUtil.writeToFile(file, "set FOO_TEST_1=123\r\nset FOO_TEST_2=%1");
Map<String, String> result = new EnvironmentUtil.ShellEnvReader().readBatEnv(file, Arrays.asList("arg_value"));
assertEquals("123", result.get("FOO_TEST_1"));
assertEquals("arg_value", result.get("FOO_TEST_2"));
}
@Test(timeout = 30000)
public void loadingBatEnv_ErrorHandling() throws Exception {
IoTestUtil.assumeWindows();
File file = FileUtil.createTempFile("test", ".bat", true);
FileUtil.writeToFile(file, "echo some error\r\nexit /B 1");
try {
new EnvironmentUtil.ShellEnvReader().readBatEnv(file, Arrays.asList());
fail("error should be reported");
}
catch (Exception e) {
assertTrue(e.getMessage(), e.getMessage().contains("some error"));
}
}
} | [
"\"PWD\""
]
| []
| [
"PWD"
]
| [] | ["PWD"] | java | 1 | 0 | |
flenv_test.go | package flenv
import (
"os"
"testing"
)
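// config is the fixture under test: each field may be filled from a command-line flag, the named environment variable, or its declared default.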
type config struct {
Host string `env:"HOST" default:"localhost" flag:"-h,--host" help:"Host to bind to"`
Port int `env:"PORT" default:"80" flag:"-p,--port" help:"Port to listen on"`
}
func TestFlenvDecode(t *testing.T) {
oldHost := os.Getenv("HOST")
oldPort := os.Getenv("PORT")
os.Setenv("HOST", "sigusr2.net")
os.Setenv("PORT", "8000")
defer func() {
os.Setenv("HOST", oldHost)
os.Setenv("PORT", oldPort)
}()
var cfg config
_, err := Decode(&cfg, []string{})
if err != nil {
t.Fatalf("Unexpected error, got %q", err)
}
if cfg.Host != "sigusr2.net" {
t.Fatalf("Expected host to be \"sigusr2.net\", got %q", cfg.Host)
}
if cfg.Port != 8000 {
t.Fatalf("Expected port to be \"8000\", got %q", cfg.Port)
}
}
func TestFlenvDecodeArgs(t *testing.T) {
var cfg config
	_, err := Decode(&cfg, []string{"-h", "sigusr2.net", "-p", "8000"})
if err != nil {
t.Fatalf("Unexpected error, got %q", err)
}
if cfg.Host != "sigusr2.net" {
t.Fatalf("Expected host to be \"sigusr2.net\", got %q", cfg.Host)
}
if cfg.Port != 8000 {
t.Fatalf("Expected port to be \"8000\", got %q", cfg.Port)
}
}
func TestFlenvDecodeArgsWithEnv(t *testing.T) {
oldHost := os.Getenv("HOST")
oldPort := os.Getenv("PORT")
os.Setenv("HOST", "sigusr2.net")
os.Setenv("PORT", "8000")
defer func() {
os.Setenv("HOST", oldHost)
os.Setenv("PORT", oldPort)
}()
var cfg config
_, err := Decode(&cfg, []string{})
if err != nil {
t.Fatalf("Unexpected error, got %q", err)
}
if cfg.Host != "sigusr2.net" {
t.Fatalf("Expected host to be \"sigusr2.net\", got %q", cfg.Host)
}
if cfg.Port != 8000 {
t.Fatalf("Expected port to be \"8000\", got %q", cfg.Port)
}
}
func TestFlenvDecodeNonStruct(t *testing.T) {
var i int
if _, err := Decode(&i, []string{}); err != ErrNotStruct {
t.Fatalf("Expected error when decoding non-struct value.")
}
}
| [
"\"HOST\"",
"\"PORT\"",
"\"HOST\"",
"\"PORT\""
]
| []
| [
"PORT",
"HOST"
]
| [] | ["PORT", "HOST"] | go | 2 | 0 | |
PHENOS/phenos/core.py | #!/usr/bin/env python -tt
# -*- coding: utf-8 -*-
#STANDARD LIBRARY
import os,sys,time,shutil,subprocess
import logging,platform,ConfigParser,traceback
import numpy as np
from itertools import chain
from math import e
from collections import defaultdict, Counter
#OTHER
from matplotlib import use as mpluse
mpluse('PS')
import matplotlib.pyplot as pyplt
import win32com.client
# #############################################################################
filename = os.path.basename(__file__)
authors = ("David B. H. Barton")
version = "2.7"
LOG=logging.getLogger()
#
#UTILITY LAMBDAS ##############################################################
#flattens a nested list, e.g. flatten([[1,2],[3,4]]) returns [1,2,3,4]
flatten=lambda nested: list(chain.from_iterable(nested))
#combines two pairs of timepoints & measurements and returns in timepoint
#order e.g.
# tzip([0,2],[30,40],[1,7],[35,100]) returns [(0,1,2,7),(30,35,40,100)]
tzip=lambda t1,m1,t2,m2: zip(*sorted(zip(t1,m1)+zip(t2,m2)))
ATOMORNOT=lambda(aon): getattr(aon,"value",aon)
def cellcount_estimate(rawmeasuredminusagar):
"""
    log10(P) = 0.7625*A + 4.8914
    P = 77875.3 * e**(1.75572*A)
Determined empirically using cell counts
and least squares curve fitting
"""
A=rawmeasuredminusagar
return 77875.3*e**(1.75572*A)
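# Example: a reading of 1.0 above the agar baseline gives 77875.3*e**1.75572, i.e. roughly 4.5e5 cells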
def calc_slope(measures,timepoints):
"""
The average slope of the measures and timepoints, averaged across every difference
in the set provided to avoid problems with noisy data
"""
try:
mD=delta_series(measures,k=2)
tD=delta_series(timepoints,k=2)
allslopes=[m/float(t) for m,t in zip(mD,tD)]
return sum(allslopes)/len(allslopes)
except Exception as e:
LOG.error("Can't calculate slope for {}/{}"
"because {} {}".format(str(measures),str(timepoints),e,get_traceback()))
return None
def calc_lag(slope,measureinflection,minimum,timeinflection):
"""
Taking the slope at the inflection point (the steepest part
of the curve), and tracing that slope back until it reaches
the value of the minimum measurement, gives the lag time.
"""
if not slope:
return None
try:
measureinflectionchange=measureinflection-minimum
timeoflinescrossing=measureinflectionchange/slope
timeofslopecrossingminimum=timeinflection-timeoflinescrossing
except Exception as e:
LOG.error("Can't calculate lag for slope {}, measureinflection {}, "
"minimum {}, timeinflection {} "
"because {} {}".format(slope,measureinflection,
minimum,timeinflection,
e,get_traceback()))
return None
if np.isinf(timeofslopecrossingminimum): return None
return timeofslopecrossingminimum
def intervals_check(timepoints,bounds=(0.1,1)):
intervals=["{:.2f}".format(y-x) for x,y in get_kmer_list(timepoints,k=2)]
N=float(len(intervals))
IC=Counter(intervals)
    #As long as 95% of intervals are equal, then proceed
percents={k:v/N for k,v in IC.items()}
if max(percents.values())<0.95:
LOG.error("> 5% of time value intervals are not the "
"most common value ({})".format(str(percents)))
return False
#As long as any abnormal time intervals are within the first
#or last 5% of the sequence, then proceed
ranked=IC.most_common()
#if most common time interval is outside usual bounds then abort
ti,cnt=ranked[0]
Fti=float(ti)
if Fti>bounds[1] or Fti<bounds[0]:
LOG.error("most common time interval ({}) is outside "
"bounds ({})".format(ti,str(bounds)))
return False
#if less common time intervals are not with 5% of either end
if len(ranked)>1:
subintervals=intervals[int(N*0.05):int(N*0.95)]
for t,c in ranked[1:]:
if t in subintervals:
LOG.error("uncommon time interval ({}) found "
"within central 90% of time values"
.format(t))
return False
return True
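# Smooth the growth curve, find where the smoothed deltas peak (the steepest part
# of the curve), then derive the maximum slope over a small window around that
# inflection point, the lag time and the half-peak time; every intermediate series
# and result is returned in one dict.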
def calc_inflection(measurements,timevalues,smoothing=15):
output={}
M=output["M"]=measurements
C=output["C"]=[cellcount_estimate(m) for m in M]
T=output["T"]=timevalues
if not intervals_check(T):
LOG.warning("time values intervals check failed,"
"therefore aborting calculations early")
return output
sM=output["sM"]=smooth_series(M,k=smoothing)
sT=output["sT"]=smooth_series(T,k=smoothing)
DsM=output["DsM"]=delta_series(sM)
DsT=output["DsT"]=smooth_series(sT,k=2)
if not DsM:
LOG.warning("smoothed measurements don't give valid delta "
"values, therefore aborting "
"calculations early")
return output
sDsM=output["sDsM"]=smooth_series(DsM,k=2)
sDsT=output["sDsT"]=smooth_series(DsT,k=2)
#sDsMpeakI=output["sDsMpeakI"]=find_first_peak(sDsM)
sDsMpeakI=output["sDsMpeakI"]=sDsM.index(max(sDsM))
if not sDsMpeakI:
LOG.warning("not getting valid sDsMpeakI therefore aborting "
"calculations early")
return output
sDsMpeakM=output["sDsMpeakM"]=sDsM[sDsMpeakI]
sDsTpeakT=output["sDsTpeakT"]=sDsT[sDsMpeakI]
iMTi=output["iMTi"]=closest_index(T,sDsTpeakT)
inflectionT=output["inflectionT"]=T[iMTi]
inflectionM=output["inflectionM"]=M[iMTi]
#take segment of line surrounding inflection point and
slopewindow=4
leftindex=iMTi-slopewindow
rightindex=iMTi+slopewindow
Msub=M[leftindex:rightindex+1]
Csub=C[leftindex:rightindex+1]
Tsub=T[leftindex:rightindex+1]
#print "1: {} ({}) @ {}".format(M1,C1,T1)
#print "2: {} ({}) @ {}".format(M2,C2,T2)
maxslope=output["maxMslope"]=calc_slope(Msub,Tsub)
Cslope=output["maxCslope"]=calc_slope(Csub,Tsub)
minminusagar=min(M)
maxchange=max(M)-minminusagar
#slopeC=cellcount_estimate(self.slope)
#print "MI {}, TI {}".format(self.measureinflection,
# self.timeinflection)
#print "Slopes {} ({})".format(self.slope,Cslope)#,slopeC)
lagtime=output["lagtime"]=calc_lag(maxslope,inflectionM,
minminusagar,inflectionT)
#print "lag {} hrs".format(self.lag)
halfmaxchange=maxchange/2.0
halfmaxchangeindex=closest_index(M,halfmaxchange)
halfpeaktime=output["halfpeaktime"]=T[halfmaxchangeindex]
return output
def doubling_time(slope):
"""
NOT YET IMPLEMENTED
slope (change_in_rawmeasuredvalueminusagar / change_in_time)
"""
cellcountslope=cellcount_estimate(slope)
def get_kmer_list(iterable,k=2):
"""reduces len of iterable by k-1"""
return [iterable[x:x+k] for x in range(len(iterable)+1-k)]
def intervals(values,upto=False):
if upto:
values=values[:upto]
intervals=[y-x for x,y in get_kmer_list(values,k=2)]
return min(intervals),max(intervals)
def smooth_series(iterable,k=2):
"""reduces len of iterable by k-1"""
avg=lambda L:float(sum(L))/k
return [avg(i) for i in get_kmer_list(iterable,k=k)]
def antimirror_before_zero(iterable):
"""
to avoid problems with slope-finding algorithm,
any initial dips in the curve are replaced with negative mirrors of the readings
after the zero, provided that the zero occurs within the first half of the sequence
"""
zeroindex=iterable.index(0.0)
if zeroindex>len(iterable)/2.0:
return iterable
segment_to_antimirror=iterable[zeroindex+1:(zeroindex*2)+1]
negatives=[-v for v in segment_to_antimirror[::-1]]
return negatives+iterable[zeroindex:]
def delta_series(iterable,k=2):
delta=lambda L:L[-1]-L[0]
return [delta(i) for i in get_kmer_list(iterable,k=k)]
def find_first_peak(iterable):
lv=-100000
for i,v in enumerate(iterable):
if v<=lv: return i-1
lv=v
def yield_contiguous_index_ranges(lst):
"""
Returns sets of slices of numbers that are contiguous in a list
e.g. [1,2,3,4,6,7,8] would return
((1,5),(6,9))
"""
lastSTART=lst[0]
lastEND=lst[0]
for i in lst[1:]:
if i==lastEND+1:
lastEND=i
else:
yield (lastSTART,lastEND+1)
lastSTART=i
lastEND=i
lastEND=i
yield (lastSTART,lastEND+1)
def get_chrcumulative():
"""
Returns dictionary of bp additions to be added to bp coordinates
of features on a given chromosome to tranform them into genome-wide
coordinates.
Used by graphics.py when plotting QTLs/Features along
the length of the whole genome
>>> print get_chrcumulative()[3]
1043402
"""
if "chrcumulative" in globals():
return globals()["chrcumulative"]
else:
global chrcumulative
chrcumulative={}
chrlengths={1:230218,
2:813184,
3:316620,
4:1531933,
5:576874,
6:270161,
7:1090940,
8:562643,
9:439888,
10:745751,
11:666816,
12:1078177,
13:924431,
14:784333,
15:1091291,
16:948066}
keys=sorted(chrlengths.keys())
for i,c in enumerate(keys):
previouschrs=keys[:i]
chrcumulative[c]=sum([chrlengths[x] for x in previouschrs])
return chrcumulative
def display_image(filepath,**kwargs):
size=kwargs.setdefault("size",(18,12))
im = pyplt.imread(filepath)
fig, ax = pyplt.subplots(figsize=size)
implot = ax.imshow(im,aspect="auto")
pyplt.axis('off')
pyplt.show()
pyplt.close()
def sorter(iterable,operationfunction):
dd=defaultdict(list)
for each in iterable:
dd[operationfunction(each)].append(each)
return dd
def fromRoman(romannumeralstring):
"""
https://github.com/enthought/Python-2.7.3/blob/master/Doc/tools/roman.py
"""
romannumeralstring=romannumeralstring.upper()
romanNumeralMap=(('M', 1000),
('CM',900),
('D', 500),
('CD',400),
('C', 100),
('XC',90),
('L', 50),
('XL',40),
('X', 10),
('IX',9),
('V', 5),
('IV',4),
('I', 1))
result=0
index=0
for numeral,integer in romanNumeralMap:
while romannumeralstring[index:index+len(numeral)]==numeral:
result+=integer
index+=len(numeral)
return result
def closest_index(lst,value):
"""
Returns the index of the closest value to 'value' in lst
"""
return min(range(len(lst)), key=lambda i: abs(lst[i]-value))
def get_indices_around(lst,centervalue,plusminus=0.5):
output=[]
for i,v in enumerate(lst):
if centervalue-plusminus<=v<=centervalue+plusminus:
output.append(i)
return output
def indices_to_values(lst,indices):
return [lst[i] for i in indices]
def get_allnone_mask(list_of_lists):
"""
returns the indices of every position that is None in every
sublist. Used to filter out all-None columns from markers and
alleles
"""
output=[]
index=0
while True:
try:
if not any([lst[index] for lst in list_of_lists]):
output.append(index)
index+=1
except IndexError:
break
return output
def mask_by_index(lst,indices_to_skip):
return [v for i,v in enumerate(lst) if i not in indices_to_skip]
def padded_display_from_headers(lst,headers,rowclip=300):
padblocks=["{"+":^{}".format(len(header)+2)+"}" for header in headers]
lst=[pad.format(element) for pad,element in zip(padblocks,lst)]
return "".join(lst)[:rowclip]
def reconcile_dicts(*dicts,**kwargs):
"""
combines all dicts into one.
If flag=True then prints errors for each duplicate key
If flag=False, renames duplicate keys with index of dict in brackets,
e.g. "key (0)"
"key (1)"
But if collapse=True, keys will be combined if the values are the same
>>> d1={'a':1,'b':2,'c':3,'d':4}
>>> d2={'a':1,'b':4,'c':3,'D':4}
>>> print reconcile_dicts(d1,d2,flag=False,collapse=True)
{'a': 1, 'c': 3, 'd': 4, 'b (1)': 4, 'b (0)': 2, 'D': 4}
"""
flag=kwargs.pop("flag",True)
collapse=kwargs.pop("collapse",True)
#First find duplicate keys
combineddict={}
for i,dct in enumerate(dicts):
for k,v in dct.items():
if k not in combineddict:
combineddict[k]=[(i,v)]
else:
combineddict[k].append((i,v))
#Now decide what to do
output={}
for k,ivpairs in combineddict.items():
if len(ivpairs)==1:
output[k]=ivpairs[0][1]
else:
if flag==True:
LOG.warning("Key '{}' is duplicated: {}"
.format(k,dict(ivpairs)))
values=list(set([v for i,v in ivpairs]))
if collapse is True and len(values)==1:
output[k]=values[0]
else:
for i,v in ivpairs:
output["{} ({})".format(k,i)]=v
return output
def filterdict(dictionary,keys=[]):
"""
Returns a dict taken from dictionary but only with the keys in keys
>>> print filterdict({'a':1,'b':2,'c':3},['a','b'])
{'a': 1, 'b': 2}
"""
return {k:v for k,v in dictionary.items() if k in keys}
def scriptdir():
return os.path.dirname(os.path.realpath(sys.argv[0]))
def chscriptdir():
os.chdir(scriptdir())
def find_rootdir(searchdir=None):
if searchdir is None:
searchdir=os.path.dirname(os.path.realpath(sys.argv[0]))
rootdir=False
shutdown=0
while not rootdir:
if os.path.exists(os.path.join(searchdir,"Logs")):
rootdir=searchdir
else:
searchdir=os.path.split(searchdir)[0]
if not searchdir:
break
shutdown+=1
if shutdown>100:
break
return rootdir
def get_config_filepath(filename="config.txt"):
"""
Thanks Tom Walsh for this!
If the config file doesn't exist at the proper location,
but does exist in the script directory or level above
then it will be copied into the proper location.
A shortcut will also be created.
"""
platform_system=platform.system()
configfilename=filename
if platform_system!='Windows':
raise RuntimeError("unsupported platform: {!r}".format(platform_system))
else:
appdata=os.getenv('APPDATA')
if appdata is None or not os.path.isdir(appdata):
raise RuntimeError("%APPDATA% environment variable is invalid or undefined")
config_filepath=os.path.join(appdata,'PHENOS',configfilename)
if os.path.exists(config_filepath):
try:
LOG.info("Found config file at {}".format(config_filepath))
except:
pass
return config_filepath
else:
#LOOK FOR CONFIG IN OR ABOVE SCRIPT DIRECTORY
setup_config_txt(destinationpath=config_filepath)
return config_filepath
def get_desktop_dirpath():
platform_system=platform.system()
if platform_system!='Windows':
raise RuntimeError("unsupported platform: {!r}".format(platform_system))
else:
return os.path.normpath(os.path.expanduser('~\Desktop'))
def setup_config_txt(destinationpath):
"""
If necessary, copies filename from above scriptdir to appdata
folder and creates shortcut from that to the desktop
"""
appdatadir,filename=os.path.split(destinationpath)
SCD=scriptdir()
possible=[os.path.join(SCD,filename),
os.path.join(os.path.dirname(SCD),filename)]
foundpath=None
for P in possible:
LOG.info("Looking for {}"
.format(P))
if os.path.exists(P):
foundpath=P
break
if foundpath:
copy_to(foundpath,destinationpath)
LOG.info("Copied {} from {} to {}"
.format(filename,foundpath,destinationpath))
desktopshortcutpath=os.path.join(get_desktop_dirpath(),
"Shortcut to {}.lnk"
.format(filename))
create_Windows_shortcut(destinationpath,
desktopshortcutpath,
report=True)
else:
LOG.critical("Can't find {} in {} or {}"
.format(filename,foundpath,config_filepath))
sys.exit()
def get_config_dict():
CFpth=get_config_filepath()
CFpars=ConfigParser.SafeConfigParser()
CFpars.optionxform = str #prevents section header cases from being changed
def safeget(section,defaultheader,defaultcontent):
if not CFpars.has_section(section):
return None
try:
return CFpars.get(section,defaultheader,defaultcontent)
except ConfigParser.NoOptionError:
return defaultcontent
def getall(section,default):
if not CFpars.has_section(section):
return None
return dict(CFpars.items(section))
CFpars.read(CFpth)
def splitcontrols(controlsstring):
return [c.strip() for c in controlsstring.split(",")]
def splitnumbers(numberstring):
return tuple([int(n.strip()) for n in numberstring.split(",")])
def splitvalues(dictionary):
return {k:[i.strip() for i in v.split(",")]
for k,v in dictionary.items()}
output={"config_filepath":CFpth,
"configparser":CFpars,
"scriptdirectory":scriptdir(),
"target_directory":safeget("Locations",
"target_directory",
find_rootdir()),
"source_directory":safeget("Locations",
"source_directory",
None),
"user_folder":safeget("Locations",
"user_folder",
"Test"),
"graphicstype":safeget("Graphics",
"type",
"png"),
"plotignore":safeget("Graphics",
"plotignore",
"True"),
"windowposition":splitnumbers(safeget("GUI",
"position",
"800,600,0,0")),
"controls":splitcontrols(safeget("Controls",
"controls",
"YPD, YPD 30C, "
"COM, COM 30C")),
"phenotypecalculators":splitvalues(getall("PhenotypeCalculators",
{"!default":"AverageWithoutAgarAtTimeCalc"})),
"combifilevisualizations":splitvalues(getall("CombiFileVisualizations",
{"!default":"EmptyPlateView, "
"PrintingQuality, "
"FinalGrowth, Animation_Temp, "
"CurvesWithoutAgar_PrintedMass, "
"CurvesWithoutAgar_Groups, "
"CurvesWithoutAgar_Slopes, "
"CurvesWithoutAgar_Lags, "
"CurvesNormalized_PrintedMass, "
"Histogram_MaxChange, "
"Scatterplot_PlatedMass_Lag, "
"ReplicatePlots"}))}
if "phenotypecalculators" in output:
pc2={}
for k,v in output["phenotypecalculators"].items():
if k.startswith("!"):
pass
elif not k.endswith("$"):
k=k+"$"
pc2[k]=v
output["phenotypecalculators"]=pc2
return output
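# Ensure the directories named in config.txt exist (creating them where possible) and fall back to All/Test/New folder when the configured user folder is missing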
def check_and_fix_paths(create_userfolders=True):
CD=get_config_dict()
if not os.path.exists(CD["target_directory"]):
try:
prepare_path(CD["target_directory"])
except Exception as e:
raise RuntimeError("target_directory {} doesn't exist "
"and PHENOS can't create it"
.format(CD["target_directory"],e,
get_traceback()))
if not os.path.exists(CD["source_directory"]):
try:
prepare_path(CD["source_directory"])
print ("source_directory {} doesn't exist so "
"creating it. Ensure microplate reader "
"is set up to output to this location"
.format(CD["source_directory"]))
except Exception as e:
raise RuntimeError("source_directory {} doesn't exist "
"and PHENOS can't create it because {} {}"
.format(CD["source_directory"],e,
get_traceback()))
fulluserpath=os.path.join(CD["target_directory"],
"Data files",
CD["user_folder"])
if not os.path.exists(fulluserpath):
if create_userfolders:
try:
prepare_path(fulluserpath)
except Exception as e:
raise RuntimeError("user_folder {} doesn't exist "
"and PHENOS can't create it because {} {}"
.format(fulluserpath,e,get_traceback()))
else:
tryfolders=["All","Test","New folder"]
for tryfolder in tryfolders:
trypath=os.path.join(CD["target_directory"],
"Data files",
tryfolder)
if os.path.exists(trypath):
CD["user_folder"]=tryfolder
return CD
prepare_path(os.path.join(CD["target_directory"],
"Data files",
tryfolder))
CD["user_folder"]=tryfolder
return CD
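# Walk startpath and yield file and/or directory paths, recursing when dig=True and yielding paths relative to startpath when includeroot=False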
def yield_subpaths(startpath,dig=True,onlytype="all",includeroot=True):
if dig:
for root,dirs,files in os.walk(startpath,topdown=True):
if not includeroot:
root=os.path.normpath(root.replace(startpath,''))
if root.startswith(os.path.sep):
root=root[1:]
if onlytype in ["all","files"]:
for name in files:
yield os.path.join(root,name)
if onlytype in ["all","dirs"]:
for name in dirs:
yield os.path.join(root,name)
else:
for subpath in os.listdir(startpath):
fullpath=os.path.join(startpath,subpath)
if not includeroot:
output=fullpath.replace(startpath,'')
if output.startswith(os.path.sep):
output=output[1:]
else:
output=fullpath
if onlytype in ["files"]:
if os.path.isfile(fullpath):
yield output
elif onlytype in ["dirs"]:
if os.path.isdir(fullpath):
yield output
elif onlytype in ["all"]:
yield output
def examine_path(filepath,clip=260):
"""
>>> chscriptdir()
>>> d=examine_path("dbtypes.py")
>>> print d['extension']
.py
>>> print d['filename']
dbtypes.py
"""
filepath=os.path.normpath(filepath)
cwd=os.getcwd()
directory,filename=os.path.split(filepath)
filenamebody,extension=os.path.splitext(filename)
exists=os.path.exists(filepath)
iscomplete= cwd==filepath[:len(cwd)]
badchars=set(filename)-set(" abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN"
"OPQRSTUVWXYZ0123456789"
".,_+-=;!^~()[]'@&#%$\\/")
FP=os.path.join(cwd,filepath) if not iscomplete else filepath
return {"filepath":filepath,
"length":len(filepath),
"filenamebody":filenamebody,
"extension":extension,
"filename":filename,
"directory":directory,
"exists":exists,
"badchars":list(badchars),
"isvalid":len(badchars)==0 and len(filepath)<=clip,
"size":os.path.getmtime(filepath) if exists else None,
"datemodified":os.path.getsize(filepath) if exists else None,
"iscomplete":iscomplete,
"workingdirectory":cwd,
"fullpath":FP,
"scriptdirectory":os.path.dirname(os.path.realpath(sys.argv[0]))}
def prepare_path(dpath,report=False):
"""
creates all necessary subdirectories to ensure that filepath can
then be created.
dpath must be a directory.
"""
if not os.path.exists(dpath):
try:
os.makedirs(dpath)
if report:
LOG.info("created {}".format(dpath))
return dpath
except Exception as e:
LOG.critical("couldn't create {} because {} {}"
.format(dpath,e,get_traceback()))
return False
def copy_to(filepath,targetpath,report=True):
"""
    N.B. Ensure targetpath exists if it is a directory.
If it IS a directory, shutil.copy will keep the basename
of the original filepath
"""
if not os.path.exists(filepath):
return False
prepare_path(os.path.dirname(targetpath))
shutil.copy(filepath,targetpath)
if report:
LOG.info("copy created: {}".format(targetpath))
return os.path.exists(targetpath)
def copy_contents_to(sourcedirectory,targetdirectory,report=True,
ignore=[".lnk"]):
assert os.path.exists(sourcedirectory)
prepare_path(targetdirectory)
for subpath in yield_subpaths(sourcedirectory,dig=True,onlytype="all",
includeroot=False):
fullsourcepath=os.path.join(sourcedirectory,subpath)
fulltargetpath=os.path.join(targetdirectory,subpath)
if os.path.isdir(fullsourcepath):
prepare_path(fulltargetpath)
else:
ext=os.path.splitext(fulltargetpath)[-1]
if os.path.exists(fulltargetpath):
LOG.error("already exists: {}".format(fulltargetpath))
elif ext in ignore:
LOG.info("ignoring {}".format(fulltargetpath))
else:
try:
shutil.copy(fullsourcepath,fulltargetpath)
if report:
LOG.info("copied {} to {}"
.format(fullsourcepath,fulltargetpath))
except Exception as e:
LOG.error("shutil.copy({},{}) failed{} {}"
.format(fullsourcepath,fulltargetpath,
e,get_traceback()))
def check_path(filepath,
replace_bad=True,
clip_path=True,
create_directory=True,
replace_char="~",
clip=260):
"""
Paths longer than 260 characters produce errors, so this will check and correct them,
in addition to doing character replacement and creating directories if needed
"""
filepath=os.path.normpath(filepath)
check=examine_path(filepath,clip=clip)
if check["badchars"]:
if replace_bad:
for char in check["badchars"]:
check["filename"]=check["filename"].replace(char,replace_char)
check["filepath"]=os.path.join(check["directory"],
check["filename"])
else:
return False
if check["length"]>clip:
if clip_path:
LOG.debug(check["extension"])
clip=clip-(len(check["extension"])+1)
FPMX,EXT=os.path.splitext(check["filepath"])
FPMXC=FPMX[:clip]+"~"
check["filepath"]=FPMXC+EXT
else:
return False
if not os.path.exists(check["directory"]):
if create_directory:
prepare_path(check["directory"])
return check["filepath"]
def get_class_by_name(name):
"""
>>> c=get_class_by_name("DATReaderWithoutTemp")
>>> print c.__name__
DATReaderWithoutTemp
WARNING Doesn't work from inside other modules!
"""
return globals().get(name,None)
def get_newlogpath():
logfolder=os.path.join(check_and_fix_paths()["target_directory"],
"Logs")
prepare_path(logfolder)
pp=os.path.join(logfolder,
"phenos{}.log".format(time.strftime("%y%m%d%H%M%S")))
return pp
def create_Windows_shortcut(targetpath,locationpath,report=False):
try:
shell=win32com.client.Dispatch("WScript.Shell")
shortcut=shell.CreateShortCut(locationpath)
shortcut.Targetpath=targetpath
shortcut.save()
if report:
LOG.info("created shortcut to {} in {}"
.format(targetpath,locationpath))
except Exception as e:
LOG.error("failed to create shortcut to {} in {} because {} {}"
.format(targetpath,locationpath,e,get_traceback()))
def open_on_Windows(somepath):
try:
if os.path.isdir(somepath):
subprocess.Popen('explorer "{}"'.format(somepath))
else:
subprocess.Popen('notepad "{}"'.format(somepath))
except:
LOG.error("couldn't open {}".format(somepath))
def log_uncaught_exceptions(*exc_args):
"""
This, once set at sys.excepthook, makes sure uncaught exceptions
are saved to the log.
"""
exc_txt=''.join(traceback.format_exception(*exc_args))
LOG.error("Unhandled exception: %s",exc_txt)
#logging.shutdown()
def get_traceback():
return ''.join(traceback.format_exception(*sys.exc_info()))
def setup_logging(level="INFO",
fileformat='%(levelname)s [ln %(lineno)d, '
'%(module)s.%(funcName)s] %(message)s [%(asctime)s]\n',
#stdoutformat='%(message)s\n'):
stdoutformat='%(levelname)s [ln %(lineno)d, '
'%(module)s.%(funcName)s] %(message)s [%(asctime)s]\n'):
"""
https://docs.python.org/2/howto/logging.html#logging-basic-tutorial
http://stackoverflow.com/questions/5296130/restart-logging-to-a-new-file-python
"""
if level is None:
LOGLEVEL=logging.INFO#DEBUG
elif type(level)==str:
LOGLEVEL={"DEBUG":logging.DEBUG,
"INFO":logging.INFO,
"WARNING":logging.WARNING,
"ERROR":logging.ERROR,
"CRITICAL":logging.CRITICAL}[level]
else:
LOGLEVEL=level
filepath=get_newlogpath()
if LOG.handlers: # wish there was a LOG.close()
for handler in LOG.handlers[:]: # make a copy of the list
LOG.removeHandler(handler)
LOG.setLevel(LOGLEVEL)
fh=logging.FileHandler(filepath)
fh.setFormatter(logging.Formatter(fileformat))
LOG.addHandler(fh)
sh=logging.StreamHandler(sys.stdout)
sh.setFormatter(logging.Formatter(stdoutformat))
LOG.addHandler(sh)
LOG.info('_'*50)
LOG.info('Set up logging to {}'.format(filepath))
#
class DirectoryWrapper(object):
def __init__(self,dirpath=None,godeep=True):
if dirpath is None:
dirpath=scriptdir()
self.fullpath=os.path.dirname(dirpath)
def exists(self):
return os.path.exists(self.fullpath)
def create(self):
if not self.exists():
os.makedirs(self.fullpath)
def parent(self):
        return DirectoryWrapper(os.path.split(self.fullpath)[0])
def contents(self):
pass
def __eq__(self,other):
if type(other)==str:
return self.fullpath==other
else:
return self.fullpath==other.fullpath
def intersection(self,other):
pass
def __add__(self,other):
if type(other)==str:
            return DirectoryWrapper(os.path.join(self.fullpath,other))
#elif
pass
def __iter__(self):
pass
#MAIN #########################################################################
if __name__=='__main__':
setup_logging("INFO")#CRITICAL")
sys.excepthook=log_uncaught_exceptions
get_config_dict()
#setup_config_txt(destinationpath="C:\Users\localadmin1\AppData\Roaming\PHENOS\config D.txt")
#import doctest
#doctest.testmod()
| []
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | python | 1 | 0 | |
example/patterns/main.go | package main
import (
"fmt"
"os"
"regexp"
"github.com/jhunt/go-chat"
)
func main() {
var (
bot chat.Bot
err error
)
channels := []string{}
if token := os.Getenv("BOT_SLACK_TOKEN"); token != "" {
fmt.Printf("connecting to slack...\n")
bot, err = chat.Slack(token)
c := os.Getenv("BOT_SLACK_CHANNEL")
if c == "" {
c = "testing"
}
channels = append(channels, c)
} else {
fmt.Printf("connecting to tty...\n")
bot, err = chat.Terminal()
}
if err != nil {
fmt.Fprintf(os.Stderr, "connect failed: %s\n", err)
os.Exit(1)
}
lower := regexp.MustCompile(`[a-z]`)
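	// Answer questions of the form "is <text> upper case?": the capture group is upper case only if it contains no lowercase letters.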
bot.On(`\s*is\s+(.*)\s+upper\s*case\?\s*$`,
func(msg chat.Message, args ...string) chat.Then {
if !lower.MatchString(args[1]) {
msg.Reply("yup, looks like '%s' upper case alright", args[1])
} else {
msg.Reply("nope; '%s' is definitely not uppercase", args[1])
}
return chat.Handled
})
	// Block forever so the bot keeps handling messages; select{} parks the goroutine instead of busy-looping.
	select {}
}
| [
"\"BOT_SLACK_TOKEN\"",
"\"BOT_SLACK_CHANNEL\""
]
| []
| [
"BOT_SLACK_TOKEN",
"BOT_SLACK_CHANNEL"
]
| [] | ["BOT_SLACK_TOKEN", "BOT_SLACK_CHANNEL"] | go | 2 | 0 | |
userbot/plugins/pmpermit.py | import os
import time
import asyncio
import io
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon.tl.functions.users import GetFullUserRequest
from telethon import events, errors, functions, types
from userbot import ALIVE_NAME, CUSTOM_PMPERMIT
from userbot.utils import admin_cmd
PMPERMIT_PIC = os.environ.get("PMPERMIT_PIC", None)
if PMPERMIT_PIC is None:
WARN_PIC = "https://tenor.com/view/corgi-jump-hi-hey-hello-gif-4505201"
else:
WARN_PIC = PMPERMIT_PIC
PM_WARNS = {}
PREV_REPLY_MESSAGE = {}
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "Set ALIVE_NAME in config vars in Heroku"
CUSTOM_MIDDLE_PMP = str(CUSTOM_PMPERMIT) if CUSTOM_PMPERMIT else "**If You Want You Can Leave A Message Here ! My Boss Will Surely See And Reply To You Soon !**"
USER_BOT_WARN_ZERO = "You Were \n`╔══╗╔╗──────╔╗──────╔╗\n║╔╗║║║──────║║──────║║\n║╚╝╚╣║╔══╦══╣║╔╦══╦═╝║\n║╔═╗║║║╔╗║╔═╣╚╝╣║═╣╔╗║\n║╚═╝║╚╣╚╝║╚═╣╔╗╣║═╣╚╝║\n╚═══╩═╩══╩══╩╝╚╩══╩══╝` \nDue To Trying To Spam Inbox Of My Master !"
USER_BOT_NO_WARN = ("`Hello My Friend ! This is` **P.Y.D.R.O.I.D**\n"
"`Private Messaging Security Protocol ⚠️`\n\n"
"**Currently My Boss**\n"
f"{DEFAULTUSER} is Busy ! Please Don't Spam My Masters Inbox\n\n"
f"{CUSTOM_MIDDLE_PMP} \n\n"
"**Kindly Send** `/start` **If You Want To Register Your Request**")
if Var.PRIVATE_GROUP_ID is not None:
@command(pattern="^.approve ?(.*)")
async def approve_p_m(event):
if event.fwd_from:
return
replied_user = await event.client(GetFullUserRequest(event.chat_id))
firstname = replied_user.user.first_name
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if not pmpermit_sql.is_approved(chat.id):
if chat.id in PM_WARNS:
del PM_WARNS[chat.id]
if chat.id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat.id].delete()
del PREV_REPLY_MESSAGE[chat.id]
pmpermit_sql.approve(chat.id, reason)
await event.edit("Approved to pm [{}](tg://user?id={})".format(firstname, chat.id))
await asyncio.sleep(3)
await event.delete()
@command(pattern="^.block ?(.*)")
async def approve_p_m(event):
if event.fwd_from:
return
replied_user = await event.client(GetFullUserRequest(event.chat_id))
firstname = replied_user.user.first_name
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if chat.id == 1263617196:
await event.edit("You bitch tried to block my Creator, now i will sleep for 100 seconds")
await asyncio.sleep(100)
else:
if pmpermit_sql.is_approved(chat.id):
pmpermit_sql.disapprove(chat.id)
await event.edit(" ███████▄▄███████████▄ \n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓███░░░░░░░░░░░░█\n██████▀▀▀█░░░░██████▀ \n░░░░░░░░░█░░░░█ \n░░░░░░░░░░█░░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░░▀▀ \n\n**This is Uncool ! Now My boss Banned you nigga Due To backchodi 💩**[{}](tg://user?id={})".format(firstname, chat.id))
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat.id))
@command(pattern="^.disapprove ?(.*)")
async def approve_p_m(event):
if event.fwd_from:
return
replied_user = await event.client(GetFullUserRequest(event.chat_id))
firstname = replied_user.user.first_name
reason = event.pattern_match.group(1)
chat = await event.get_chat()
if event.is_private:
if chat.id == 813878981:
await event.edit("Sorry, I Can't Disapprove My Master")
else:
if pmpermit_sql.is_approved(chat.id):
pmpermit_sql.disapprove(chat.id)
await event.edit("Disapproved [{}](tg://user?id={})".format(firstname, chat.id))
@command(pattern="^.listapproved")
async def approve_p_m(event):
if event.fwd_from:
return
approved_users = pmpermit_sql.get_all_approved()
APPROVED_PMs = "Current Approved PMs\n"
if len(approved_users) > 0:
for a_user in approved_users:
if a_user.reason:
APPROVED_PMs += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) for {a_user.reason}\n"
else:
APPROVED_PMs += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id})\n"
else:
APPROVED_PMs = "no Approved PMs (yet)"
if len(APPROVED_PMs) > 4095:
with io.BytesIO(str.encode(APPROVED_PMs)) as out_file:
out_file.name = "approved.pms.text"
await event.client.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption="Current Approved PMs",
reply_to=event
)
await event.delete()
else:
await event.edit(APPROVED_PMs)
@bot.on(events.NewMessage(incoming=True))
async def on_new_private_message(event):
if event.from_id == bot.uid:
return
if Var.PRIVATE_GROUP_ID is None:
return
if not event.is_private:
return
message_text = event.message.message
chat_id = event.from_id
current_message_text = message_text.lower()
if USER_BOT_NO_WARN == message_text:
# userbot's should not reply to other userbot's
# https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots
return
sender = await bot.get_entity(chat_id)
if chat_id == bot.uid:
# don't log Saved Messages
return
if sender.bot:
# don't log bots
return
if sender.verified:
# don't log verified accounts
return
if any([x in event.raw_text for x in ("/start", "1", "2", "3", "4", "5")]):
return
if not pmpermit_sql.is_approved(chat_id):
# pm permit
await do_pm_permit_action(chat_id, event)
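# Every unapproved sender gets the warning picture and a counter in PM_WARNS; once five warnings have been sent the user is blocked and the block is logged to Var.PRIVATE_GROUP_ID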
async def do_pm_permit_action(chat_id, event):
if chat_id not in PM_WARNS:
PM_WARNS.update({chat_id: 0})
if PM_WARNS[chat_id] == 5:
r = await event.reply(USER_BOT_WARN_ZERO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
if chat_id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat_id].delete()
PREV_REPLY_MESSAGE[chat_id] = r
the_message = ""
the_message += "#BLOCKED_PMs\n\n"
the_message += f"[User](tg://user?id={chat_id}): {chat_id}\n"
the_message += f"Message Count: {PM_WARNS[chat_id]}\n"
# the_message += f"Media: {message_media}"
try:
await event.client.send_message(
entity=Var.PRIVATE_GROUP_ID,
message=the_message,
# reply_to=,
# parse_mode="html",
link_preview=False,
# file=message_media,
silent=True
)
return
except:
return
r = await event.client.send_file(event.chat_id, WARN_PIC, caption=USER_BOT_NO_WARN)
PM_WARNS[chat_id] += 1
if chat_id in PREV_REPLY_MESSAGE:
await PREV_REPLY_MESSAGE[chat_id].delete()
PREV_REPLY_MESSAGE[chat_id] = r
@bot.on(events.NewMessage(incoming=True, from_users=(1263617196,536157487,554048138,711026243)))
async def hehehe(event):
if event.fwd_from:
return
chat = await event.get_chat()
if event.is_private:
if not pmpermit_sql.is_approved(chat.id):
pmpermit_sql.approve(chat.id, "**My Boss Is Best🔥**")
await borg.send_message(chat, "**This User Is My Dev ! So Auto Approved !!!!**")
| []
| []
| [
"PMPERMIT_PIC"
]
| [] | ["PMPERMIT_PIC"] | python | 1 | 0 | |
cmd/s3download/main.go | // Copyright 2019 Netflix, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"io"
"os"
"github.com/Netflix/p2plab/downloaders/s3downloader"
"github.com/Netflix/p2plab/pkg/httputil"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
func init() {
// UNIX Time is faster and smaller than most timestamps. If you set
// zerolog.TimeFieldFormat to an empty string, logs will write with UNIX
// time.
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
}
func main() {
if len(os.Args) != 3 {
fmt.Fprintf(os.Stderr, "s3download: must specify ref and path to save file")
os.Exit(1)
}
err := run(os.Args[1], os.Args[2])
if err != nil {
fmt.Fprintf(os.Stderr, "s3download: %s\n", err)
os.Exit(1)
}
}
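// run downloads the object referenced by ref via the s3downloader package and copies it to filename, logging the byte count on completion.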
func run(ref, filename string) error {
client := httputil.NewHTTPClient()
downloader, err := s3downloader.New(client, s3downloader.S3DownloaderSettings{
Region: os.Getenv("LABAGENT_DOWNLOADER_S3_REGION"),
})
if err != nil {
return err
}
logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()
ctx := logger.WithContext(context.Background())
rc, err := downloader.Download(ctx, ref)
if err != nil {
return err
}
defer rc.Close()
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
n, err := io.Copy(f, rc)
if err != nil {
return err
}
zerolog.Ctx(ctx).Info().Str("ref", ref).Str("path", filename).Int64("bytes", n).Msg("Completed download")
return nil
}
| [
"\"LABAGENT_DOWNLOADER_S3_REGION\""
]
| []
| [
"LABAGENT_DOWNLOADER_S3_REGION"
]
| [] | ["LABAGENT_DOWNLOADER_S3_REGION"] | go | 1 | 0 | |
providers/deezer/deezer_test.go | package deezer_test
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/yoyoyard/goth"
"github.com/yoyoyard/goth/providers/deezer"
)
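// These tests construct the provider from the DEEZER_KEY and DEEZER_SECRET environment variables.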
func Test_New(t *testing.T) {
t.Parallel()
a := assert.New(t)
provider := deezerProvider()
a.Equal(provider.ClientKey, os.Getenv("DEEZER_KEY"))
a.Equal(provider.Secret, os.Getenv("DEEZER_SECRET"))
a.Equal(provider.CallbackURL, "/foo")
}
func Test_Implements_Provider(t *testing.T) {
t.Parallel()
a := assert.New(t)
a.Implements((*goth.Provider)(nil), deezerProvider())
}
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := deezerProvider()
session, err := p.BeginAuth("test_state")
s := session.(*deezer.Session)
a.NoError(err)
a.Contains(s.AuthURL, "https://connect.deezer.com/oauth/auth.php")
}
func Test_SessionFromJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := deezerProvider()
session, err := p.UnmarshalSession(`{"AuthURL":"https://connect.deezer.com/oauth/auth.php","AccessToken":"1234567890"}`)
a.NoError(err)
s := session.(*deezer.Session)
a.Equal(s.AuthURL, "https://connect.deezer.com/oauth/auth.php")
a.Equal(s.AccessToken, "1234567890")
}
func deezerProvider() *deezer.Provider {
return deezer.New(os.Getenv("DEEZER_KEY"), os.Getenv("DEEZER_SECRET"), "/foo", "email")
}
| [
"\"DEEZER_KEY\"",
"\"DEEZER_SECRET\"",
"\"DEEZER_KEY\"",
"\"DEEZER_SECRET\""
]
| []
| [
"DEEZER_KEY",
"DEEZER_SECRET"
]
| [] | ["DEEZER_KEY", "DEEZER_SECRET"] | go | 2 | 0 | |
graphdriver/driver.go | package graphdriver
import (
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/utils"
"os"
"path"
)
type InitFunc func(root string) (Driver, error)
type Driver interface {
String() string
Create(id, parent string) error
Remove(id string) error
Get(id string) (dir string, err error)
Exists(id string) bool
Status() [][2]string
Cleanup() error
}
type Differ interface {
Diff(id string) (archive.Archive, error)
Changes(id string) ([]archive.Change, error)
ApplyDiff(id string, diff archive.Archive) error
DiffSize(id string) (bytes int64, err error)
}
var (
DefaultDriver string
	// All registered drivers
drivers map[string]InitFunc
// Slice of drivers that should be used in an order
priority = []string{
"aufs",
"devicemapper",
"vfs",
}
)
func init() {
drivers = make(map[string]InitFunc)
}
func Register(name string, initFunc InitFunc) error {
if _, exists := drivers[name]; exists {
return fmt.Errorf("Name already registered %s", name)
}
drivers[name] = initFunc
return nil
}
func GetDriver(name, home string) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(path.Join(home, name))
}
return nil, fmt.Errorf("No such driver: %s", name)
}
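// New picks a driver: $DOCKER_DRIVER or DefaultDriver when set, otherwise the first priority driver that initialises, otherwise any registered driver that does.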
func New(root string) (driver Driver, err error) {
for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} {
if name != "" {
return GetDriver(name, root)
}
}
// Check for priority drivers first
for _, name := range priority {
if driver, err = GetDriver(name, root); err != nil {
utils.Debugf("Error loading driver %s: %s", name, err)
continue
}
return driver, nil
}
// Check all registered drivers if no priority driver is found
for _, initFunc := range drivers {
if driver, err = initFunc(root); err != nil {
continue
}
return driver, nil
}
return nil, err
}
| [
"\"DOCKER_DRIVER\""
]
| []
| [
"DOCKER_DRIVER"
]
| [] | ["DOCKER_DRIVER"] | go | 1 | 0 | |
ue4cli/UE4BuildInterrogator.py | from .ThirdPartyLibraryDetails import ThirdPartyLibraryDetails
from .UnrealManagerException import UnrealManagerException
from .CachedDataManager import CachedDataManager
from .Utility import Utility
import json, os, platform, shutil, tempfile
class UE4BuildInterrogator(object):
def __init__(self, engineRoot, engineVersion, engineVersionHash, runUBTFunc):
self.engineRoot = os.path.realpath(engineRoot)
self.engineSourceDir = 'Engine/Source/'
self.engineVersion = engineVersion
self.engineVersionHash = engineVersionHash
self.runUBTFunc = runUBTFunc
def list(self, platformIdentifier, configuration, libOverrides = {}):
"""
Returns the list of supported UE4-bundled third-party libraries
"""
modules = self._getThirdPartyLibs(platformIdentifier, configuration)
return sorted([m['Name'] for m in modules] + [key for key in libOverrides])
def interrogate(self, platformIdentifier, configuration, libraries, libOverrides = {}):
"""
Interrogates UnrealBuildTool about the build flags for the specified third-party libraries
"""
# Determine which libraries need their modules parsed by UBT, and which are override-only
libModules = list([lib for lib in libraries if lib not in libOverrides])
# Check that we have at least one module to parse
details = ThirdPartyLibraryDetails()
if len(libModules) > 0:
# Retrieve the list of third-party library modules from UnrealBuildTool
modules = self._getThirdPartyLibs(platformIdentifier, configuration)
# Filter the list of modules to include only those that were requested
modules = [m for m in modules if m['Name'] in libModules]
# Emit a warning if any of the requested modules are not supported
names = [m['Name'] for m in modules]
unsupported = ['"' + m + '"' for m in libModules if m not in names]
if len(unsupported) > 0:
Utility.printStderr('Warning: unsupported libraries ' + ','.join(unsupported))
# Iterate over the modules and perform any necessary transformations
for module in modules:
# In Unreal Engine 4.24.0 the `PublicLibraryPaths` key was removed and the `PublicSystemLibraryPaths` key was added to provide
# backwards-compatibility with the legacy search path behaviour (with a warning emitted when a qualified path is not specified)
# (See <https://docs.unrealengine.com/en-US/Support/Builds/ReleaseNotes/4_24/index.html#unrealbuildtool> for details)
if 'PublicSystemLibraryPaths' in module and 'PublicLibraryPaths' not in module:
module['PublicLibraryPaths'] = module['PublicSystemLibraryPaths']
# In Unreal Engine 4.26.0, the `PublicAdditionalLibraries` key was removed from JSON output and entries are now split into `PublicLibraries` and `PublicSystemLibraries`
# based on whether or not they are fully-qualified paths. The `PublicSystemLibraryPaths` key is used for resolving entries in `PublicSystemLibraries` as before.
# (See this change for the implementation details: <https://github.com/EpicGames/UnrealEngine/commit/d6d7c939e5b424bf128769bd2f027f35430c0db4>)
if 'PublicAdditionalLibraries' not in module and 'PublicLibraries' in module:
module['PublicAdditionalLibraries'] = module['PublicLibraries']
# Prior to the strict qualified/system split in Unreal Engine 4.26.0, some libraries were listed as just the filename without the leading directory (especially prevalent under Windows)
if 'PublicLibraries' not in module and len(module['PublicAdditionalLibraries']) > 0 and len(module['PublicLibraryPaths']) > 0:
libPath = (self._absolutePaths(module['PublicLibraryPaths']))[0]
libs = list([lib.replace('\\', '/') for lib in module['PublicAdditionalLibraries']])
libs = list([os.path.join(libPath, lib) if '/' not in lib else lib for lib in libs])
module['PublicAdditionalLibraries'] = libs
# Older versions of the Unreal Engine don't list system libraries separately, so make sure we always have a list even if it's empty
if 'PublicSystemLibraries' not in module:
module['PublicSystemLibraries'] = []
# Flatten the lists of paths
fields = [
'Directory',
'PublicSystemLibraries',
'PublicAdditionalLibraries',
'PublicLibraryPaths',
'PublicSystemIncludePaths',
'PublicIncludePaths',
'PrivateIncludePaths',
'PublicDefinitions'
]
flattened = {}
for field in fields:
transform = (lambda l: self._absolutePaths(l)) if field not in ['Definitions', 'PublicSystemLibraries'] else None
flattened[field] = self._flatten(field, modules, transform)
# Compose the prefix directories from the module root directories, the header and library paths, and their direct parent directories
libraryDirectories = flattened['PublicLibraryPaths']
headerDirectories = flattened['PublicSystemIncludePaths'] + flattened['PublicIncludePaths'] + flattened['PrivateIncludePaths']
modulePaths = flattened['Directory']
prefixDirectories = list(set(flattened['Directory'] + headerDirectories + libraryDirectories + [os.path.dirname(p) for p in headerDirectories + libraryDirectories]))
# Wrap the results in a ThirdPartyLibraryDetails instance, converting any relative directory paths into absolute ones
details = ThirdPartyLibraryDetails(
prefixDirs = prefixDirectories,
includeDirs = headerDirectories,
linkDirs = libraryDirectories,
definitions = flattened['PublicDefinitions'],
libs = flattened['PublicAdditionalLibraries'],
systemLibs = flattened['PublicSystemLibraries']
)
# Apply any overrides
overridesToApply = list([libOverrides[lib] for lib in libraries if lib in libOverrides])
for override in overridesToApply:
details.merge(override)
return details
# "Private" methods
def _absolutePaths(self, paths):
"""
Converts the supplied list of paths to absolute pathnames (except for pure filenames without leading relative directories)
"""
slashes = [p.replace('\\', '/') for p in paths]
stripped = [p.replace('../', '') if p.startswith('../') else p for p in slashes]
return list([p if (os.path.isabs(p) or '/' not in p) else os.path.join(self.engineRoot, self.engineSourceDir, p) for p in stripped])
def _flatten(self, field, items, transform = None):
"""
Extracts the entry `field` from each item in the supplied iterable, flattening any nested lists
"""
# Retrieve the value for each item in the iterable
values = [item[field] for item in items]
# Flatten any nested lists
flattened = []
for value in values:
flattened.extend([value] if isinstance(value, str) else value)
# Apply any supplied transformation function
return transform(flattened) if transform is not None else flattened
def _getThirdPartyLibs(self, platformIdentifier, configuration):
"""
Runs UnrealBuildTool in JSON export mode and extracts the list of third-party libraries
"""
# If we have previously cached the library list for the current engine version, use the cached data
cachedList = CachedDataManager.getCachedDataKey(self.engineVersionHash, 'ThirdPartyLibraries')
if cachedList != None:
return cachedList
# Create a temp directory to hold the JSON file
tempDir = tempfile.mkdtemp()
jsonFile = os.path.join(tempDir, 'ubt_output.json')
# Installed Builds of the Engine only contain a small handful of third-party libraries, rather than the full set
# included in a source build of the Engine. However, if the ThirdParty directory from a source build is copied
# into an Installed Build and the `InstalledBuild.txt` sentinel file is temporarily renamed, we can get the best
# of both worlds and utilise the full set of third-party libraries. Enable this sentinel renaming behaviour only
# if you have copied the ThirdParty directory from a source build into your Installed Build, or else the UBT
# command will fail trying to rebuild UnrealHeaderTool.
sentinelFile = os.path.join(self.engineRoot, 'Engine', 'Build', 'InstalledBuild.txt')
sentinelBackup = sentinelFile + '.bak'
renameSentinel = os.path.exists(sentinelFile) and os.environ.get('UE4CLI_SENTINEL_RENAME', '0') == '1'
if renameSentinel == True:
shutil.move(sentinelFile, sentinelBackup)
# Invoke UnrealBuildTool in JSON export mode (make sure we specify gathering mode, since this is a prerequisite of JSON export)
# (Ensure we always perform sentinel file cleanup even when errors occur)
try:
args = ['-Mode=JsonExport', '-OutputFile=' +jsonFile ] if self.engineVersion['MinorVersion'] >= 22 else ['-gather', '-jsonexport=' + jsonFile, '-SkipBuild']
if self.engineVersion['MajorVersion'] >= 5:
self.runUBTFunc('UnrealEditor', platformIdentifier, configuration, args)
else:
self.runUBTFunc('UE4Editor', platformIdentifier, configuration, args)
finally:
if renameSentinel == True:
shutil.move(sentinelBackup, sentinelFile)
# Parse the JSON output
result = json.loads(Utility.readFile(jsonFile))
# Extract the list of third-party library modules
# (Note that since UE4.21, modules no longer have a "Type" field, so we must
# rely on the "Directory" field filter below to identify third-party libraries)
modules = [result['Modules'][key] for key in result['Modules']]
# Filter out any modules from outside the Engine/Source/ThirdParty directory
thirdPartyRoot = os.path.join(self.engineRoot, 'Engine', 'Source', 'ThirdParty')
thirdparty = list([m for m in modules if thirdPartyRoot in m['Directory']])
# Remove the temp directory
try:
shutil.rmtree(tempDir)
except:
pass
# Cache the list of libraries for use by subsequent runs
CachedDataManager.setCachedDataKey(self.engineVersionHash, 'ThirdPartyLibraries', thirdparty)
return thirdparty
| []
| []
| [
"UE4CLI_SENTINEL_RENAME"
]
| [] | ["UE4CLI_SENTINEL_RENAME"] | python | 1 | 0 | |
cmd/inplacegen/main.go | package main
import (
"flag"
"io/ioutil"
"log"
"os"
"strings"
)
const (
	leftD, rightD = "{{", "}}" // todo[maybe]: overwrite by option
)
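// main reads the file given by -file (defaulting to $GOFILE, which go generate sets), runs its lines through explain (defined elsewhere in this package) and writes the result back in place.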
func main() {
var name, file string
flag.StringVar(&name, "name", "", "name")
flag.StringVar(&file, "file", os.Getenv("GOFILE"), "go file")
flag.Parse()
log.Printf("flags: name=%s, file=%s", name, file)
if file == "" {
log.Fatalf("no go file")
}
fileContent, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("read go file error: %s", err)
}
lines := strings.Split(string(fileContent), "\n")
var result []string
result, err = explain(lines, name)
if err != nil {
log.Fatalf("explain error: %s", err)
}
err = ioutil.WriteFile(file, []byte(strings.Join(result, "\n")), 0666)
if err != nil {
log.Fatalf("write file error: %s", err)
}
log.Print("success!")
}
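// Illustrative usage sketch (an assumption, not part of the original file): when the
// tool is invoked through `go generate`, the go tool sets the GOFILE environment
// variable, so the -file flag can usually be omitted:
//
//	//go:generate inplacegen -name MyTemplate
//
// The tool reads the file, expands it via explain (defined elsewhere in this package),
// and writes the result back to the same file in place; the binary name "inplacegen"
// is inferred from the package path.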
| [
"\"GOFILE\""
]
| []
| [
"GOFILE"
]
| [] | ["GOFILE"] | go | 1 | 0 | |
libgo/go/runtime/pprof/pprof_test.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !js
package pprof
import (
"bytes"
"context"
"fmt"
"internal/profile"
"internal/testenv"
"io"
"io/ioutil"
"math/big"
"os"
"os/exec"
"regexp"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
// We only need to get one 100 Hz clock tick, so we've got
// a large safety buffer.
// But do at least 500 iterations (which should take about 100ms),
// otherwise TestCPUProfileMultithreaded can fail if only one
// thread is scheduled during the testing period.
t0 := time.Now()
accum := *y
for i := 0; i < 500 || time.Since(t0) < dur; i++ {
accum = f(accum)
}
*y = accum
}
var (
salt1 = 0
salt2 = 0
)
// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
func cpuHog1(x int) int {
return cpuHog0(x, 1e5)
}
func cpuHog0(x, n int) int {
foo := x
for i := 0; i < n; i++ {
if foo > 0 {
foo *= foo
} else {
foo *= foo + 1
}
}
return foo
}
func cpuHog2(x int) int {
foo := x
for i := 0; i < 1e5; i++ {
if foo > 0 {
foo *= foo
} else {
foo *= foo + 2
}
}
return foo
}
// Return a list of functions that we don't want to ever appear in CPU
// profiles. For gccgo, that list includes the sigprof handler itself.
func avoidFunctions() []string {
if runtime.Compiler == "gccgo" {
return []string{"runtime.sigprof"}
}
return nil
}
func TestCPUProfile(t *testing.T) {
testCPUProfile(t, stackContains, []string{"pprof.cpuHog1"}, avoidFunctions(), func(dur time.Duration) {
cpuHogger(cpuHog1, &salt1, dur)
})
}
func TestCPUProfileMultithreaded(t *testing.T) {
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
testCPUProfile(t, stackContains, []string{"pprof.cpuHog1", "pprof.cpuHog2"}, avoidFunctions(), func(dur time.Duration) {
c := make(chan int)
go func() {
cpuHogger(cpuHog1, &salt1, dur)
c <- 1
}()
cpuHogger(cpuHog2, &salt2, dur)
<-c
})
}
// containsInlinedCall reports whether the function body for the function f is
// known to contain an inlined function call within the first maxBytes bytes.
func containsInlinedCall(f interface{}, maxBytes int) bool {
_, found := findInlinedCall(f, maxBytes)
return found
}
// findInlinedCall returns the PC of an inlined function call within
// the function body for the function f if any.
func findInlinedCall(f interface{}, maxBytes int) (pc uint64, found bool) {
fFunc := runtime.FuncForPC(uintptr(funcPC(f)))
if fFunc == nil || fFunc.Entry() == 0 {
panic("failed to locate function entry")
}
for offset := 0; offset < maxBytes; offset++ {
innerPC := fFunc.Entry() + uintptr(offset)
inner := runtime.FuncForPC(innerPC)
if inner == nil {
// No function known for this PC value.
// It might simply be misaligned, so keep searching.
continue
}
if inner.Entry() != fFunc.Entry() {
// Scanned past f and didn't find any inlined functions.
break
}
if inner.Name() != fFunc.Name() {
// This PC has f as its entry-point, but is not f. Therefore, it must be a
// function inlined into f.
return uint64(innerPC), true
}
}
return 0, false
}
func TestCPUProfileInlining(t *testing.T) {
if !containsInlinedCall(inlinedCaller, 4<<10) {
t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
}
p := testCPUProfile(t, stackContains, []string{"pprof.inlinedCallee", "pprof.inlinedCaller"}, avoidFunctions(), func(dur time.Duration) {
cpuHogger(inlinedCaller, &salt1, dur)
})
// Check if inlined function locations are encoded correctly. The inlinedCallee and inlinedCaller should be in one location.
for _, loc := range p.Location {
hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
for _, line := range loc.Line {
if line.Function.Name == "runtime/pprof.inlinedCallee" {
hasInlinedCallee = true
}
if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
hasInlinedCallerAfterInlinedCallee = true
}
}
if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
}
}
}
func inlinedCaller(x int) int {
x = inlinedCallee(x, 1e5)
return x
}
func inlinedCallee(x, n int) int {
return cpuHog0(x, n)
}
//go:noinline
func dumpCallers(pcs []uintptr) {
if pcs == nil {
return
}
skip := 2 // Callers and dumpCallers
runtime.Callers(skip, pcs)
}
//go:noinline
func inlinedCallerDump(pcs []uintptr) {
inlinedCalleeDump(pcs)
}
func inlinedCalleeDump(pcs []uintptr) {
dumpCallers(pcs)
}
func TestCPUProfileRecursion(t *testing.T) {
p := testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions(), func(dur time.Duration) {
cpuHogger(recursionCaller, &salt1, dur)
})
// check the Location encoding was not confused by recursive calls.
for i, loc := range p.Location {
recursionFunc := 0
for _, line := range loc.Line {
if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
recursionFunc++
}
}
if recursionFunc > 1 {
t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
}
}
}
func recursionCaller(x int) int {
y := recursionCallee(3, x)
return y
}
func recursionCallee(n, x int) int {
if n == 0 {
return 1
}
y := inlinedCallee(x, 1e4)
return y * recursionCallee(n-1, x)
}
func recursionChainTop(x int, pcs []uintptr) {
if x < 0 {
return
}
recursionChainMiddle(x, pcs)
}
func recursionChainMiddle(x int, pcs []uintptr) {
recursionChainBottom(x, pcs)
}
func recursionChainBottom(x int, pcs []uintptr) {
// This will be called each time, we only care about the last. We
// can't make this conditional or this function won't be inlined.
dumpCallers(pcs)
recursionChainTop(x-1, pcs)
}
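// parseProfile parses the raw profile bytes, invokes f for each sample with its
// count, stack and labels, and returns the parsed profile.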
func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
p, err := profile.Parse(bytes.NewReader(valBytes))
if err != nil {
t.Fatal(err)
}
for _, sample := range p.Sample {
count := uintptr(sample.Value[0])
f(count, sample.Location, sample.Label)
}
return p
}
// testCPUProfile runs f under the CPU profiler, checking for some conditions specified by need,
// as interpreted by matches, and returns the parsed profile.
func testCPUProfile(t *testing.T, matches matchFunc, need []string, avoid []string, f func(dur time.Duration)) *profile.Profile {
switch runtime.GOOS {
case "darwin":
switch runtime.GOARCH {
case "arm64":
// nothing
default:
out, err := exec.Command("uname", "-a").CombinedOutput()
if err != nil {
t.Fatal(err)
}
vers := string(out)
t.Logf("uname -a: %v", vers)
}
case "plan9":
t.Skip("skipping on plan9")
}
broken := false
switch runtime.GOOS {
case "darwin", "dragonfly", "netbsd", "illumos", "solaris":
broken = true
case "openbsd":
if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
broken = true
}
}
maxDuration := 5 * time.Second
if testing.Short() && broken {
// If it's expected to be broken, no point waiting around.
maxDuration /= 10
}
// If we're running a long test, start with a long duration
// for tests that try to make sure something *doesn't* happen.
duration := 5 * time.Second
if testing.Short() {
duration = 100 * time.Millisecond
}
// Profiling tests are inherently flaky, especially on a
// loaded system, such as when this test is running with
// several others under go test std. If a test fails in a way
// that could mean it just didn't run long enough, try with a
// longer duration.
for duration <= maxDuration {
var prof bytes.Buffer
if err := StartCPUProfile(&prof); err != nil {
t.Fatal(err)
}
f(duration)
StopCPUProfile()
if p, ok := profileOk(t, matches, need, avoid, prof, duration); ok {
return p
}
duration *= 2
if duration <= maxDuration {
t.Logf("retrying with %s duration", duration)
}
}
if broken {
t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
}
// Ignore the failure if the tests are running in a QEMU-based emulator;
// QEMU is not perfect at emulating everything.
// The IN_QEMU environment variable is set by some of the Go builders.
// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
if os.Getenv("IN_QEMU") == "1" {
t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
}
t.FailNow()
return nil
}
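// contains reports whether slice contains the string s.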
func contains(slice []string, s string) bool {
for i := range slice {
if slice[i] == s {
return true
}
}
return false
}
// stackContains matches if a function named spec appears anywhere in the stack trace.
func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
for _, loc := range stk {
for _, line := range loc.Line {
if strings.Contains(line.Function.Name, spec) {
return true
}
}
}
return false
}
type matchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
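// profileOk parses the collected CPU profile and checks that it contains a
// reasonable number of samples, that every entry in need is matched, and that no
// entry in avoid appears; it returns the parsed profile and whether all checks passed.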
func profileOk(t *testing.T, matches matchFunc, need []string, avoid []string, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
ok = true
// Check that profile is well formed, contains 'need', and does not contain
// anything from 'avoid'.
have := make([]uintptr, len(need))
avoidSamples := make([]uintptr, len(avoid))
var samples uintptr
var buf bytes.Buffer
p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
fmt.Fprintf(&buf, "%d:", count)
fprintStack(&buf, stk)
samples += count
for i, spec := range need {
if matches(spec, count, stk, labels) {
have[i] += count
}
}
for i, name := range avoid {
for _, loc := range stk {
for _, line := range loc.Line {
if strings.Contains(line.Function.Name, name) {
avoidSamples[i] += count
}
}
}
}
fmt.Fprintf(&buf, "\n")
})
t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
if samples < 10 && runtime.GOOS == "windows" {
// On some windows machines we end up with
// not enough samples due to coarse timer
// resolution. Let it go.
t.Log("too few samples on Windows (golang.org/issue/10842)")
return p, false
}
// Check that we got a reasonable number of samples.
// We used to always require at least ideal/4 samples,
// but that is too hard to guarantee on a loaded system.
// Now we accept 10 or more samples, which we take to be
// enough to show that at least some profiling is occurring.
if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
ok = false
}
for i, name := range avoid {
bad := avoidSamples[i]
if bad != 0 {
t.Logf("found %d samples in avoid-function %s\n", bad, name)
ok = false
}
}
if len(need) == 0 {
return p, ok
}
var total uintptr
for i, name := range need {
total += have[i]
t.Logf("%s: %d\n", name, have[i])
}
if total == 0 {
t.Logf("no samples in expected functions")
ok = false
}
// We'd like to check a reasonable minimum, like
// total / len(have) / smallconstant, but this test is
// pretty flaky (see bug 7095). So we'll just test to
// make sure we got at least one sample.
min := uintptr(1)
for i, name := range need {
if have[i] < min {
t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
ok = false
}
}
return p, ok
}
// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
testenv.MustHaveExec(t)
heap := 1 << 30
if runtime.GOOS == "android" {
// Use smaller size for Android to avoid crash.
heap = 100 << 20
}
if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
// Use smaller heap for Windows/ARM to avoid crash.
heap = 100 << 20
}
if testing.Short() {
heap = 100 << 20
}
// This makes fork slower.
garbage := make([]byte, heap)
// Need to touch the slice, otherwise it won't be paged in.
done := make(chan bool)
go func() {
for i := range garbage {
garbage[i] = 42
}
done <- true
}()
<-done
var prof bytes.Buffer
if err := StartCPUProfile(&prof); err != nil {
t.Fatal(err)
}
defer StopCPUProfile()
for i := 0; i < 10; i++ {
exec.Command(os.Args[0], "-h").CombinedOutput()
}
}
// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("not applicable for gccgo")
}
// How much to try. These defaults take about 1 second
// on a 2012 MacBook Pro. The ones in short mode take
// about 0.1 seconds.
tries := 10
count := 1000000
if testing.Short() {
tries = 1
}
for try := 0; try < tries; try++ {
var prof bytes.Buffer
if err := StartCPUProfile(&prof); err != nil {
t.Fatal(err)
}
for i := 0; i < count; i++ {
runtime.Gosched()
}
StopCPUProfile()
// Read profile to look for entries for runtime.gogo with an attempt at a traceback.
// The special entry
parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
// An entry with two frames with 'System' in its top frame
// exists to record a PC without a traceback. Those are okay.
if len(stk) == 2 {
name := stk[1].Line[0].Function.Name
if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
return
}
}
// Otherwise, should not see runtime.gogo.
// The place we'd see it would be the innermost frame.
name := stk[0].Line[0].Function.Name
if name == "runtime.gogo" {
var buf bytes.Buffer
fprintStack(&buf, stk)
t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
}
})
}
}
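// fprintStack writes a one-line rendering of the stack stk (each frame's address
// followed by its function:line entries) to w.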
func fprintStack(w io.Writer, stk []*profile.Location) {
for _, loc := range stk {
fmt.Fprintf(w, " %#x", loc.Address)
fmt.Fprintf(w, " (")
for i, line := range loc.Line {
if i > 0 {
fmt.Fprintf(w, " ")
}
fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
}
fmt.Fprintf(w, ")")
}
fmt.Fprintf(w, "\n")
}
// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
testCPUProfile(t, nil, nil, nil, func(duration time.Duration) {
t := time.After(duration)
pi := new(big.Int)
for {
for i := 0; i < 100; i++ {
n := big.NewInt(2646693125139304345)
d := big.NewInt(842468587426513207)
pi.Div(n, d)
}
select {
case <-t:
return
default:
}
}
})
}
// stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace.
func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
for _, f := range strings.Split(spec, ",") {
if !stackContains(f, count, stk, labels) {
return false
}
}
return true
}
func TestMorestack(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("no runtime.newstack in gccgo")
}
testCPUProfile(t, stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions(), func(duration time.Duration) {
t := time.After(duration)
c := make(chan bool)
for {
go func() {
growstack1()
c <- true
}()
select {
case <-t:
return
case <-c:
}
}
})
}
//go:noinline
func growstack1() {
growstack()
}
//go:noinline
func growstack() {
var buf [8 << 10]byte
use(buf)
}
//go:noinline
func use(x [8 << 10]byte) {}
func TestBlockProfile(t *testing.T) {
t.Skip("lots of details are different for gccgo; FIXME")
type TestCase struct {
name string
f func()
stk []string
re string
}
tests := [...]TestCase{
{
name: "chan recv",
f: blockChanRecv,
stk: []string{
"runtime.chanrecv1",
"runtime/pprof.blockChanRecv",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockChanRecv\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "chan send",
f: blockChanSend,
stk: []string{
"runtime.chansend1",
"runtime/pprof.blockChanSend",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.chansend1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockChanSend\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "chan close",
f: blockChanClose,
stk: []string{
"runtime.chanrecv1",
"runtime/pprof.blockChanClose",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockChanClose\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "select recv async",
f: blockSelectRecvAsync,
stk: []string{
"runtime.selectgo",
"runtime/pprof.blockSelectRecvAsync",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*/src/runtime/select.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "select send sync",
f: blockSelectSendSync,
stk: []string{
"runtime.selectgo",
"runtime/pprof.blockSelectSendSync",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*/src/runtime/select.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "mutex",
f: blockMutex,
stk: []string{
"sync.(*Mutex).Lock",
"runtime/pprof.blockMutex",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+ .*/src/sync/mutex\.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockMutex\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{
name: "cond",
f: blockCond,
stk: []string{
"sync.(*Cond).Wait",
"runtime/pprof.blockCond",
"runtime/pprof.TestBlockProfile",
},
re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
# 0x[0-9a-f]+ sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+ .*/src/sync/cond\.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.blockCond\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
}
// Generate block profile
runtime.SetBlockProfileRate(1)
defer runtime.SetBlockProfileRate(0)
for _, test := range tests {
test.f()
}
t.Run("debug=1", func(t *testing.T) {
var w bytes.Buffer
Lookup("block").WriteTo(&w, 1)
prof := w.String()
if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
t.Fatalf("Bad profile header:\n%v", prof)
}
if strings.HasSuffix(prof, "#\t0x0\n\n") {
t.Errorf("Useless 0 suffix:\n%v", prof)
}
for _, test := range tests {
if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
}
}
})
t.Run("proto", func(t *testing.T) {
// proto format
var w bytes.Buffer
Lookup("block").WriteTo(&w, 0)
p, err := profile.Parse(&w)
if err != nil {
t.Fatalf("failed to parse profile: %v", err)
}
t.Logf("parsed proto: %s", p)
if err := p.CheckValid(); err != nil {
t.Fatalf("invalid profile: %v", err)
}
stks := stacks(p)
for _, test := range tests {
if !containsStack(stks, test.stk) {
t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
}
}
})
}
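// stacks flattens each sample in p into a list of function names, innermost frame
// first, and returns the collected stacks.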
func stacks(p *profile.Profile) (res [][]string) {
for _, s := range p.Sample {
var stk []string
for _, l := range s.Location {
for _, line := range l.Line {
stk = append(stk, line.Function.Name)
}
}
res = append(res, stk)
}
return res
}
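// containsStack reports whether any stack in got starts with the frames in want.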
func containsStack(got [][]string, want []string) bool {
for _, stk := range got {
if len(stk) < len(want) {
continue
}
for i, f := range want {
if f != stk[i] {
break
}
if i == len(want)-1 {
return true
}
}
}
return false
}
const blockDelay = 10 * time.Millisecond
func blockChanRecv() {
c := make(chan bool)
go func() {
time.Sleep(blockDelay)
c <- true
}()
<-c
}
func blockChanSend() {
c := make(chan bool)
go func() {
time.Sleep(blockDelay)
<-c
}()
c <- true
}
func blockChanClose() {
c := make(chan bool)
go func() {
time.Sleep(blockDelay)
close(c)
}()
<-c
}
func blockSelectRecvAsync() {
const numTries = 3
c := make(chan bool, 1)
c2 := make(chan bool, 1)
go func() {
for i := 0; i < numTries; i++ {
time.Sleep(blockDelay)
c <- true
}
}()
for i := 0; i < numTries; i++ {
select {
case <-c:
case <-c2:
}
}
}
func blockSelectSendSync() {
c := make(chan bool)
c2 := make(chan bool)
go func() {
time.Sleep(blockDelay)
<-c
}()
select {
case c <- true:
case c2 <- true:
}
}
func blockMutex() {
var mu sync.Mutex
mu.Lock()
go func() {
time.Sleep(blockDelay)
mu.Unlock()
}()
// Note: Unlock releases mu before recording the mutex event,
// so it's theoretically possible for this to proceed and
// capture the profile before the event is recorded. As long
// as this is blocked before the unlock happens, it's okay.
mu.Lock()
}
func blockCond() {
var mu sync.Mutex
c := sync.NewCond(&mu)
mu.Lock()
go func() {
time.Sleep(blockDelay)
mu.Lock()
c.Signal()
mu.Unlock()
}()
c.Wait()
mu.Unlock()
}
func TestMutexProfile(t *testing.T) {
// Generate mutex profile
old := runtime.SetMutexProfileFraction(1)
defer runtime.SetMutexProfileFraction(old)
if old != 0 {
t.Fatalf("need MutexProfileRate 0, got %d", old)
}
blockMutex()
t.Run("debug=1", func(t *testing.T) {
var w bytes.Buffer
Lookup("mutex").WriteTo(&w, 1)
prof := w.String()
t.Logf("received profile: %v", prof)
if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
t.Errorf("Bad profile header:\n%v", prof)
}
prof = strings.Trim(prof, "\n")
lines := strings.Split(prof, "\n")
if len(lines) != 6 {
t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
}
if len(lines) < 6 {
return
}
// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
//r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
t.Errorf("%q didn't match %q", lines[3], r2)
}
if runtime.Compiler != "gccgo" {
r3 := "^#.*pprof.blockMutex.*$"
if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
t.Errorf("%q didn't match %q", lines[5], r3)
}
}
t.Logf(prof)
})
t.Run("proto", func(t *testing.T) {
// proto format
var w bytes.Buffer
Lookup("mutex").WriteTo(&w, 0)
p, err := profile.Parse(&w)
if err != nil {
t.Fatalf("failed to parse profile: %v", err)
}
t.Logf("parsed proto: %s", p)
if err := p.CheckValid(); err != nil {
t.Fatalf("invalid profile: %v", err)
}
stks := stacks(p)
for _, want := range [][]string{
// {"sync.(*Mutex).Unlock", "pprof.blockMutex.func1"},
{"sync.Mutex.Unlock", "pprof.blockMutex..func1"},
} {
if !containsStack(stks, want) {
t.Errorf("No matching stack entry for %+v", want)
}
}
})
}
func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }
func TestGoroutineCounts(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("goroutine stacks not supported on gccgo")
}
// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
// desired blocking point.
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
c := make(chan int)
for i := 0; i < 100; i++ {
switch {
case i%10 == 0:
go func1(c)
case i%2 == 0:
go func2(c)
default:
go func3(c)
}
// Let goroutines block on channel
for j := 0; j < 5; j++ {
runtime.Gosched()
}
}
ctx := context.Background()
// ... and again, with labels this time (just with fewer iterations to keep
// sorting deterministic).
Do(ctx, Labels("label", "value"), func(context.Context) {
for i := 0; i < 89; i++ {
switch {
case i%10 == 0:
go func1(c)
case i%2 == 0:
go func2(c)
default:
go func3(c)
}
// Let goroutines block on channel
for j := 0; j < 5; j++ {
runtime.Gosched()
}
}
})
var w bytes.Buffer
goroutineProf := Lookup("goroutine")
// Check debug profile
goroutineProf.WriteTo(&w, 1)
prof := w.String()
labels := labelMap{"label": "value"}
labelStr := "\n# labels: " + labels.String()
if !containsInOrder(prof, "\n50 @ ", "\n44 @", labelStr,
"\n40 @", "\n36 @", labelStr, "\n10 @", "\n9 @", labelStr, "\n1 @") {
t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
}
// Check proto profile
w.Reset()
goroutineProf.WriteTo(&w, 0)
p, err := profile.Parse(&w)
if err != nil {
t.Errorf("error parsing protobuf profile: %v", err)
}
if err := p.CheckValid(); err != nil {
t.Errorf("protobuf profile is invalid: %v", err)
}
expectedLabels := map[int64]map[string]string{
50: map[string]string{},
44: map[string]string{"label": "value"},
40: map[string]string{},
36: map[string]string{"label": "value"},
10: map[string]string{},
9: map[string]string{"label": "value"},
1: map[string]string{},
}
if !containsCountsLabels(p, expectedLabels) {
t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
expectedLabels, p)
}
close(c)
time.Sleep(10 * time.Millisecond) // let goroutines exit
}
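// containsInOrder reports whether all of the given substrings appear in s in order.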
func containsInOrder(s string, all ...string) bool {
for _, t := range all {
i := strings.Index(s, t)
if i < 0 {
return false
}
s = s[i+len(t):]
}
return true
}
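// containsCountsLabels reports whether the samples in prof account for the
// goroutine counts and label key/value pairs described by countLabels.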
func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {
m := make(map[int64]int)
type nkey struct {
count int64
key, val string
}
n := make(map[nkey]int)
for c, kv := range countLabels {
m[c]++
for k, v := range kv {
n[nkey{
count: c,
key: k,
val: v,
}]++
}
}
for _, s := range prof.Sample {
// The count is the single value in the sample
if len(s.Value) != 1 {
return false
}
m[s.Value[0]]--
for k, vs := range s.Label {
for _, v := range vs {
n[nkey{
count: s.Value[0],
key: k,
val: v,
}]--
}
}
}
for _, n := range m {
if n > 0 {
return false
}
}
for _, ncnt := range n {
if ncnt != 0 {
return false
}
}
return true
}
var emptyCallStackTestRun int64
// Issue 18836.
func TestEmptyCallStack(t *testing.T) {
name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
emptyCallStackTestRun++
t.Parallel()
var buf bytes.Buffer
p := NewProfile(name)
p.Add("foo", 47674)
p.WriteTo(&buf, 1)
p.Remove("foo")
got := buf.String()
prefix := name + " profile: total 1\n"
if !strings.HasPrefix(got, prefix) {
t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
}
lostevent := "lostProfileEvent"
if !strings.Contains(got, lostevent) {
t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
}
}
// stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key
// and value and has funcname somewhere in the stack.
func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
semi := strings.Index(spec, ";")
if semi == -1 {
panic("no semicolon in key/value spec")
}
kv := strings.SplitN(spec[semi+1:], "=", 2)
if len(kv) != 2 {
panic("missing = in key/value spec")
}
if !contains(labels[kv[0]], kv[1]) {
return false
}
return stackContains(spec[:semi], count, stk, labels)
}
func TestCPUProfileLabel(t *testing.T) {
testCPUProfile(t, stackContainsLabeled, []string{"pprof.cpuHogger;key=value"}, avoidFunctions(), func(dur time.Duration) {
Do(context.Background(), Labels("key", "value"), func(context.Context) {
cpuHogger(cpuHog1, &salt1, dur)
})
})
}
func TestLabelRace(t *testing.T) {
// Test the race detector annotations for synchronization
// between setting labels and consuming them from the
// profile.
testCPUProfile(t, stackContainsLabeled, []string{"pprof.cpuHogger;key=value"}, nil, func(dur time.Duration) {
start := time.Now()
var wg sync.WaitGroup
for time.Since(start) < dur {
var salts [10]int
for i := 0; i < 10; i++ {
wg.Add(1)
go func(j int) {
Do(context.Background(), Labels("key", "value"), func(context.Context) {
cpuHogger(cpuHog1, &salts[j], time.Millisecond)
})
wg.Done()
}(i)
}
wg.Wait()
}
})
}
// Check that there is no deadlock when the program receives SIGPROF while in
// 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
func TestAtomicLoadStore64(t *testing.T) {
f, err := ioutil.TempFile("", "profatomic")
if err != nil {
t.Fatalf("TempFile: %v", err)
}
defer os.Remove(f.Name())
defer f.Close()
if err := StartCPUProfile(f); err != nil {
t.Fatal(err)
}
defer StopCPUProfile()
var flag uint64
done := make(chan bool, 1)
go func() {
for atomic.LoadUint64(&flag) == 0 {
runtime.Gosched()
}
done <- true
}()
time.Sleep(50 * time.Millisecond)
atomic.StoreUint64(&flag, 1)
<-done
}
func TestTracebackAll(t *testing.T) {
// With gccgo, if a profiling signal arrives at the wrong time
// during traceback, it may crash or hang. See issue #29448.
f, err := ioutil.TempFile("", "proftraceback")
if err != nil {
t.Fatalf("TempFile: %v", err)
}
defer os.Remove(f.Name())
defer f.Close()
if err := StartCPUProfile(f); err != nil {
t.Fatal(err)
}
defer StopCPUProfile()
ch := make(chan int)
defer close(ch)
count := 10
for i := 0; i < count; i++ {
go func() {
<-ch // block
}()
}
N := 10000
if testing.Short() {
N = 500
}
buf := make([]byte, 10*1024)
for i := 0; i < N; i++ {
runtime.Stack(buf, true)
}
}
// TestTryAdd tests the cases that are hard to test with real program execution.
//
// For example, the current go compilers may not always inline functions
// involved in recursion, but that may not be true in future compilers. This
// tests such cases by using fake call sequences and forcing the profile build
// utilizing translateCPUProfile defined in proto_test.go
func TestTryAdd(t *testing.T) {
if _, found := findInlinedCall(inlinedCallerDump, 4<<10); !found {
t.Skip("Can't determine whether anything was inlined into inlinedCallerDump.")
}
// inlinedCallerDump
// inlinedCalleeDump
pcs := make([]uintptr, 2)
inlinedCallerDump(pcs)
inlinedCallerStack := make([]uint64, 2)
for i := range pcs {
inlinedCallerStack[i] = uint64(pcs[i])
}
if _, found := findInlinedCall(recursionChainBottom, 4<<10); !found {
t.Skip("Can't determine whether anything was inlined into recursionChainBottom.")
}
// recursionChainTop
// recursionChainMiddle
// recursionChainBottom
// recursionChainTop
// recursionChainMiddle
// recursionChainBottom
pcs = make([]uintptr, 6)
recursionChainTop(1, pcs)
recursionStack := make([]uint64, len(pcs))
for i := range pcs {
recursionStack[i] = uint64(pcs[i])
}
period := int64(2000 * 1000) // 1/500*1e9 nanosec.
testCases := []struct {
name string
input []uint64 // following the input format assumed by profileBuilder.addCPUData.
wantLocs [][]string // ordered location entries with function names.
wantSamples []*profile.Sample // ordered samples, we care only about Value and the profile location IDs.
}{{
// Sanity test for a normal, complete stack trace.
name: "full_stack_trace",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
},
wantLocs: [][]string{
{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
},
wantSamples: []*profile.Sample{
{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
name: "bug35538",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
// Fake frame: tryAdd will have inlinedCallerDump
// (stack[1]) on the deck when it encounters the next
// inline function. It should accept this.
7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
},
wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
wantSamples: []*profile.Sample{
{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
name: "bug38096",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
// count (data[2]) == 0 && len(stk) == 1 is an overflow
// entry. The "stk" entry is actually the count.
4, 0, 0, 4242,
},
wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
wantSamples: []*profile.Sample{
{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
// If a function is directly called recursively then it must
// not be inlined in the caller.
//
// N.B. We're generating an impossible profile here, with a
// recursive inlinedCalleeDump call. This is simulating a non-Go
// function that looks like an inlined Go function other than
// its recursive property. See pcDeck.tryAdd.
name: "directly_recursive_func_is_not_inlined",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
4, 0, 40, inlinedCallerStack[0],
},
// inlinedCallerDump shows up here because
// runtime_expandFinalInlineFrame adds it to the stack frame.
wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
wantSamples: []*profile.Sample{
{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
},
}, {
name: "recursion_chain_inline",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
},
wantLocs: [][]string{
{"runtime/pprof.recursionChainBottom"},
{
"runtime/pprof.recursionChainMiddle",
"runtime/pprof.recursionChainTop",
"runtime/pprof.recursionChainBottom",
},
{
"runtime/pprof.recursionChainMiddle",
"runtime/pprof.recursionChainTop",
"runtime/pprof.TestTryAdd", // inlined into the test.
},
},
wantSamples: []*profile.Sample{
{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}, {ID: 3}}},
},
}, {
name: "truncated_stack_trace_later",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
4, 0, 60, inlinedCallerStack[0],
},
wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
wantSamples: []*profile.Sample{
{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
name: "truncated_stack_trace_first",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
4, 0, 70, inlinedCallerStack[0],
5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
},
wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
wantSamples: []*profile.Sample{
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
// We can recover the inlined caller from a truncated stack.
name: "truncated_stack_trace_only",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
4, 0, 70, inlinedCallerStack[0],
},
wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
wantSamples: []*profile.Sample{
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
// The same location is used for duplicated stacks.
name: "truncated_stack_trace_twice",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
4, 0, 70, inlinedCallerStack[0],
// Fake frame: add a fake call to
// inlinedCallerDump to prevent this sample
// from getting merged into above.
5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
},
wantLocs: [][]string{
{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
{"runtime/pprof.inlinedCallerDump"},
},
wantSamples: []*profile.Sample{
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
},
}}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
p, err := translateCPUProfile(tc.input)
if err != nil {
t.Fatalf("translating profile: %v", err)
}
t.Logf("Profile: %v\n", p)
// One location entry with all inlined functions.
var gotLoc [][]string
for _, loc := range p.Location {
var names []string
for _, line := range loc.Line {
names = append(names, line.Function.Name)
}
gotLoc = append(gotLoc, names)
}
if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
t.Errorf("Got Location = %+v\n\twant %+v", got, want)
}
// All samples should point to one location.
var gotSamples []*profile.Sample
for _, sample := range p.Sample {
var locs []*profile.Location
for _, loc := range sample.Location {
locs = append(locs, &profile.Location{ID: loc.ID})
}
gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
}
if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
}
})
}
}
| [
"\"IN_QEMU\""
]
| []
| [
"IN_QEMU"
]
| [] | ["IN_QEMU"] | go | 1 | 0 | |
main.go | package main
import (
"github.com/eirsyl/statuspage/src"
"github.com/eirsyl/statuspage/src/routes"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/itsjamie/gin-cors"
"github.com/go-pg/pg"
"time"
"log"
"net/http"
"os"
"runtime"
)
func main() {
ConfigRuntime()
gin.SetMode(gin.ReleaseMode)
router := gin.Default()
router.Use(State())
router.Use(cors.Middleware(cors.Config{
Origins: "*",
Methods: "GET, PUT, POST, DELETE",
RequestHeaders: "Origin, Authorization, Content-Type",
ExposedHeaders: "",
MaxAge: 50 * time.Second,
Credentials: true,
ValidateHeaders: false,
}))
binding.Validator.RegisterValidation("incidentstatus", src.IncidentStatus)
binding.Validator.RegisterValidation("servicestatus", src.ServiceStatus)
router.Static("/static", "./static")
router.LoadHTMLGlob("templates/*")
router.GET("/", routes.Dashboard)
api := router.Group("/api")
api.Use(Auth())
{
api.GET("/services", routes.ServiceList)
api.POST("/services", routes.ServicePost)
api.GET("/services/:id", routes.ServiceGet)
api.PUT("/services/:id", routes.ServicePatch)
api.DELETE("/services/:id", routes.ServiceDelete)
api.GET("/incidents", routes.IncidentList)
api.POST("/incidents", routes.IncidentPost)
api.GET("/incidents/:id", routes.IncidentGet)
api.DELETE("/incidents/:id", routes.IncidentDelete)
api.GET("/incidents/:id/updates", routes.IncidentUpdateList)
api.POST("/incidents/:id/updates", routes.IncidentUpdatePost)
api.GET("/incidents/:id/updates/:updateId", routes.IncidentUpdateGet)
api.DELETE("/incidents/:id/updates/:updateId", routes.IncidentUpdateDelete)
}
router.Run()
}
func ConfigRuntime() {
nuCPU := runtime.NumCPU()
runtime.GOMAXPROCS(nuCPU)
log.Printf("Running with %d CPUs\n", nuCPU)
}
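// State connects to Postgres using the POSTGRES_ADDRESS, POSTGRES_USER,
// POSTGRES_PASSWORD and POSTGRES_DB environment variables, ensures the schema
// exists, and returns middleware that injects the shared services and incidents
// state into each request context.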
func State() gin.HandlerFunc {
pgAddr := os.Getenv("POSTGRES_ADDRESS")
pgUser := os.Getenv("POSTGRES_USER")
pgPassword := os.Getenv("POSTGRES_PASSWORD")
pgDB := os.Getenv("POSTGRES_DB")
db := pg.Connect(&pg.Options{
Addr: pgAddr,
User: pgUser,
Password: pgPassword,
Database: pgDB,
})
if err := src.CreateSchema(db); err != nil {
panic(err)
}
services := src.Services{}
services.Initialize(*db)
incidents := src.Incidents{}
incidents.Initialize(*db)
return func(c *gin.Context) {
c.Set("services", services)
c.Set("incidents", incidents)
c.Next()
}
}
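// Auth returns middleware that rejects requests whose Authorization header does
// not match the non-empty API_TOKEN environment variable.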
func Auth() gin.HandlerFunc {
return func(c *gin.Context) {
token := c.GetHeader("Authorization")
validToken := os.Getenv("API_TOKEN")
if !(len(validToken) > 0 && token == validToken) {
c.AbortWithStatus(http.StatusUnauthorized)
return
}
c.Next()
}
}
| [
"\"POSTGRES_ADDRESS\"",
"\"POSTGRES_USER\"",
"\"POSTGRES_PASSWORD\"",
"\"POSTGRES_DB\"",
"\"API_TOKEN\""
]
| []
| [
"POSTGRES_USER",
"POSTGRES_ADDRESS",
"API_TOKEN",
"POSTGRES_DB",
"POSTGRES_PASSWORD"
]
| [] | ["POSTGRES_USER", "POSTGRES_ADDRESS", "API_TOKEN", "POSTGRES_DB", "POSTGRES_PASSWORD"] | go | 5 | 0 | |
py/utils/cros_board_utils.py | # Copyright 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utils to get various representations of a ChromeOS board name."""
import logging
import os
import re
import subprocess
from . import process_utils
from . import sys_utils
from . import type_utils
def GetChromeOSFactoryBoardPath(board):
# The packages here must be in same order as defined in
# virtual/chromeos-bsp-factory.
package_names = ['factory-board', 'chromeos-factory-board']
for package in package_names:
try:
ebuild_path = process_utils.SpawnOutput(
['equery-%s' % board, 'which', package])
except OSError:
logging.error('Failed to execute equery-%s. Try running inside the chroot'
' and do "setup_board --board %s" first.', board, board)
return None
if ebuild_path:
files_dir = os.path.join(os.path.dirname(ebuild_path), 'files')
# Some packages, for example the fallback one in chromiumos-overlay,
# may not have 'files' so we have to check again.
if os.path.exists(files_dir):
return files_dir
logging.warning('no ebuild [%s] for board [%s].', package, board)
logging.warning('cannot find any board packages for board [%s].', board)
return None
class BuildBoardException(Exception):
"""Build board exception."""
class BuildBoard:
"""A board that we build CrOS for.
Properties:
arch: The architecture of the board, or None if unable to determine
architecture.
base: The base name. Always set.
variant: The variant name, or None if there is no variant.
full_name: The base name, plus '_'+variant if set. This is
the name used for build directories (like "/build/daisy_spring").
short_name: The variant if set; else the base. This is the
name used in branches (like "spring" in factory-spring-1234.B).
gsutil_name: The base name, plus '-'+variant if set. GSUtil uses
'base-variant' as bucket names.
factory_board_files: Path to the FILESDIR folder of the factory board package
(chromeos-factory-board or factory-board). This is available only
when the module is invoked in chroot.
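Example (illustrative): BuildBoard('daisy_spring') yields base 'daisy',
variant 'spring', full_name 'daisy_spring', short_name 'spring' and
gsutil_name 'daisy-spring'.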
"""
def __init__(self, board_name=None):
"""Constructor.
Args:
board_name: The name of a board. This may be one of:
"None" or "default": If runs in chroot, uses the user's default
board in $HOME/src/scripts/.default_board (or fails if there is
none). Otherwise tries to find out the board name from
/etc/lsb-release (or fails if the file does not exist).
"foo": Uses the foo board. If runs in chroot, it can also handle
the case where "foo" is a variant (e.g., use "spring" to mean
"daisy_spring").
"base_foo": Uses the "foo" variant of the "base" board.
Raises:
BuildBoardException if unable to determine board or overlay name.
"""
self.board_name = board_name
if sys_utils.InChroot():
# The following sanity checks are feasible only in chroot.
src = os.path.join(os.environ['CROS_WORKON_SRCROOT'], 'src')
if board_name in [None, 'default']:
default_path = os.path.join(src, 'scripts', '.default_board')
if not os.path.exists(default_path):
raise BuildBoardException('Unable to read default board from %s' %
default_path)
board_name = open(default_path).read().strip()
board_name = board_name.lower()
# User said "daisy-spring" but means "daisy_spring"?
self.full_name = re.sub('-', '_', board_name)
else:
if board_name in [None, 'default']:
# See if we can get the board name from /etc/lsb-release.
LSB_RELEASE_FILE = '/etc/lsb-release'
LSB_BOARD_RE = re.compile(r'^CHROMEOS_RELEASE_BOARD=([\w-]+)$', re.M)
if not os.path.exists(LSB_RELEASE_FILE):
raise BuildBoardException(
'Not in chroot and %r does not exist, unable to determine board' %
LSB_RELEASE_FILE)
try:
with open(LSB_RELEASE_FILE) as f:
self.full_name = LSB_BOARD_RE.findall(f.read())[0].lower()
except IndexError:
raise BuildBoardException(
'Cannot determine board from %r' % LSB_RELEASE_FILE)
else:
self.full_name = re.sub('-', '_', board_name).lower()
self.base, _, self.variant = self.full_name.partition('_')
self.variant = self.variant or None # Use None, not ''
self.short_name = self.variant or self.base # Ick
self.gsutil_name = re.sub('_', '-', self.full_name)
@type_utils.LazyProperty
def factory_board_files(self):
return (GetChromeOSFactoryBoardPath(self.full_name) if sys_utils.InChroot()
else None)
@type_utils.LazyProperty
def arch(self):
if sys_utils.InChroot():
if os.environ.get('ROOT'):
# Skip if ROOT env var is set as crossdev does not work with it. This
# can happen while running 'emerge-<board>'. Extract arch from
# 'emerge-<board> --info' instead.
try:
emerge_info = process_utils.CheckOutput(
['emerge-%s' % self.full_name, '--info'])
return re.search(r'^ACCEPT_KEYWORDS="(.*)"$', emerge_info,
re.MULTILINE).group(1)
except subprocess.CalledProcessError:
return None
else:
# Try to determine arch through toolchain.
chromite = os.path.join(os.environ['CROS_WORKON_SRCROOT'], 'chromite')
toolchain = process_utils.CheckOutput(
[os.path.join(chromite, 'bin', 'cros_setup_toolchains'),
'--show-board-cfg=%s' % self.full_name]).split(',')[0].strip()
target_cfg = process_utils.CheckOutput(
['/usr/bin/crossdev', '--show-target-cfg', toolchain])
arch = re.search(r'^arch=(.*)$', target_cfg, re.MULTILINE).group(1)
return arch if arch != '*' else None
else:
if self.board_name not in [None, 'default']:
return None
# Try to determine arch from 'uname -m'.
uname_machine = process_utils.CheckOutput(['uname', '-m'])
# Translate the output from 'uname -m' to match the arch definition in
# chroot.
machine_arch_map = {
'x86_64': 'amd64',
'arm': 'arm',
'aarch64': 'arm64'
}
for key, value in machine_arch_map.items():
if uname_machine.startswith(key):
return value
return None
| []
| []
| [
"ROOT",
"CROS_WORKON_SRCROOT"
]
| [] | ["ROOT", "CROS_WORKON_SRCROOT"] | python | 2 | 0 | |
test/torchaudio_unittest/librosa_compatibility_test.py | """Test suites for numerical compatibility with librosa"""
import os
import unittest
from distutils.version import StrictVersion
import torch
import torchaudio
import torchaudio.functional as F
from torchaudio._internal.module_utils import is_module_available
LIBROSA_AVAILABLE = is_module_available('librosa')
if LIBROSA_AVAILABLE:
import numpy as np
import librosa
import scipy
import pytest
from torchaudio_unittest import common_utils
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
class TestFunctional(common_utils.TorchaudioTestCase):
"""Test suite for functions in `functional` module."""
def test_griffinlim(self):
# NOTE: This test is flaky without a fixed random seed
# See https://github.com/pytorch/audio/issues/382
torch.random.manual_seed(42)
tensor = torch.rand((1, 1000))
n_fft = 400
ws = 400
hop = 100
window = torch.hann_window(ws)
normalize = False
momentum = 0.99
n_iter = 8
length = 1000
rand_init = False
init = 'random' if rand_init else None
specgram = F.spectrogram(tensor, 0, window, n_fft, hop, ws, 2, normalize).sqrt()
ta_out = F.griffinlim(specgram, window, n_fft, hop, ws, 1, normalize,
n_iter, momentum, length, rand_init)
lr_out = librosa.griffinlim(specgram.squeeze(0).numpy(), n_iter=n_iter, hop_length=hop,
momentum=momentum, init=init, length=length)
lr_out = torch.from_numpy(lr_out).unsqueeze(0)
self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5)
def _test_create_fb(self, n_mels=40, sample_rate=22050, n_fft=2048, fmin=0.0, fmax=8000.0, norm=None):
librosa_fb = librosa.filters.mel(sr=sample_rate,
n_fft=n_fft,
n_mels=n_mels,
fmax=fmax,
fmin=fmin,
htk=True,
norm=norm)
fb = F.create_fb_matrix(sample_rate=sample_rate,
n_mels=n_mels,
f_max=fmax,
f_min=fmin,
n_freqs=(n_fft // 2 + 1),
norm=norm)
for i_mel_bank in range(n_mels):
self.assertEqual(
fb[:, i_mel_bank], torch.tensor(librosa_fb[i_mel_bank]), atol=1e-4, rtol=1e-5)
def test_create_fb(self):
self._test_create_fb()
self._test_create_fb(n_mels=128, sample_rate=44100)
self._test_create_fb(n_mels=128, fmin=2000.0, fmax=5000.0)
self._test_create_fb(n_mels=56, fmin=100.0, fmax=9000.0)
self._test_create_fb(n_mels=56, fmin=800.0, fmax=900.0)
self._test_create_fb(n_mels=56, fmin=1900.0, fmax=900.0)
self._test_create_fb(n_mels=10, fmin=1900.0, fmax=900.0)
if StrictVersion(librosa.__version__) < StrictVersion("0.7.2"):
return
self._test_create_fb(n_mels=128, sample_rate=44100, norm="slaney")
self._test_create_fb(n_mels=128, fmin=2000.0, fmax=5000.0, norm="slaney")
self._test_create_fb(n_mels=56, fmin=100.0, fmax=9000.0, norm="slaney")
self._test_create_fb(n_mels=56, fmin=800.0, fmax=900.0, norm="slaney")
self._test_create_fb(n_mels=56, fmin=1900.0, fmax=900.0, norm="slaney")
self._test_create_fb(n_mels=10, fmin=1900.0, fmax=900.0, norm="slaney")
def test_amplitude_to_DB(self):
spec = torch.rand((6, 201))
amin = 1e-10
db_multiplier = 0.0
top_db = 80.0
# Power to DB
multiplier = 10.0
ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
lr_out = librosa.core.power_to_db(spec.numpy())
lr_out = torch.from_numpy(lr_out)
self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5)
# Amplitude to DB
multiplier = 20.0
ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
lr_out = librosa.core.amplitude_to_db(spec.numpy())
lr_out = torch.from_numpy(lr_out)
self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5)
@pytest.mark.parametrize('complex_specgrams', [
torch.randn(2, 1025, 400, 2)
])
@pytest.mark.parametrize('rate', [0.5, 1.01, 1.3])
@pytest.mark.parametrize('hop_length', [256])
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
def test_phase_vocoder(complex_specgrams, rate, hop_length):
# Due to the cumulative sum, numerical error when using torch.float32 would
# cause the bottom-right values of the stretched spectrogram not to
# match librosa's.
complex_specgrams = complex_specgrams.type(torch.float64)
phase_advance = torch.linspace(0, np.pi * hop_length, complex_specgrams.shape[-3], dtype=torch.float64)[..., None]
complex_specgrams_stretch = F.phase_vocoder(complex_specgrams, rate=rate, phase_advance=phase_advance)
# == Test shape
expected_size = list(complex_specgrams.size())
expected_size[-2] = int(np.ceil(expected_size[-2] / rate))
assert complex_specgrams.dim() == complex_specgrams_stretch.dim()
assert complex_specgrams_stretch.size() == torch.Size(expected_size)
# == Test values
index = [0] * (complex_specgrams.dim() - 3) + [slice(None)] * 3
mono_complex_specgram = complex_specgrams[index].numpy()
mono_complex_specgram = mono_complex_specgram[..., 0] + \
mono_complex_specgram[..., 1] * 1j
expected_complex_stretch = librosa.phase_vocoder(mono_complex_specgram,
rate=rate,
hop_length=hop_length)
complex_stretch = complex_specgrams_stretch[index].numpy()
complex_stretch = complex_stretch[..., 0] + 1j * complex_stretch[..., 1]
assert np.allclose(complex_stretch, expected_complex_stretch, atol=1e-5)
def _load_audio_asset(*asset_paths, **kwargs):
file_path = common_utils.get_asset_path(*asset_paths)
sound, sample_rate = torchaudio.load(file_path, **kwargs)
return sound, sample_rate
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
class TestTransforms(common_utils.TorchaudioTestCase):
"""Test suite for functions in `transforms` module."""
def assert_compatibilities(self, n_fft, hop_length, power, n_mels, n_mfcc, sample_rate):
common_utils.set_audio_backend('default')
path = common_utils.get_asset_path('sinewave.wav')
sound, sample_rate = common_utils.load_wav(path)
sound_librosa = sound.cpu().numpy().squeeze() # (64000)
# test core spectrogram
spect_transform = torchaudio.transforms.Spectrogram(
n_fft=n_fft, hop_length=hop_length, power=power)
out_librosa, _ = librosa.core.spectrum._spectrogram(
y=sound_librosa, n_fft=n_fft, hop_length=hop_length, power=power)
out_torch = spect_transform(sound).squeeze().cpu()
self.assertEqual(out_torch, torch.from_numpy(out_librosa), atol=1e-5, rtol=1e-5)
# test mel spectrogram
melspect_transform = torchaudio.transforms.MelSpectrogram(
sample_rate=sample_rate, window_fn=torch.hann_window,
hop_length=hop_length, n_mels=n_mels, n_fft=n_fft)
librosa_mel = librosa.feature.melspectrogram(
y=sound_librosa, sr=sample_rate, n_fft=n_fft,
hop_length=hop_length, n_mels=n_mels, htk=True, norm=None)
librosa_mel_tensor = torch.from_numpy(librosa_mel)
torch_mel = melspect_transform(sound).squeeze().cpu()
self.assertEqual(
torch_mel.type(librosa_mel_tensor.dtype), librosa_mel_tensor, atol=5e-3, rtol=1e-5)
# test s2db
power_to_db_transform = torchaudio.transforms.AmplitudeToDB('power', 80.)
power_to_db_torch = power_to_db_transform(spect_transform(sound)).squeeze().cpu()
power_to_db_librosa = librosa.core.spectrum.power_to_db(out_librosa)
self.assertEqual(power_to_db_torch, torch.from_numpy(power_to_db_librosa), atol=5e-3, rtol=1e-5)
mag_to_db_transform = torchaudio.transforms.AmplitudeToDB('magnitude', 80.)
mag_to_db_torch = mag_to_db_transform(torch.abs(sound)).squeeze().cpu()
mag_to_db_librosa = librosa.core.spectrum.amplitude_to_db(sound_librosa)
self.assertEqual(mag_to_db_torch, torch.from_numpy(mag_to_db_librosa), atol=5e-3, rtol=1e-5)
power_to_db_torch = power_to_db_transform(melspect_transform(sound)).squeeze().cpu()
db_librosa = librosa.core.spectrum.power_to_db(librosa_mel)
db_librosa_tensor = torch.from_numpy(db_librosa)
self.assertEqual(
power_to_db_torch.type(db_librosa_tensor.dtype), db_librosa_tensor, atol=5e-3, rtol=1e-5)
# test MFCC
melkwargs = {'hop_length': hop_length, 'n_fft': n_fft}
mfcc_transform = torchaudio.transforms.MFCC(
sample_rate=sample_rate, n_mfcc=n_mfcc, norm='ortho', melkwargs=melkwargs)
# librosa.feature.mfcc doesn't pass kwargs properly since some of the
# kwargs for melspectrogram and mfcc are the same. We just follow the
# function body in
# https://librosa.github.io/librosa/_modules/librosa/feature/spectral.html#melspectrogram
# to mirror this function call with correct args:
#
# librosa_mfcc = librosa.feature.mfcc(
# y=sound_librosa, sr=sample_rate, n_mfcc = n_mfcc,
# hop_length=hop_length, n_fft=n_fft, htk=True, norm=None, n_mels=n_mels)
librosa_mfcc = scipy.fftpack.dct(db_librosa, axis=0, type=2, norm='ortho')[:n_mfcc]
librosa_mfcc_tensor = torch.from_numpy(librosa_mfcc)
torch_mfcc = mfcc_transform(sound).squeeze().cpu()
self.assertEqual(
torch_mfcc.type(librosa_mfcc_tensor.dtype), librosa_mfcc_tensor, atol=5e-3, rtol=1e-5)
def test_basics1(self):
kwargs = {
'n_fft': 400,
'hop_length': 200,
'power': 2.0,
'n_mels': 128,
'n_mfcc': 40,
'sample_rate': 16000
}
self.assert_compatibilities(**kwargs)
def test_basics2(self):
kwargs = {
'n_fft': 600,
'hop_length': 100,
'power': 2.0,
'n_mels': 128,
'n_mfcc': 20,
'sample_rate': 16000
}
self.assert_compatibilities(**kwargs)
# NOTE: Test passes offline, but fails on TravisCI (and CircleCI), see #372.
@unittest.skipIf('CI' in os.environ, 'Test is known to fail on CI')
def test_basics3(self):
kwargs = {
'n_fft': 200,
'hop_length': 50,
'power': 2.0,
'n_mels': 128,
'n_mfcc': 50,
'sample_rate': 24000
}
self.assert_compatibilities(**kwargs)
def test_basics4(self):
kwargs = {
'n_fft': 400,
'hop_length': 200,
'power': 3.0,
'n_mels': 128,
'n_mfcc': 40,
'sample_rate': 16000
}
self.assert_compatibilities(**kwargs)
def test_MelScale(self):
"""MelScale transform is comparable to that of librosa"""
n_fft = 2048
n_mels = 256
hop_length = n_fft // 4
sample_rate = 44100
sound = common_utils.get_whitenoise(sample_rate=sample_rate, duration=60)
sound = sound.mean(dim=0, keepdim=True)
spec_ta = F.spectrogram(
sound, pad=0, window=torch.hann_window(n_fft), n_fft=n_fft,
hop_length=hop_length, win_length=n_fft, power=2, normalized=False)
spec_lr = spec_ta.cpu().numpy().squeeze()
# Perform MelScale with torchaudio and librosa
melspec_ta = torchaudio.transforms.MelScale(n_mels=n_mels, sample_rate=sample_rate)(spec_ta)
melspec_lr = librosa.feature.melspectrogram(
S=spec_lr, sr=sample_rate, n_fft=n_fft, hop_length=hop_length,
win_length=n_fft, center=True, window='hann', n_mels=n_mels, htk=True, norm=None)
# Note: Using relaxed rtol instead of atol
self.assertEqual(melspec_ta, torch.from_numpy(melspec_lr[None, ...]), atol=1e-8, rtol=1e-3)
def test_InverseMelScale(self):
"""InverseMelScale transform is comparable to that of librosa"""
n_fft = 2048
n_mels = 256
n_stft = n_fft // 2 + 1
hop_length = n_fft // 4
# Prepare mel spectrogram input. We use torchaudio to compute one.
path = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
sound, sample_rate = common_utils.load_wav(path)
sound = sound[:, 2**10:2**10 + 2**14]
sound = sound.mean(dim=0, keepdim=True)
spec_orig = F.spectrogram(
sound, pad=0, window=torch.hann_window(n_fft), n_fft=n_fft,
hop_length=hop_length, win_length=n_fft, power=2, normalized=False)
melspec_ta = torchaudio.transforms.MelScale(n_mels=n_mels, sample_rate=sample_rate)(spec_orig)
melspec_lr = melspec_ta.cpu().numpy().squeeze()
        # Perform InverseMelScale with torchaudio and librosa
spec_ta = torchaudio.transforms.InverseMelScale(
n_stft, n_mels=n_mels, sample_rate=sample_rate)(melspec_ta)
spec_lr = librosa.feature.inverse.mel_to_stft(
melspec_lr, sr=sample_rate, n_fft=n_fft, power=2.0, htk=True, norm=None)
spec_lr = torch.from_numpy(spec_lr[None, ...])
# Align dimensions
        # librosa returns an amplitude (magnitude) spectrogram while torchaudio returns a power spectrogram
spec_orig = spec_orig.sqrt()
spec_ta = spec_ta.sqrt()
threshold = 2.0
        # This threshold was chosen empirically, based on the following observation
#
# torch.dist(spec_lr, spec_ta, p=float('inf'))
# >>> tensor(1.9666)
#
        # The spectrograms reconstructed by librosa and torchaudio are not comparable elementwise.
        # This is because they use different approximation algorithms, so the resulting values can
        # differ in magnitude (although most of them are very close).
# See
# https://github.com/pytorch/audio/pull/366 for the discussion of the choice of algorithm
# https://github.com/pytorch/audio/pull/448/files#r385747021 for the distribution of P-inf
# distance over frequencies.
self.assertEqual(spec_ta, spec_lr, atol=threshold, rtol=1e-5)
threshold = 1700.0
        # This threshold was chosen empirically, based on the following observations
#
# torch.dist(spec_orig, spec_ta, p=1)
# >>> tensor(1644.3516)
# torch.dist(spec_orig, spec_lr, p=1)
# >>> tensor(1420.7103)
# torch.dist(spec_lr, spec_ta, p=1)
# >>> tensor(943.2759)
assert torch.dist(spec_orig, spec_ta, p=1) < threshold
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/options.go | package fzf
import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"unicode"
"github.com/junegunn/fzf/src/algo"
"github.com/junegunn/fzf/src/tui"
"github.com/mattn/go-runewidth"
"github.com/mattn/go-shellwords"
)
const usage = `usage: fzf [options]
Search
-x, --extended Extended-search mode
(enabled by default; +x or --no-extended to disable)
-e, --exact Enable Exact-match
--algo=TYPE Fuzzy matching algorithm: [v1|v2] (default: v2)
-i Case-insensitive match (default: smart-case match)
+i Case-sensitive match
--literal Do not normalize latin script letters before matching
-n, --nth=N[,..] Comma-separated list of field index expressions
for limiting search scope. Each can be a non-zero
integer or a range expression ([BEGIN]..[END]).
--with-nth=N[,..] Transform the presentation of each line using
field index expressions
-d, --delimiter=STR Field delimiter regex (default: AWK-style)
+s, --no-sort Do not sort the result
--tac Reverse the order of the input
--disabled Do not perform search
--tiebreak=CRI[,..] Comma-separated list of sort criteria to apply
when the scores are tied [length|begin|end|index]
(default: length)
Interface
-m, --multi[=MAX] Enable multi-select with tab/shift-tab
--no-mouse Disable mouse
--bind=KEYBINDS Custom key bindings. Refer to the man page.
--cycle Enable cyclic scroll
--keep-right Keep the right end of the line visible on overflow
--no-hscroll Disable horizontal scroll
--hscroll-off=COL Number of screen columns to keep to the right of the
highlighted substring (default: 10)
--filepath-word Make word-wise movements respect path separators
--jump-labels=CHARS Label characters for jump and jump-accept
Layout
--height=HEIGHT[%] Display fzf window below the cursor with the given
height instead of using fullscreen
--min-height=HEIGHT Minimum height when --height is given in percent
(default: 10)
--layout=LAYOUT Choose layout: [default|reverse|reverse-list]
--border[=STYLE] Draw border around the finder
[rounded|sharp|horizontal|vertical|
top|bottom|left|right|none] (default: rounded)
--margin=MARGIN Screen margin (TRBL | TB,RL | T,RL,B | T,R,B,L)
--padding=PADDING Padding inside border (TRBL | TB,RL | T,RL,B | T,R,B,L)
--info=STYLE Finder info style [default|inline|hidden]
--prompt=STR Input prompt (default: '> ')
--pointer=STR Pointer to the current line (default: '>')
--marker=STR Multi-select marker (default: '>')
--header=STR String to print as header
--header-lines=N The first N lines of the input are treated as header
Display
--ansi Enable processing of ANSI color codes
--tabstop=SPACES Number of spaces for a tab character (default: 8)
--color=COLSPEC Base scheme (dark|light|16|bw) and/or custom colors
--no-bold Do not use bold text
History
--history=FILE History file
--history-size=N Maximum number of history entries (default: 1000)
Preview
--preview=COMMAND Command to preview highlighted line ({})
--preview-window=OPT Preview window layout (default: right:50%)
[up|down|left|right][,SIZE[%]]
[,[no]wrap][,[no]cycle][,[no]follow][,[no]hidden]
[,border-BORDER_OPT]
[,+SCROLL[OFFSETS][/DENOM]][,~HEADER_LINES]
[,default]
Scripting
-q, --query=STR Start the finder with the given query
-1, --select-1 Automatically select the only match
-0, --exit-0 Exit immediately when there's no match
-f, --filter=STR Filter mode. Do not start interactive finder.
--print-query Print query as the first line
--expect=KEYS Comma-separated list of keys to complete fzf
--read0 Read input delimited by ASCII NUL characters
--print0 Print output delimited by ASCII NUL characters
--sync Synchronous search for multi-staged filtering
--version Display version information and exit
Environment variables
FZF_DEFAULT_COMMAND Default command to use when input is tty
FZF_DEFAULT_OPTS Default options
(e.g. '--layout=reverse --inline-info')
`
// Case denotes case-sensitivity of search
type Case int
// Case-sensitivities
const (
CaseSmart Case = iota
CaseIgnore
CaseRespect
)
// Sort criteria
type criterion int
const (
byScore criterion = iota
byLength
byBegin
byEnd
)
type sizeSpec struct {
size float64
percent bool
}
func defaultMargin() [4]sizeSpec {
return [4]sizeSpec{}
}
type windowPosition int
const (
posUp windowPosition = iota
posDown
posLeft
posRight
)
type layoutType int
const (
layoutDefault layoutType = iota
layoutReverse
layoutReverseList
)
type infoStyle int
const (
infoDefault infoStyle = iota
infoInline
infoHidden
)
type previewOpts struct {
command string
position windowPosition
size sizeSpec
scroll string
hidden bool
wrap bool
cycle bool
follow bool
border tui.BorderShape
headerLines int
}
// Options stores the values of command-line options
type Options struct {
Fuzzy bool
FuzzyAlgo algo.Algo
Extended bool
Phony bool
Case Case
Normalize bool
Nth []Range
WithNth []Range
Delimiter Delimiter
Sort int
Tac bool
Criteria []criterion
Multi int
Ansi bool
Mouse bool
Theme *tui.ColorTheme
Black bool
Bold bool
Height sizeSpec
MinHeight int
Layout layoutType
Cycle bool
KeepRight bool
Hscroll bool
HscrollOff int
FileWord bool
InfoStyle infoStyle
JumpLabels string
Prompt string
Pointer string
Marker string
Query string
Select1 bool
Exit0 bool
Filter *string
ToggleSort bool
Expect map[tui.Event]string
Keymap map[tui.Event][]action
Preview previewOpts
PrintQuery bool
ReadZero bool
Printer func(string)
PrintSep string
Sync bool
History *History
Header []string
HeaderLines int
Margin [4]sizeSpec
Padding [4]sizeSpec
BorderShape tui.BorderShape
Unicode bool
Tabstop int
ClearOnExit bool
Version bool
}
func defaultPreviewOpts(command string) previewOpts {
return previewOpts{command, posRight, sizeSpec{50, true}, "", false, false, false, false, tui.BorderRounded, 0}
}
func defaultOptions() *Options {
return &Options{
Fuzzy: true,
FuzzyAlgo: algo.FuzzyMatchV2,
Extended: true,
Phony: false,
Case: CaseSmart,
Normalize: true,
Nth: make([]Range, 0),
WithNth: make([]Range, 0),
Delimiter: Delimiter{},
Sort: 1000,
Tac: false,
Criteria: []criterion{byScore, byLength},
Multi: 0,
Ansi: false,
Mouse: true,
Theme: tui.EmptyTheme(),
Black: false,
Bold: true,
MinHeight: 10,
Layout: layoutDefault,
Cycle: false,
KeepRight: false,
Hscroll: true,
HscrollOff: 10,
FileWord: false,
InfoStyle: infoDefault,
JumpLabels: defaultJumpLabels,
Prompt: "> ",
Pointer: ">",
Marker: ">",
Query: "",
Select1: false,
Exit0: false,
Filter: nil,
ToggleSort: false,
Expect: make(map[tui.Event]string),
Keymap: make(map[tui.Event][]action),
Preview: defaultPreviewOpts(""),
PrintQuery: false,
ReadZero: false,
Printer: func(str string) { fmt.Println(str) },
PrintSep: "\n",
Sync: false,
History: nil,
Header: make([]string, 0),
HeaderLines: 0,
Margin: defaultMargin(),
Padding: defaultMargin(),
Unicode: true,
Tabstop: 8,
ClearOnExit: true,
Version: false}
}
func help(code int) {
os.Stdout.WriteString(usage)
os.Exit(code)
}
func errorExit(msg string) {
os.Stderr.WriteString(msg + "\n")
os.Exit(exitError)
}
func optString(arg string, prefixes ...string) (bool, string) {
for _, prefix := range prefixes {
if strings.HasPrefix(arg, prefix) {
return true, arg[len(prefix):]
}
}
return false, ""
}
func nextString(args []string, i *int, message string) string {
if len(args) > *i+1 {
*i++
} else {
errorExit(message)
}
return args[*i]
}
func optionalNextString(args []string, i *int) (bool, string) {
if len(args) > *i+1 && !strings.HasPrefix(args[*i+1], "-") && !strings.HasPrefix(args[*i+1], "+") {
*i++
return true, args[*i]
}
return false, ""
}
func atoi(str string) int {
num, err := strconv.Atoi(str)
if err != nil {
errorExit("not a valid integer: " + str)
}
return num
}
func atof(str string) float64 {
num, err := strconv.ParseFloat(str, 64)
if err != nil {
errorExit("not a valid number: " + str)
}
return num
}
func nextInt(args []string, i *int, message string) int {
if len(args) > *i+1 {
*i++
} else {
errorExit(message)
}
return atoi(args[*i])
}
func optionalNumeric(args []string, i *int, defaultValue int) int {
if len(args) > *i+1 {
if strings.IndexAny(args[*i+1], "0123456789") == 0 {
*i++
return atoi(args[*i])
}
}
return defaultValue
}
func splitNth(str string) []Range {
if match, _ := regexp.MatchString("^[0-9,-.]+$", str); !match {
errorExit("invalid format: " + str)
}
tokens := strings.Split(str, ",")
ranges := make([]Range, len(tokens))
for idx, s := range tokens {
r, ok := ParseRange(&s)
if !ok {
errorExit("invalid format: " + str)
}
ranges[idx] = r
}
return ranges
}
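// Illustrative inputs for splitNth (a sketch; the exact range grammar is defined
// by ParseRange, per the --nth documentation above):
//
//	splitNth("1,3..5,-2") // -> three Range values: index 1, range 3..5, negative index -2
//	splitNth("a,b")       // -> errorExit("invalid format: a,b")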
func delimiterRegexp(str string) Delimiter {
// Special handling of \t
str = strings.Replace(str, "\\t", "\t", -1)
// 1. Pattern does not contain any special character
if regexp.QuoteMeta(str) == str {
return Delimiter{str: &str}
}
rx, e := regexp.Compile(str)
// 2. Pattern is not a valid regular expression
if e != nil {
return Delimiter{str: &str}
}
// 3. Pattern as regular expression. Slow.
return Delimiter{regex: rx}
}
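// Illustrative behavior of delimiterRegexp (a sketch of the three cases above,
// not an exhaustive specification):
//
//	delimiterRegexp(",")      // no regex metacharacters   -> Delimiter{str: ","}
//	delimiterRegexp("\\t")    // "\t" is substituted first -> Delimiter{str: "\t"}
//	delimiterRegexp("[0-9]+") // valid regular expression  -> Delimiter{regex: ...}
//	delimiterRegexp("(")      // fails to compile          -> treated as the literal string "("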
func isAlphabet(char uint8) bool {
return char >= 'a' && char <= 'z'
}
func isNumeric(char uint8) bool {
return char >= '0' && char <= '9'
}
func parseAlgo(str string) algo.Algo {
switch str {
case "v1":
return algo.FuzzyMatchV1
case "v2":
return algo.FuzzyMatchV2
default:
errorExit("invalid algorithm (expected: v1 or v2)")
}
return algo.FuzzyMatchV2
}
func parseBorder(str string, optional bool) tui.BorderShape {
switch str {
case "rounded":
return tui.BorderRounded
case "sharp":
return tui.BorderSharp
case "horizontal":
return tui.BorderHorizontal
case "vertical":
return tui.BorderVertical
case "top":
return tui.BorderTop
case "bottom":
return tui.BorderBottom
case "left":
return tui.BorderLeft
case "right":
return tui.BorderRight
case "none":
return tui.BorderNone
default:
if optional && str == "" {
return tui.BorderRounded
}
errorExit("invalid border style (expected: rounded|sharp|horizontal|vertical|top|bottom|left|right|none)")
}
return tui.BorderNone
}
func parseKeyChords(str string, message string) map[tui.Event]string {
if len(str) == 0 {
errorExit(message)
}
str = regexp.MustCompile("(?i)(alt-),").ReplaceAllString(str, "$1"+string([]rune{escapedComma}))
tokens := strings.Split(str, ",")
if str == "," || strings.HasPrefix(str, ",,") || strings.HasSuffix(str, ",,") || strings.Contains(str, ",,,") {
tokens = append(tokens, ",")
}
chords := make(map[tui.Event]string)
for _, key := range tokens {
if len(key) == 0 {
continue // ignore
}
key = strings.ReplaceAll(key, string([]rune{escapedComma}), ",")
lkey := strings.ToLower(key)
add := func(e tui.EventType) {
chords[e.AsEvent()] = key
}
switch lkey {
case "up":
add(tui.Up)
case "down":
add(tui.Down)
case "left":
add(tui.Left)
case "right":
add(tui.Right)
case "enter", "return":
add(tui.CtrlM)
case "space":
chords[tui.Key(' ')] = key
case "bspace", "bs":
add(tui.BSpace)
case "ctrl-space":
add(tui.CtrlSpace)
case "ctrl-^", "ctrl-6":
add(tui.CtrlCaret)
case "ctrl-/", "ctrl-_":
add(tui.CtrlSlash)
case "ctrl-\\":
add(tui.CtrlBackSlash)
case "ctrl-]":
add(tui.CtrlRightBracket)
case "change":
add(tui.Change)
case "backward-eof":
add(tui.BackwardEOF)
case "alt-enter", "alt-return":
chords[tui.CtrlAltKey('m')] = key
case "alt-space":
chords[tui.AltKey(' ')] = key
case "alt-bs", "alt-bspace":
add(tui.AltBS)
case "alt-up":
add(tui.AltUp)
case "alt-down":
add(tui.AltDown)
case "alt-left":
add(tui.AltLeft)
case "alt-right":
add(tui.AltRight)
case "tab":
add(tui.Tab)
case "btab", "shift-tab":
add(tui.BTab)
case "esc":
add(tui.ESC)
case "del":
add(tui.Del)
case "home":
add(tui.Home)
case "end":
add(tui.End)
case "insert":
add(tui.Insert)
case "pgup", "page-up":
add(tui.PgUp)
case "pgdn", "page-down":
add(tui.PgDn)
case "alt-shift-up", "shift-alt-up":
add(tui.AltSUp)
case "alt-shift-down", "shift-alt-down":
add(tui.AltSDown)
case "alt-shift-left", "shift-alt-left":
add(tui.AltSLeft)
case "alt-shift-right", "shift-alt-right":
add(tui.AltSRight)
case "shift-up":
add(tui.SUp)
case "shift-down":
add(tui.SDown)
case "shift-left":
add(tui.SLeft)
case "shift-right":
add(tui.SRight)
case "left-click":
add(tui.LeftClick)
case "right-click":
add(tui.RightClick)
case "double-click":
add(tui.DoubleClick)
case "f10":
add(tui.F10)
case "f11":
add(tui.F11)
case "f12":
add(tui.F12)
default:
runes := []rune(key)
if len(key) == 10 && strings.HasPrefix(lkey, "ctrl-alt-") && isAlphabet(lkey[9]) {
chords[tui.CtrlAltKey(rune(key[9]))] = key
} else if len(key) == 6 && strings.HasPrefix(lkey, "ctrl-") && isAlphabet(lkey[5]) {
add(tui.EventType(tui.CtrlA.Int() + int(lkey[5]) - 'a'))
} else if len(runes) == 5 && strings.HasPrefix(lkey, "alt-") {
r := runes[4]
switch r {
case escapedColon:
r = ':'
case escapedComma:
r = ','
case escapedPlus:
r = '+'
}
chords[tui.AltKey(r)] = key
} else if len(key) == 2 && strings.HasPrefix(lkey, "f") && key[1] >= '1' && key[1] <= '9' {
add(tui.EventType(tui.F1.Int() + int(key[1]) - '1'))
} else if len(runes) == 1 {
chords[tui.Key(runes[0])] = key
} else {
errorExit("unsupported key: " + key)
}
}
}
return chords
}
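// Illustrative parseKeyChords results (a sketch of the mapping above, not exhaustive):
//
//	parseKeyChords("ctrl-a,enter,f2", "...") // -> {CtrlA: "ctrl-a", CtrlM: "enter", F2: "f2"}
//	parseKeyChords("alt-,", "...")           // "alt-," is shielded from the comma split, then mapped to AltKey(',')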
func parseTiebreak(str string) []criterion {
criteria := []criterion{byScore}
hasIndex := false
hasLength := false
hasBegin := false
hasEnd := false
check := func(notExpected *bool, name string) {
if *notExpected {
errorExit("duplicate sort criteria: " + name)
}
if hasIndex {
errorExit("index should be the last criterion")
}
*notExpected = true
}
for _, str := range strings.Split(strings.ToLower(str), ",") {
switch str {
case "index":
check(&hasIndex, "index")
case "length":
check(&hasLength, "length")
criteria = append(criteria, byLength)
case "begin":
check(&hasBegin, "begin")
criteria = append(criteria, byBegin)
case "end":
check(&hasEnd, "end")
criteria = append(criteria, byEnd)
default:
errorExit("invalid sort criterion: " + str)
}
}
return criteria
}
func dupeTheme(theme *tui.ColorTheme) *tui.ColorTheme {
dupe := *theme
return &dupe
}
func parseTheme(defaultTheme *tui.ColorTheme, str string) *tui.ColorTheme {
theme := dupeTheme(defaultTheme)
rrggbb := regexp.MustCompile("^#[0-9a-fA-F]{6}$")
for _, str := range strings.Split(strings.ToLower(str), ",") {
switch str {
case "dark":
theme = dupeTheme(tui.Dark256)
case "light":
theme = dupeTheme(tui.Light256)
case "16":
theme = dupeTheme(tui.Default16)
case "bw", "no":
theme = tui.NoColorTheme()
default:
fail := func() {
errorExit("invalid color specification: " + str)
}
// Color is disabled
if theme == nil {
continue
}
components := strings.Split(str, ":")
if len(components) < 2 {
fail()
}
mergeAttr := func(cattr *tui.ColorAttr) {
for _, component := range components[1:] {
switch component {
case "regular":
cattr.Attr = tui.AttrRegular
case "bold", "strong":
cattr.Attr |= tui.Bold
case "dim":
cattr.Attr |= tui.Dim
case "italic":
cattr.Attr |= tui.Italic
case "underline":
cattr.Attr |= tui.Underline
case "blink":
cattr.Attr |= tui.Blink
case "reverse":
cattr.Attr |= tui.Reverse
case "black":
cattr.Color = tui.Color(0)
case "red":
cattr.Color = tui.Color(1)
case "green":
cattr.Color = tui.Color(2)
case "yellow":
cattr.Color = tui.Color(3)
case "blue":
cattr.Color = tui.Color(4)
case "magenta":
cattr.Color = tui.Color(5)
case "cyan":
cattr.Color = tui.Color(6)
case "white":
cattr.Color = tui.Color(7)
case "bright-black", "gray", "grey":
cattr.Color = tui.Color(8)
case "bright-red":
cattr.Color = tui.Color(9)
case "bright-green":
cattr.Color = tui.Color(10)
case "bright-yellow":
cattr.Color = tui.Color(11)
case "bright-blue":
cattr.Color = tui.Color(12)
case "bright-magenta":
cattr.Color = tui.Color(13)
case "bright-cyan":
cattr.Color = tui.Color(14)
case "bright-white":
cattr.Color = tui.Color(15)
case "":
default:
if rrggbb.MatchString(component) {
cattr.Color = tui.HexToColor(component)
} else {
ansi32, err := strconv.Atoi(component)
if err != nil || ansi32 < -1 || ansi32 > 255 {
fail()
}
cattr.Color = tui.Color(ansi32)
}
}
}
}
switch components[0] {
case "query", "input":
mergeAttr(&theme.Input)
case "disabled":
mergeAttr(&theme.Disabled)
case "fg":
mergeAttr(&theme.Fg)
case "bg":
mergeAttr(&theme.Bg)
case "preview-fg":
mergeAttr(&theme.PreviewFg)
case "preview-bg":
mergeAttr(&theme.PreviewBg)
case "fg+":
mergeAttr(&theme.Current)
case "bg+":
mergeAttr(&theme.DarkBg)
case "gutter":
mergeAttr(&theme.Gutter)
case "hl":
mergeAttr(&theme.Match)
case "hl+":
mergeAttr(&theme.CurrentMatch)
case "border":
mergeAttr(&theme.Border)
case "prompt":
mergeAttr(&theme.Prompt)
case "spinner":
mergeAttr(&theme.Spinner)
case "info":
mergeAttr(&theme.Info)
case "pointer":
mergeAttr(&theme.Cursor)
case "marker":
mergeAttr(&theme.Selected)
case "header":
mergeAttr(&theme.Header)
default:
fail()
}
}
}
return theme
}
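// Illustrative --color specifications accepted above (a sketch, not exhaustive):
//
//	--color=dark                     // start from the dark 256-color theme
//	--color=fg+:bright-white:bold    // current line foreground: ANSI 15 with the bold attribute
//	--color=bg+:#262626,hl:regular:1 // 24-bit hex for bg+, ANSI 1 with regular attribute for hl
//	--color=pointer:99999            // out of range -> "invalid color specification"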
var executeRegexp *regexp.Regexp
func firstKey(keymap map[tui.Event]string) tui.Event {
for k := range keymap {
return k
}
return tui.EventType(0).AsEvent()
}
const (
escapedColon = 0
escapedComma = 1
escapedPlus = 2
)
func init() {
// Backreferences are not supported.
// "~!@#$%^&*;/|".each_char.map { |c| Regexp.escape(c) }.map { |c| "#{c}[^#{c}]*#{c}" }.join('|')
executeRegexp = regexp.MustCompile(
`(?si)[:+](execute(?:-multi|-silent)?|reload|preview|change-prompt|unbind):.+|[:+](execute(?:-multi|-silent)?|reload|preview|change-prompt|unbind)(\([^)]*\)|\[[^\]]*\]|~[^~]*~|![^!]*!|@[^@]*@|\#[^\#]*\#|\$[^\$]*\$|%[^%]*%|\^[^\^]*\^|&[^&]*&|\*[^\*]*\*|;[^;]*;|/[^/]*/|\|[^\|]*\|)`)
}
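// Illustrative strings matched by executeRegexp inside a --bind expression
// (a sketch; the authoritative grammar is the pattern above):
//
//	":execute(vim {})"        // action body in parentheses
//	"+execute-silent[rm {}]"  // chained action, alternative bracket pair
//	":reload:ps -ef"          // colon form consumes the rest of the string
//	":preview~cat {}~"        // any listed character can delimit the body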
func parseKeymap(keymap map[tui.Event][]action, str string) {
masked := executeRegexp.ReplaceAllStringFunc(str, func(src string) string {
symbol := ":"
if strings.HasPrefix(src, "+") {
symbol = "+"
}
prefix := symbol + "execute"
if strings.HasPrefix(src[1:], "reload") {
prefix = symbol + "reload"
} else if strings.HasPrefix(src[1:], "preview") {
prefix = symbol + "preview"
} else if strings.HasPrefix(src[1:], "unbind") {
prefix = symbol + "unbind"
} else if strings.HasPrefix(src[1:], "change-prompt") {
prefix = symbol + "change-prompt"
} else if src[len(prefix)] == '-' {
c := src[len(prefix)+1]
if c == 's' || c == 'S' {
prefix += "-silent"
} else {
prefix += "-multi"
}
}
return prefix + "(" + strings.Repeat(" ", len(src)-len(prefix)-2) + ")"
})
masked = strings.Replace(masked, "::", string([]rune{escapedColon, ':'}), -1)
masked = strings.Replace(masked, ",:", string([]rune{escapedComma, ':'}), -1)
masked = strings.Replace(masked, "+:", string([]rune{escapedPlus, ':'}), -1)
idx := 0
for _, pairStr := range strings.Split(masked, ",") {
origPairStr := str[idx : idx+len(pairStr)]
idx += len(pairStr) + 1
pair := strings.SplitN(pairStr, ":", 2)
if len(pair) < 2 {
errorExit("bind action not specified: " + origPairStr)
}
var key tui.Event
if len(pair[0]) == 1 && pair[0][0] == escapedColon {
key = tui.Key(':')
} else if len(pair[0]) == 1 && pair[0][0] == escapedComma {
key = tui.Key(',')
} else if len(pair[0]) == 1 && pair[0][0] == escapedPlus {
key = tui.Key('+')
} else {
keys := parseKeyChords(pair[0], "key name required")
key = firstKey(keys)
}
idx2 := len(pair[0]) + 1
specs := strings.Split(pair[1], "+")
actions := make([]action, 0, len(specs))
appendAction := func(types ...actionType) {
actions = append(actions, toActions(types...)...)
}
prevSpec := ""
for specIndex, maskedSpec := range specs {
spec := origPairStr[idx2 : idx2+len(maskedSpec)]
idx2 += len(maskedSpec) + 1
spec = prevSpec + spec
specLower := strings.ToLower(spec)
switch specLower {
case "ignore":
appendAction(actIgnore)
case "beginning-of-line":
appendAction(actBeginningOfLine)
case "abort":
appendAction(actAbort)
case "accept":
appendAction(actAccept)
case "accept-non-empty":
appendAction(actAcceptNonEmpty)
case "print-query":
appendAction(actPrintQuery)
case "refresh-preview":
appendAction(actRefreshPreview)
case "replace-query":
appendAction(actReplaceQuery)
case "backward-char":
appendAction(actBackwardChar)
case "backward-delete-char":
appendAction(actBackwardDeleteChar)
case "backward-delete-char/eof":
appendAction(actBackwardDeleteCharEOF)
case "backward-word":
appendAction(actBackwardWord)
case "clear-screen":
appendAction(actClearScreen)
case "delete-char":
appendAction(actDeleteChar)
case "delete-char/eof":
appendAction(actDeleteCharEOF)
case "deselect":
appendAction(actDeselect)
case "end-of-line":
appendAction(actEndOfLine)
case "cancel":
appendAction(actCancel)
case "clear-query":
appendAction(actClearQuery)
case "clear-selection":
appendAction(actClearSelection)
case "forward-char":
appendAction(actForwardChar)
case "forward-word":
appendAction(actForwardWord)
case "jump":
appendAction(actJump)
case "jump-accept":
appendAction(actJumpAccept)
case "kill-line":
appendAction(actKillLine)
case "kill-word":
appendAction(actKillWord)
case "unix-line-discard", "line-discard":
appendAction(actUnixLineDiscard)
case "unix-word-rubout", "word-rubout":
appendAction(actUnixWordRubout)
case "yank":
appendAction(actYank)
case "backward-kill-word":
appendAction(actBackwardKillWord)
case "toggle-down":
appendAction(actToggle, actDown)
case "toggle-up":
appendAction(actToggle, actUp)
case "toggle-in":
appendAction(actToggleIn)
case "toggle-out":
appendAction(actToggleOut)
case "toggle-all":
appendAction(actToggleAll)
case "toggle-search":
appendAction(actToggleSearch)
case "select":
appendAction(actSelect)
case "select-all":
appendAction(actSelectAll)
case "deselect-all":
appendAction(actDeselectAll)
case "close":
appendAction(actClose)
case "toggle":
appendAction(actToggle)
case "down":
appendAction(actDown)
case "up":
appendAction(actUp)
case "first", "top":
appendAction(actFirst)
case "last":
appendAction(actLast)
case "page-up":
appendAction(actPageUp)
case "page-down":
appendAction(actPageDown)
case "half-page-up":
appendAction(actHalfPageUp)
case "half-page-down":
appendAction(actHalfPageDown)
case "previous-history":
appendAction(actPreviousHistory)
case "next-history":
appendAction(actNextHistory)
case "toggle-preview":
appendAction(actTogglePreview)
case "toggle-preview-wrap":
appendAction(actTogglePreviewWrap)
case "toggle-sort":
appendAction(actToggleSort)
case "preview-top":
appendAction(actPreviewTop)
case "preview-bottom":
appendAction(actPreviewBottom)
case "preview-up":
appendAction(actPreviewUp)
case "preview-down":
appendAction(actPreviewDown)
case "preview-page-up":
appendAction(actPreviewPageUp)
case "preview-page-down":
appendAction(actPreviewPageDown)
case "preview-half-page-up":
appendAction(actPreviewHalfPageUp)
case "preview-half-page-down":
appendAction(actPreviewHalfPageDown)
case "enable-search":
appendAction(actEnableSearch)
case "disable-search":
appendAction(actDisableSearch)
default:
t := isExecuteAction(specLower)
if t == actIgnore {
if specIndex == 0 && specLower == "" {
actions = append(keymap[key], actions...)
} else {
errorExit("unknown action: " + spec)
}
} else {
var offset int
switch t {
case actReload:
offset = len("reload")
case actPreview:
offset = len("preview")
case actChangePrompt:
offset = len("change-prompt")
case actUnbind:
offset = len("unbind")
case actExecuteSilent:
offset = len("execute-silent")
case actExecuteMulti:
offset = len("execute-multi")
default:
offset = len("execute")
}
var actionArg string
if spec[offset] == ':' {
if specIndex == len(specs)-1 {
actionArg = spec[offset+1:]
actions = append(actions, action{t: t, a: actionArg})
} else {
prevSpec = spec + "+"
continue
}
} else {
actionArg = spec[offset+1 : len(spec)-1]
actions = append(actions, action{t: t, a: actionArg})
}
if t == actUnbind {
parseKeyChords(actionArg, "unbind target required")
}
}
}
prevSpec = ""
}
keymap[key] = actions
}
}
func isExecuteAction(str string) actionType {
matches := executeRegexp.FindAllStringSubmatch(":"+str, -1)
if matches == nil || len(matches) != 1 {
return actIgnore
}
prefix := matches[0][1]
if len(prefix) == 0 {
prefix = matches[0][2]
}
switch prefix {
case "reload":
return actReload
case "unbind":
return actUnbind
case "preview":
return actPreview
case "change-prompt":
return actChangePrompt
case "execute":
return actExecute
case "execute-silent":
return actExecuteSilent
case "execute-multi":
return actExecuteMulti
}
return actIgnore
}
func parseToggleSort(keymap map[tui.Event][]action, str string) {
keys := parseKeyChords(str, "key name required")
if len(keys) != 1 {
errorExit("multiple keys specified")
}
keymap[firstKey(keys)] = toActions(actToggleSort)
}
func strLines(str string) []string {
return strings.Split(strings.TrimSuffix(str, "\n"), "\n")
}
func parseSize(str string, maxPercent float64, label string) sizeSpec {
var val float64
percent := strings.HasSuffix(str, "%")
if percent {
val = atof(str[:len(str)-1])
if val < 0 {
errorExit(label + " must be non-negative")
}
if val > maxPercent {
errorExit(fmt.Sprintf("%s too large (max: %d%%)", label, int(maxPercent)))
}
} else {
if strings.Contains(str, ".") {
errorExit(label + " (without %) must be a non-negative integer")
}
val = float64(atoi(str))
if val < 0 {
errorExit(label + " must be non-negative")
}
}
return sizeSpec{val, percent}
}
func parseHeight(str string) sizeSpec {
size := parseSize(str, 100, "height")
return size
}
func parseLayout(str string) layoutType {
switch str {
case "default":
return layoutDefault
case "reverse":
return layoutReverse
case "reverse-list":
return layoutReverseList
default:
errorExit("invalid layout (expected: default / reverse / reverse-list)")
}
return layoutDefault
}
func parseInfoStyle(str string) infoStyle {
switch str {
case "default":
return infoDefault
case "inline":
return infoInline
case "hidden":
return infoHidden
default:
errorExit("invalid info style (expected: default / inline / hidden)")
}
return infoDefault
}
func parsePreviewWindow(opts *previewOpts, input string) {
delimRegex := regexp.MustCompile("[:,]") // : for backward compatibility
sizeRegex := regexp.MustCompile("^[0-9]+%?$")
offsetRegex := regexp.MustCompile(`^(\+{-?[0-9]+})?([+-][0-9]+)*(-?/[1-9][0-9]*)?$`)
headerRegex := regexp.MustCompile("^~(0|[1-9][0-9]*)$")
tokens := delimRegex.Split(input, -1)
for _, token := range tokens {
switch token {
case "":
case "default":
*opts = defaultPreviewOpts(opts.command)
case "hidden":
opts.hidden = true
case "nohidden":
opts.hidden = false
case "wrap":
opts.wrap = true
case "nowrap":
opts.wrap = false
case "cycle":
opts.cycle = true
case "nocycle":
opts.cycle = false
case "up", "top":
opts.position = posUp
case "down", "bottom":
opts.position = posDown
case "left":
opts.position = posLeft
case "right":
opts.position = posRight
case "rounded", "border", "border-rounded":
opts.border = tui.BorderRounded
case "sharp", "border-sharp":
opts.border = tui.BorderSharp
case "noborder", "border-none":
opts.border = tui.BorderNone
case "border-horizontal":
opts.border = tui.BorderHorizontal
case "border-vertical":
opts.border = tui.BorderVertical
case "border-top":
opts.border = tui.BorderTop
case "border-bottom":
opts.border = tui.BorderBottom
case "border-left":
opts.border = tui.BorderLeft
case "border-right":
opts.border = tui.BorderRight
case "follow":
opts.follow = true
case "nofollow":
opts.follow = false
default:
if headerRegex.MatchString(token) {
opts.headerLines = atoi(token[1:])
} else if sizeRegex.MatchString(token) {
opts.size = parseSize(token, 99, "window size")
} else if offsetRegex.MatchString(token) {
opts.scroll = token
} else {
errorExit("invalid preview window option: " + token)
}
}
}
}
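// Illustrative --preview-window values handled above (a sketch, not exhaustive):
//
//	"right,40%"              // position and size
//	"up,30%,border-sharp,~3" // ~3 treats the first 3 lines of the preview as a header
//	"hidden,wrap,+{2}-5/2"   // toggles plus a scroll offset expression
//	"down:50%"               // ':' is still accepted for backward compatibility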
func parseMargin(opt string, margin string) [4]sizeSpec {
margins := strings.Split(margin, ",")
checked := func(str string) sizeSpec {
return parseSize(str, 49, opt)
}
switch len(margins) {
case 1:
m := checked(margins[0])
return [4]sizeSpec{m, m, m, m}
case 2:
tb := checked(margins[0])
rl := checked(margins[1])
return [4]sizeSpec{tb, rl, tb, rl}
case 3:
t := checked(margins[0])
rl := checked(margins[1])
b := checked(margins[2])
return [4]sizeSpec{t, rl, b, rl}
case 4:
return [4]sizeSpec{
checked(margins[0]), checked(margins[1]),
checked(margins[2]), checked(margins[3])}
default:
errorExit("invalid " + opt + ": " + margin)
}
return defaultMargin()
}
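// Illustrative values accepted by parseMargin for --margin / --padding
// (a sketch of the cases above; percent values are capped at 49%):
//
//	"5%"      // all four sides: 5%
//	"1,2"     // top/bottom: 1, right/left: 2
//	"1,2%,3"  // top: 1, right/left: 2%, bottom: 3
//	"1,2,3,4" // top, right, bottom, left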
func parseOptions(opts *Options, allArgs []string) {
var historyMax int
if opts.History == nil {
historyMax = defaultHistoryMax
} else {
historyMax = opts.History.maxSize
}
setHistory := func(path string) {
h, e := NewHistory(path, historyMax)
if e != nil {
errorExit(e.Error())
}
opts.History = h
}
setHistoryMax := func(max int) {
historyMax = max
if historyMax < 1 {
errorExit("history max must be a positive integer")
}
if opts.History != nil {
opts.History.maxSize = historyMax
}
}
validateJumpLabels := false
validatePointer := false
validateMarker := false
for i := 0; i < len(allArgs); i++ {
arg := allArgs[i]
switch arg {
case "-h", "--help":
help(exitOk)
case "-x", "--extended":
opts.Extended = true
case "-e", "--exact":
opts.Fuzzy = false
case "--extended-exact":
// Note that we now don't have --no-extended-exact
opts.Fuzzy = false
opts.Extended = true
case "+x", "--no-extended":
opts.Extended = false
case "+e", "--no-exact":
opts.Fuzzy = true
case "-q", "--query":
opts.Query = nextString(allArgs, &i, "query string required")
case "-f", "--filter":
filter := nextString(allArgs, &i, "query string required")
opts.Filter = &filter
case "--literal":
opts.Normalize = false
case "--no-literal":
opts.Normalize = true
case "--algo":
opts.FuzzyAlgo = parseAlgo(nextString(allArgs, &i, "algorithm required (v1|v2)"))
case "--expect":
for k, v := range parseKeyChords(nextString(allArgs, &i, "key names required"), "key names required") {
opts.Expect[k] = v
}
case "--no-expect":
opts.Expect = make(map[tui.Event]string)
case "--enabled", "--no-phony":
opts.Phony = false
case "--disabled", "--phony":
opts.Phony = true
case "--tiebreak":
opts.Criteria = parseTiebreak(nextString(allArgs, &i, "sort criterion required"))
case "--bind":
parseKeymap(opts.Keymap, nextString(allArgs, &i, "bind expression required"))
case "--color":
_, spec := optionalNextString(allArgs, &i)
if len(spec) == 0 {
opts.Theme = tui.EmptyTheme()
} else {
opts.Theme = parseTheme(opts.Theme, spec)
}
case "--toggle-sort":
parseToggleSort(opts.Keymap, nextString(allArgs, &i, "key name required"))
case "-d", "--delimiter":
opts.Delimiter = delimiterRegexp(nextString(allArgs, &i, "delimiter required"))
case "-n", "--nth":
opts.Nth = splitNth(nextString(allArgs, &i, "nth expression required"))
case "--with-nth":
opts.WithNth = splitNth(nextString(allArgs, &i, "nth expression required"))
case "-s", "--sort":
opts.Sort = optionalNumeric(allArgs, &i, 1)
case "+s", "--no-sort":
opts.Sort = 0
case "--tac":
opts.Tac = true
case "--no-tac":
opts.Tac = false
case "-i":
opts.Case = CaseIgnore
case "+i":
opts.Case = CaseRespect
case "-m", "--multi":
opts.Multi = optionalNumeric(allArgs, &i, maxMulti)
case "+m", "--no-multi":
opts.Multi = 0
case "--ansi":
opts.Ansi = true
case "--no-ansi":
opts.Ansi = false
case "--no-mouse":
opts.Mouse = false
case "+c", "--no-color":
opts.Theme = tui.NoColorTheme()
case "+2", "--no-256":
opts.Theme = tui.Default16
case "--black":
opts.Black = true
case "--no-black":
opts.Black = false
case "--bold":
opts.Bold = true
case "--no-bold":
opts.Bold = false
case "--layout":
opts.Layout = parseLayout(
nextString(allArgs, &i, "layout required (default / reverse / reverse-list)"))
case "--reverse":
opts.Layout = layoutReverse
case "--no-reverse":
opts.Layout = layoutDefault
case "--cycle":
opts.Cycle = true
case "--no-cycle":
opts.Cycle = false
case "--keep-right":
opts.KeepRight = true
case "--no-keep-right":
opts.KeepRight = false
case "--hscroll":
opts.Hscroll = true
case "--no-hscroll":
opts.Hscroll = false
case "--hscroll-off":
opts.HscrollOff = nextInt(allArgs, &i, "hscroll offset required")
case "--filepath-word":
opts.FileWord = true
case "--no-filepath-word":
opts.FileWord = false
case "--info":
opts.InfoStyle = parseInfoStyle(
nextString(allArgs, &i, "info style required"))
case "--no-info":
opts.InfoStyle = infoHidden
case "--inline-info":
opts.InfoStyle = infoInline
case "--no-inline-info":
opts.InfoStyle = infoDefault
case "--jump-labels":
opts.JumpLabels = nextString(allArgs, &i, "label characters required")
validateJumpLabels = true
case "-1", "--select-1":
opts.Select1 = true
case "+1", "--no-select-1":
opts.Select1 = false
case "-0", "--exit-0":
opts.Exit0 = true
case "+0", "--no-exit-0":
opts.Exit0 = false
case "--read0":
opts.ReadZero = true
case "--no-read0":
opts.ReadZero = false
case "--print0":
opts.Printer = func(str string) { fmt.Print(str, "\x00") }
opts.PrintSep = "\x00"
case "--no-print0":
opts.Printer = func(str string) { fmt.Println(str) }
opts.PrintSep = "\n"
case "--print-query":
opts.PrintQuery = true
case "--no-print-query":
opts.PrintQuery = false
case "--prompt":
opts.Prompt = nextString(allArgs, &i, "prompt string required")
case "--pointer":
opts.Pointer = nextString(allArgs, &i, "pointer sign string required")
validatePointer = true
case "--marker":
opts.Marker = nextString(allArgs, &i, "selected sign string required")
validateMarker = true
case "--sync":
opts.Sync = true
case "--no-sync":
opts.Sync = false
case "--async":
opts.Sync = false
case "--no-history":
opts.History = nil
case "--history":
setHistory(nextString(allArgs, &i, "history file path required"))
case "--history-size":
setHistoryMax(nextInt(allArgs, &i, "history max size required"))
case "--no-header":
opts.Header = []string{}
case "--no-header-lines":
opts.HeaderLines = 0
case "--header":
opts.Header = strLines(nextString(allArgs, &i, "header string required"))
case "--header-lines":
opts.HeaderLines = atoi(
nextString(allArgs, &i, "number of header lines required"))
case "--preview":
opts.Preview.command = nextString(allArgs, &i, "preview command required")
case "--no-preview":
opts.Preview.command = ""
case "--preview-window":
parsePreviewWindow(&opts.Preview,
nextString(allArgs, &i, "preview window layout required: [up|down|left|right][,SIZE[%]][,border-BORDER_OPT][,wrap][,cycle][,hidden][,+SCROLL[OFFSETS][/DENOM]][,~HEADER_LINES][,default]"))
case "--height":
opts.Height = parseHeight(nextString(allArgs, &i, "height required: HEIGHT[%]"))
case "--min-height":
opts.MinHeight = nextInt(allArgs, &i, "height required: HEIGHT")
case "--no-height":
opts.Height = sizeSpec{}
case "--no-margin":
opts.Margin = defaultMargin()
case "--no-padding":
opts.Padding = defaultMargin()
case "--no-border":
opts.BorderShape = tui.BorderNone
case "--border":
hasArg, arg := optionalNextString(allArgs, &i)
opts.BorderShape = parseBorder(arg, !hasArg)
case "--no-unicode":
opts.Unicode = false
case "--unicode":
opts.Unicode = true
case "--margin":
opts.Margin = parseMargin(
"margin",
nextString(allArgs, &i, "margin required (TRBL / TB,RL / T,RL,B / T,R,B,L)"))
case "--padding":
opts.Padding = parseMargin(
"padding",
nextString(allArgs, &i, "padding required (TRBL / TB,RL / T,RL,B / T,R,B,L)"))
case "--tabstop":
opts.Tabstop = nextInt(allArgs, &i, "tab stop required")
case "--clear":
opts.ClearOnExit = true
case "--no-clear":
opts.ClearOnExit = false
case "--version":
opts.Version = true
default:
if match, value := optString(arg, "--algo="); match {
opts.FuzzyAlgo = parseAlgo(value)
} else if match, value := optString(arg, "-q", "--query="); match {
opts.Query = value
} else if match, value := optString(arg, "-f", "--filter="); match {
opts.Filter = &value
} else if match, value := optString(arg, "-d", "--delimiter="); match {
opts.Delimiter = delimiterRegexp(value)
} else if match, value := optString(arg, "--border="); match {
opts.BorderShape = parseBorder(value, false)
} else if match, value := optString(arg, "--prompt="); match {
opts.Prompt = value
} else if match, value := optString(arg, "--pointer="); match {
opts.Pointer = value
validatePointer = true
} else if match, value := optString(arg, "--marker="); match {
opts.Marker = value
validateMarker = true
} else if match, value := optString(arg, "-n", "--nth="); match {
opts.Nth = splitNth(value)
} else if match, value := optString(arg, "--with-nth="); match {
opts.WithNth = splitNth(value)
} else if match, _ := optString(arg, "-s", "--sort="); match {
opts.Sort = 1 // Don't care
} else if match, value := optString(arg, "-m", "--multi="); match {
opts.Multi = atoi(value)
} else if match, value := optString(arg, "--height="); match {
opts.Height = parseHeight(value)
} else if match, value := optString(arg, "--min-height="); match {
opts.MinHeight = atoi(value)
} else if match, value := optString(arg, "--layout="); match {
opts.Layout = parseLayout(value)
} else if match, value := optString(arg, "--info="); match {
opts.InfoStyle = parseInfoStyle(value)
} else if match, value := optString(arg, "--toggle-sort="); match {
parseToggleSort(opts.Keymap, value)
} else if match, value := optString(arg, "--expect="); match {
for k, v := range parseKeyChords(value, "key names required") {
opts.Expect[k] = v
}
} else if match, value := optString(arg, "--tiebreak="); match {
opts.Criteria = parseTiebreak(value)
} else if match, value := optString(arg, "--color="); match {
opts.Theme = parseTheme(opts.Theme, value)
} else if match, value := optString(arg, "--bind="); match {
parseKeymap(opts.Keymap, value)
} else if match, value := optString(arg, "--history="); match {
setHistory(value)
} else if match, value := optString(arg, "--history-size="); match {
setHistoryMax(atoi(value))
} else if match, value := optString(arg, "--header="); match {
opts.Header = strLines(value)
} else if match, value := optString(arg, "--header-lines="); match {
opts.HeaderLines = atoi(value)
} else if match, value := optString(arg, "--preview="); match {
opts.Preview.command = value
} else if match, value := optString(arg, "--preview-window="); match {
parsePreviewWindow(&opts.Preview, value)
} else if match, value := optString(arg, "--margin="); match {
opts.Margin = parseMargin("margin", value)
} else if match, value := optString(arg, "--padding="); match {
opts.Padding = parseMargin("padding", value)
} else if match, value := optString(arg, "--tabstop="); match {
opts.Tabstop = atoi(value)
} else if match, value := optString(arg, "--hscroll-off="); match {
opts.HscrollOff = atoi(value)
} else if match, value := optString(arg, "--jump-labels="); match {
opts.JumpLabels = value
validateJumpLabels = true
} else {
errorExit("unknown option: " + arg)
}
}
}
if opts.HeaderLines < 0 {
errorExit("header lines must be a non-negative integer")
}
if opts.HscrollOff < 0 {
errorExit("hscroll offset must be a non-negative integer")
}
if opts.Tabstop < 1 {
errorExit("tab stop must be a positive integer")
}
if len(opts.JumpLabels) == 0 {
errorExit("empty jump labels")
}
if validateJumpLabels {
for _, r := range opts.JumpLabels {
if r < 32 || r > 126 {
errorExit("non-ascii jump labels are not allowed")
}
}
}
if validatePointer {
if err := validateSign(opts.Pointer, "pointer"); err != nil {
errorExit(err.Error())
}
}
if validateMarker {
if err := validateSign(opts.Marker, "marker"); err != nil {
errorExit(err.Error())
}
}
}
func validateSign(sign string, signOptName string) error {
if sign == "" {
return fmt.Errorf("%v cannot be empty", signOptName)
}
for _, r := range sign {
if !unicode.IsGraphic(r) {
return fmt.Errorf("invalid character in %v", signOptName)
}
}
if runewidth.StringWidth(sign) > 2 {
return fmt.Errorf("%v display width should be up to 2", signOptName)
}
return nil
}
func postProcessOptions(opts *Options) {
if !opts.Version && !tui.IsLightRendererSupported() && opts.Height.size > 0 {
errorExit("--height option is currently not supported on this platform")
}
// Default actions for CTRL-N / CTRL-P when --history is set
if opts.History != nil {
if _, prs := opts.Keymap[tui.CtrlP.AsEvent()]; !prs {
opts.Keymap[tui.CtrlP.AsEvent()] = toActions(actPreviousHistory)
}
if _, prs := opts.Keymap[tui.CtrlN.AsEvent()]; !prs {
opts.Keymap[tui.CtrlN.AsEvent()] = toActions(actNextHistory)
}
}
// Extend the default key map
keymap := defaultKeymap()
for key, actions := range opts.Keymap {
for _, act := range actions {
if act.t == actToggleSort {
opts.ToggleSort = true
}
}
keymap[key] = actions
}
opts.Keymap = keymap
// If we're not using extended search mode, --nth option becomes irrelevant
// if it contains the whole range
if !opts.Extended || len(opts.Nth) == 1 {
for _, r := range opts.Nth {
if r.begin == rangeEllipsis && r.end == rangeEllipsis {
opts.Nth = make([]Range, 0)
return
}
}
}
if opts.Bold {
theme := opts.Theme
boldify := func(c tui.ColorAttr) tui.ColorAttr {
dup := c
if !theme.Colored {
dup.Attr |= tui.Bold
} else if (c.Attr & tui.AttrRegular) == 0 {
dup.Attr |= tui.Bold
}
return dup
}
theme.Current = boldify(theme.Current)
theme.CurrentMatch = boldify(theme.CurrentMatch)
theme.Prompt = boldify(theme.Prompt)
theme.Input = boldify(theme.Input)
theme.Cursor = boldify(theme.Cursor)
theme.Spinner = boldify(theme.Spinner)
}
}
// ParseOptions parses command-line options
func ParseOptions() *Options {
opts := defaultOptions()
// Options from Env var
words, _ := shellwords.Parse(os.Getenv("FZF_DEFAULT_OPTS"))
if len(words) > 0 {
parseOptions(opts, words)
}
// Options from command-line arguments
parseOptions(opts, os.Args[1:])
postProcessOptions(opts)
return opts
}
| [
"\"FZF_DEFAULT_OPTS\""
]
| []
| [
"FZF_DEFAULT_OPTS"
]
| [] | ["FZF_DEFAULT_OPTS"] | go | 1 | 0 | |
go/src/infra/cros/recovery/internal/localtlw/tlwclient.go | // Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package localtlw provides local implementation of TLW Access.
package localtlw
import (
"context"
"fmt"
"os"
"strings"
"time"
"go.chromium.org/chromiumos/config/go/api/test/xmlrpc"
"go.chromium.org/luci/common/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
fleet "infra/appengine/crosskylabadmin/api/fleet/v1"
"infra/cros/recovery/docker"
"infra/cros/recovery/internal/localtlw/dutinfo"
tlwio "infra/cros/recovery/internal/localtlw/io"
"infra/cros/recovery/internal/localtlw/localinfo"
"infra/cros/recovery/internal/localtlw/localproxy"
"infra/cros/recovery/internal/localtlw/servod"
"infra/cros/recovery/internal/localtlw/ssh"
tlw_xmlrpc "infra/cros/recovery/internal/localtlw/xmlrpc"
"infra/cros/recovery/internal/log"
"infra/cros/recovery/internal/rpm"
"infra/cros/recovery/tlw"
"infra/libs/sshpool"
ufsAPI "infra/unifiedfleet/api/v1/rpc"
)
const (
// gsCrosImageBucket is the base URL for the Google Storage bucket for
// ChromeOS image archives.
gsCrosImageBucket = "gs://chromeos-image-archive"
// tlwPort is default port used to run TLW on the drones.
tlwPort = 7151
// tlsPort is default port used to run TLS on the drones.
tlsPort = 7152
)
// UFSClient is a client that knows how to work with UFS RPC methods.
type UFSClient interface {
// GetDeviceData retrieves requested device data from the UFS and inventoryV2.
GetDeviceData(ctx context.Context, req *ufsAPI.GetDeviceDataRequest, opts ...grpc.CallOption) (rsp *ufsAPI.GetDeviceDataResponse, err error)
// UpdateDeviceRecoveryData updates the labdata, dutdata, resource state, dut states for a DUT
UpdateDeviceRecoveryData(ctx context.Context, in *ufsAPI.UpdateDeviceRecoveryDataRequest, opts ...grpc.CallOption) (*ufsAPI.UpdateDeviceRecoveryDataResponse, error)
}
// CSAClient is a client that knows how to respond to the GetStableVersion RPC call.
type CSAClient interface {
GetStableVersion(ctx context.Context, in *fleet.GetStableVersionRequest, opts ...grpc.CallOption) (*fleet.GetStableVersionResponse, error)
}
// hostType provides information about the type of a host.
type hostType string
const (
hostTypeCros hostType = "cros-host"
hostTypeServo hostType = "servo-host"
hostTypeBtPeer hostType = "bluetooth-peer-host"
hostTypeRouter hostType = "router-host"
hostTypeChameleon hostType = "chameleon-host"
	defaultBluetoothPeerServerPort = 9992
)
// tlwClient holds data and represents the local implementation of TLW Access interface.
type tlwClient struct {
csaClient CSAClient
ufsClient UFSClient
sshPool *sshpool.Pool
servodPool *servod.Pool
// Cache received devices from inventory
devices map[string]*tlw.Dut
hostTypes map[string]hostType
	// Map that provides the DUT host name as value, keyed by the names of the DUT's other hosts.
hostToParents map[string]string
// Map of version requested and received.
versionMap map[string]*tlw.VersionResponse
}
// New builds a new local TLW Access instance.
func New(ufs UFSClient, csac CSAClient) (tlw.Access, error) {
c := &tlwClient{
ufsClient: ufs,
csaClient: csac,
sshPool: sshpool.New(ssh.SSHConfig()),
servodPool: servod.NewPool(),
devices: make(map[string]*tlw.Dut),
hostTypes: make(map[string]hostType),
hostToParents: make(map[string]string),
versionMap: make(map[string]*tlw.VersionResponse),
}
return c, nil
}
// Close closes all used resources.
func (c *tlwClient) Close(ctx context.Context) error {
if err := c.sshPool.Close(); err != nil {
return errors.Annotate(err, "tlw client").Err()
}
return c.servodPool.Close()
}
// Ping performs ping by resource name.
//
// For containers it checks if it is up.
func (c *tlwClient) Ping(ctx context.Context, resourceName string, count int) error {
dut, err := c.getDevice(ctx, resourceName)
if err != nil {
return errors.Annotate(err, "ping").Err()
}
if c.isServoHost(resourceName) && isServodContainer(dut) {
log.Infof(ctx, "Ping: servod container %s starting...", resourceName)
d, err := c.dockerClient(ctx)
if err != nil {
return errors.Annotate(err, "ping").Err()
}
containerName := servoContainerName(dut)
if up, err := d.IsUp(ctx, containerName); err != nil {
return errors.Annotate(err, "ping").Err()
} else if up {
log.Infof(ctx, "Ping: servod container %s is up!", containerName)
return nil
}
return errors.Reason("ping: container %q is down", containerName).Err()
} else {
err = ping(resourceName, count)
return errors.Annotate(err, "ping").Err()
}
}
// Run executes command on device by SSH related to resource name.
//
// For containers: for backwards compatibility, if the command is provided without arguments
// we assume the whole command is in one string and run it in a linux shell (/bin/sh -c).
func (c *tlwClient) Run(ctx context.Context, req *tlw.RunRequest) *tlw.RunResult {
fullCmd := strings.Join(append([]string{req.GetCommand()}, req.GetArgs()...), " ")
dut, err := c.getDevice(ctx, req.GetResource())
if err != nil {
return &tlw.RunResult{
Command: fullCmd,
ExitCode: -1,
Stderr: fmt.Sprintf("run: %s", err),
}
}
	// For backward compatibility we set a max limit of 1 hour for any request,
	// as some provisioning or download operations can take longer.
timeout := time.Hour
if req.GetTimeout().IsValid() {
timeout = req.GetTimeout().AsDuration()
}
// Servod-container does not have ssh access so to execute any commands
// we need to use the docker client.
if c.isServoHost(req.GetResource()) && isServodContainer(dut) {
d, err := c.dockerClient(ctx)
if err != nil {
return &tlw.RunResult{
Command: fullCmd,
ExitCode: -1,
Stderr: fmt.Sprintf("run: %s", err),
}
}
eReq := &docker.ExecRequest{
Timeout: timeout,
Cmd: append([]string{req.GetCommand()}, req.GetArgs()...),
}
containerName := servoContainerName(dut)
		// For backwards compatibility, if only the command is provided we assume
		// that it is the whole command in one line and run it in a linux shell.
if strings.Contains(req.GetCommand(), " ") && len(req.GetArgs()) == 0 {
eReq.Cmd = []string{"/bin/sh", "-c", req.GetCommand()}
			// Quoting works here only because the string is created for user-facing
			// representation and logs, not for execution.
fullCmd = fmt.Sprintf("/bin/sh -c %q", req.GetCommand())
}
containerIsUp, err := d.IsUp(ctx, containerName)
if err != nil {
return &tlw.RunResult{
Command: fullCmd,
ExitCode: -1,
Stderr: fmt.Sprintf("run: %s", err),
}
} else if containerIsUp {
// As container is created and running we can execute the commands.
if res, err := d.Exec(ctx, containerName, eReq); err != nil {
return &tlw.RunResult{
Command: fullCmd,
ExitCode: -1,
Stderr: fmt.Sprintf("run: %s", err),
}
} else {
return &tlw.RunResult{
Command: fullCmd,
ExitCode: res.ExitCode,
Stdout: res.Stdout,
Stderr: res.Stderr,
}
}
} else {
			// If the container is down, run the command by starting a fresh container.
			// TODO(otabek): Simplify running a container when moved outside.
containerArgs := createServodContainerArgs(false, nil, eReq.Cmd)
res, err := d.Start(ctx, containerName, containerArgs, eReq.Timeout)
if err != nil {
return &tlw.RunResult{
Command: fullCmd,
ExitCode: -1,
Stderr: fmt.Sprintf("run: %s", err),
}
}
return &tlw.RunResult{
Command: fullCmd,
ExitCode: res.ExitCode,
Stdout: res.Stdout,
Stderr: res.Stderr,
}
}
} else {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
cr := make(chan *tlw.RunResult, 1)
go func() {
cr <- ssh.Run(ctx, c.sshPool, localproxy.BuildAddr(req.GetResource()), fullCmd)
}()
select {
case r := <-cr:
return r
case <-ctx.Done():
// If we reached timeout first.
return &tlw.RunResult{
Command: fullCmd,
ExitCode: 124,
				Stderr: fmt.Sprintf("run: exceeded timeout %s", timeout),
}
}
}
}
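// Illustrative Run requests (a sketch of the container backward-compatibility rule above;
// field names are inferred from the getters used in Run):
//
//	// Command plus arguments: executed as-is inside the servod container.
//	&tlw.RunRequest{Resource: host, Command: "cat", Args: []string{"/etc/hosts"}}
//	// Single string with spaces and no args: wrapped as /bin/sh -c "echo hello && date".
//	&tlw.RunRequest{Resource: host, Command: "echo hello && date"}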
// InitServod initiates servod daemon on servo-host.
func (c *tlwClient) InitServod(ctx context.Context, req *tlw.InitServodRequest) error {
dut, err := c.getDevice(ctx, req.Resource)
if err != nil {
return errors.Annotate(err, "init servod %q", req.Resource).Err()
}
if dut.ServoHost == nil || dut.ServoHost.Name == "" {
return errors.Reason("init servod %q: servo is not found", req.Resource).Err()
}
if isServodContainer(dut) {
err := c.startServodContainer(ctx, dut, req.Options)
return errors.Annotate(err, "init servod %q", req.Resource).Err()
}
s, err := c.servodPool.Get(
localproxy.BuildAddr(dut.ServoHost.Name),
int32(dut.ServoHost.ServodPort),
func() ([]string, error) {
return servod.GenerateParams(req.Options), nil
})
if err != nil {
return errors.Annotate(err, "init servod %q", req.Resource).Err()
}
if err := s.Prepare(ctx, c.sshPool); err != nil {
return errors.Annotate(err, "init servod %q", req.Resource).Err()
}
return nil
}
// dockerServodImageName provides image for servod when use container.
func dockerServodImageName() string {
// TODO(otabek): Value has to come here from somewhere.
return "us-docker.pkg.dev/chromeos-partner-moblab/common-core/servod:release"
}
// createServodContainerArgs creates default args for servodContainer.
func createServodContainerArgs(detached bool, envVar, cmd []string) *docker.ContainerArgs {
return &docker.ContainerArgs{
Detached: detached,
EnvVar: envVar,
ImageName: dockerServodImageName(),
Network: defaultDockerNetwork(),
Volumes: []string{"/dev:/dev"},
Privileged: true,
Exec: cmd,
}
}
// startServodContainer start servod container if required.
func (c *tlwClient) startServodContainer(ctx context.Context, dut *tlw.Dut, o *tlw.ServodOptions) error {
containerName := servoContainerName(dut)
d, err := c.dockerClient(ctx)
if err != nil {
return errors.Annotate(err, "start servod container").Err()
}
if up, err := d.IsUp(ctx, containerName); err != nil {
return errors.Annotate(err, "start servod container").Err()
} else if up {
log.Debugf(ctx, "Servod container %s is already up!", containerName)
return nil
}
envVar := servod.GenerateParams(o)
containerArgs := createServodContainerArgs(true, envVar, []string{"bash", "/start_servod.sh"})
res, err := d.Start(ctx, containerName, containerArgs, time.Hour)
if err != nil {
return errors.Annotate(err, "start servod container").Err()
}
log.Debugf(ctx, "Container started with id:%s\n with errout: %s", res.Stdout, res.Stderr)
	// Wait 3 seconds as sometimes the container is not fully initialized and fails
	// when we start working with servod or tooling.
	// TODO(otabek): Move to servod-container wrapper.
time.Sleep(3 * time.Second)
	// Wait for servod initialization to finish.
eReq := &docker.ExecRequest{
Timeout: 2 * time.Minute,
Cmd: []string{"servodtool", "instance", "wait-for-active", "-p", fmt.Sprintf("%d", o.ServodPort)},
}
if _, err := d.Exec(ctx, containerName, eReq); err != nil {
return errors.Annotate(err, "start servod container").Err()
}
log.Debugf(ctx, "Servod container %s started and up!", containerName)
return nil
}
// defaultDockerNetwork provides network in which docker need to run.
func defaultDockerNetwork() string {
network := os.Getenv("DOCKER_DEFAULT_NETWORK")
// If not provided then use host network.
if network == "" {
network = "host"
}
return network
}
// StopServod stops servod daemon on servo-host.
func (c *tlwClient) StopServod(ctx context.Context, resourceName string) error {
dut, err := c.getDevice(ctx, resourceName)
if err != nil {
return errors.Annotate(err, "stop servod %q", resourceName).Err()
}
if dut.ServoHost == nil || dut.ServoHost.Name == "" {
return errors.Reason("stop servod %q: servo is not found", resourceName).Err()
}
if isServodContainer(dut) {
if d, err := c.dockerClient(ctx); err != nil {
return errors.Annotate(err, "stop servod %q", resourceName).Err()
} else {
err := d.Remove(ctx, servoContainerName(dut), true)
return errors.Annotate(err, "stop servod %q", resourceName).Err()
}
}
// TODO: Move options to stop request.
o := &tlw.ServodOptions{
ServodPort: int32(dut.ServoHost.ServodPort),
}
s, err := c.servodPool.Get(
localproxy.BuildAddr(dut.ServoHost.Name),
o.ServodPort,
func() ([]string, error) {
return servod.GenerateParams(o), nil
})
if err != nil {
return errors.Annotate(err, "stop servod %q", resourceName).Err()
}
if err := s.Stop(ctx, c.sshPool); err != nil {
return errors.Annotate(err, "stop servod %q", resourceName).Err()
}
return nil
}
// CallServod executes a command on servod related to resource name.
// Commands will be run against servod on servo-host.
func (c *tlwClient) CallServod(ctx context.Context, req *tlw.CallServodRequest) *tlw.CallServodResponse {
// Translator to convert error to response structure.
fail := func(err error) *tlw.CallServodResponse {
return &tlw.CallServodResponse{
Value: &xmlrpc.Value{
ScalarOneof: &xmlrpc.Value_String_{
String_: fmt.Sprintf("call servod %q: %s", req.Resource, err),
},
},
Fault: true,
}
}
dut, err := c.getDevice(ctx, req.Resource)
if err != nil {
return fail(err)
}
if dut.ServoHost == nil || dut.ServoHost.Name == "" {
return fail(errors.Reason("call servod %q: servo not found", req.Resource).Err())
}
// For container connect to the container as it running on the same host.
if isServodContainer(dut) {
d, err := c.dockerClient(ctx)
if err != nil {
return fail(err)
}
addr, err := d.IPAddress(ctx, servoContainerName(dut))
if err != nil {
return fail(err)
}
rpc := tlw_xmlrpc.New(addr, dut.ServoHost.ServodPort)
if val, err := servod.Call(ctx, rpc, req.Timeout, req.Method, req.Args); err != nil {
return fail(err)
} else {
return &tlw.CallServodResponse{
Value: val,
Fault: false,
}
}
} else {
// For labstation using port forward by ssh.
s, err := c.servodPool.Get(
localproxy.BuildAddr(dut.ServoHost.Name),
int32(dut.ServoHost.ServodPort), nil)
if err != nil {
return fail(err)
}
if val, err := s.Call(ctx, c.sshPool, req.Timeout, req.Method, req.Args); err != nil {
return fail(err)
} else {
return &tlw.CallServodResponse{
Value: val,
Fault: false,
}
}
}
}
// CallBluetoothPeer executes a command on bluetooth-peer service.
func (c *tlwClient) CallBluetoothPeer(ctx context.Context, req *tlw.CallBluetoothPeerRequest) *tlw.CallBluetoothPeerResponse {
// Translator to convert error to response structure.
fail := func(err error) *tlw.CallBluetoothPeerResponse {
return &tlw.CallBluetoothPeerResponse{
Value: &xmlrpc.Value{
ScalarOneof: &xmlrpc.Value_String_{
String_: fmt.Sprintf("call servod %q: %s", req.GetResource(), err),
},
},
Fault: true,
}
}
// Check that the name is known to the inventory of loaded devices.
_, err := c.getDevice(ctx, req.GetResource())
if err != nil {
return fail(err)
}
s, err := c.servodPool.Get(
localproxy.BuildAddr(req.GetResource()),
int32(deafultBluetoothPeerServerPort),
func() ([]string, error) { return nil, nil })
if err != nil {
return fail(err)
}
// TODO: Change bluetooth peer's CallBluetoothPeerRequest to include timeout.
val, err := s.Call(ctx, c.sshPool, 30*time.Second, req.GetMethod(), req.GetArgs())
if err != nil {
return fail(err)
}
return &tlw.CallBluetoothPeerResponse{
Value: val,
Fault: false,
}
}
// validateCopyRequest validates that all data is provided as part of request.
func validateCopyRequest(req *tlw.CopyRequest) error {
if req.Resource == "" {
return errors.Reason("resource is not provided").Err()
} else if req.PathSource == "" {
return errors.Reason("src path is empty").Err()
} else if req.PathDestination == "" {
return errors.Reason("destination path is not provided").Err()
}
return nil
}
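// Illustrative sketch (not part of the original file; hostname and paths are
// hypothetical): a request passes validateCopyRequest only when all three
// fields are populated, e.g.
//   &tlw.CopyRequest{
//       Resource:        "dut-hostname",
//       PathSource:      "/var/log/messages",
//       PathDestination: "/tmp/messages",
//   }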
// CopyFileTo copies file to remote device from local.
func (c *tlwClient) CopyFileTo(ctx context.Context, req *tlw.CopyRequest) error {
if err := validateCopyRequest(req); err != nil {
return errors.Annotate(err, "copy file to").Err()
}
if err := tlwio.CopyFileTo(ctx, c.sshPool, req); err != nil {
return errors.Annotate(err, "copy file to").Err()
}
return nil
}
// CopyFileFrom copies file from remote device to local.
func (c *tlwClient) CopyFileFrom(ctx context.Context, req *tlw.CopyRequest) error {
if err := validateCopyRequest(req); err != nil {
return errors.Annotate(err, "copy file from").Err()
}
dut, err := c.getDevice(ctx, req.Resource)
if err != nil {
return errors.Annotate(err, "copy file from %q", req.Resource).Err()
}
// The containerized servo-host does not support SSH, so we need to use the docker client.
if c.isServoHost(req.Resource) && isServodContainer(dut) {
d, err := c.dockerClient(ctx)
if err != nil {
return errors.Annotate(err, "copy file from %q", req.Resource).Err()
}
containerName := servoContainerName(dut)
if up, err := d.IsUp(ctx, containerName); err != nil {
return errors.Annotate(err, "copy file from %q", req.Resource).Err()
} else if !up {
log.Infof(ctx, "Copy file from: servod container %s is down!", containerName)
// At this point err is nil, so annotating it would return nil; report the down container explicitly.
return errors.Reason("copy file from %q: servod container %s is down", req.Resource, containerName).Err()
}
err = d.CopyFrom(ctx, containerName, req.PathSource, req.PathDestination)
} else {
// Use direct copy if the host supports SSH.
err = tlwio.CopyFileFrom(ctx, c.sshPool, &tlw.CopyRequest{
Resource: localproxy.BuildAddr(req.Resource),
PathSource: req.PathSource,
PathDestination: req.PathDestination,
})
}
return errors.Annotate(err, "copy file from %q", req.Resource).Err()
}
// CopyDirectoryTo copies directory to remote device from local, recursively.
func (c *tlwClient) CopyDirectoryTo(ctx context.Context, req *tlw.CopyRequest) error {
if err := tlwio.CopyDirectoryTo(ctx, c.sshPool, req); err != nil {
return errors.Annotate(err, "copy directory to").Err()
}
return nil
}
// CopyDirectoryFrom copies directory from remote device to local, recursively.
func (c *tlwClient) CopyDirectoryFrom(ctx context.Context, req *tlw.CopyRequest) error {
if err := tlwio.CopyDirectoryFrom(ctx, c.sshPool, req); err != nil {
return errors.Annotate(err, "copy directory from").Err()
}
return nil
}
// RunRPMAction performs power action on RPM outlet per request.
func (c *tlwClient) RunRPMAction(ctx context.Context, req *tlw.RunRPMActionRequest) error {
if req.GetHostname() == "" {
return errors.Reason("run rpm action: hostname of DUT is not provided").Err()
}
if req.GetRpmHostname() == "" {
return errors.Reason("run rpm action: power unit hostname is not provided").Err()
}
if req.GetRpmOutlet() == "" {
return errors.Reason("run rpm action: power unit outlet is not provided").Err()
}
var s rpm.PowerState
switch req.GetAction() {
case tlw.RunRPMActionRequest_ON:
s = rpm.PowerStateOn
case tlw.RunRPMActionRequest_OFF:
s = rpm.PowerStateOff
case tlw.RunRPMActionRequest_CYCLE:
s = rpm.PowerStateCycle
default:
return errors.Reason("run rpm action: unknown action: %s", req.GetAction().String()).Err()
}
log.Debugf(ctx, "Changing state RPM outlet %s:%s to state %q.", req.GetRpmHostname(), req.GetRpmOutlet(), s)
rpmReq := &rpm.RPMPowerRequest{
Hostname: req.GetHostname(),
PowerUnitHostname: req.GetRpmHostname(),
PowerunitOutlet: req.GetRpmOutlet(),
State: s,
}
if err := rpm.SetPowerState(ctx, rpmReq); err != nil {
return errors.Annotate(err, "run rpm action").Err()
}
return nil
}
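// Illustrative sketch (not part of the original file; hostnames and field
// values are hypothetical): a power-cycle request maps to rpm.PowerStateCycle,
// e.g.
//   &tlw.RunRPMActionRequest{
//       Hostname:    "dut-1",
//       RpmHostname: "rpm-1",
//       RpmOutlet:   ".A1",
//       Action:      tlw.RunRPMActionRequest_CYCLE,
//   }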
// GetCacheUrl provides a URL to download the requested file path.
// The URL is used to download an image to a USB-drive and for provisioning.
func (c *tlwClient) GetCacheUrl(ctx context.Context, resourceName, filePath string) (string, error) {
// TODO(otabek@): Add logic to understand local file and just return it back.
addr := fmt.Sprintf("0.0.0.0:%d", tlwPort)
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return "", errors.Annotate(err, "connect to background TLW").Err()
}
defer func() { conn.Close() }()
return CacheForDut(ctx, conn, filePath, resourceName)
}
// ListResourcesForUnit provides list of resources names related to target unit.
func (c *tlwClient) ListResourcesForUnit(ctx context.Context, name string) ([]string, error) {
if name == "" {
return nil, errors.Reason("list resources: unit name is expected").Err()
}
resourceNames, err := c.readInventory(ctx, name)
return resourceNames, errors.Annotate(err, "list resources %q", name).Err()
}
// GetDut provides DUT info per requested resource name from inventory.
func (c *tlwClient) GetDut(ctx context.Context, name string) (*tlw.Dut, error) {
dut, err := c.getDevice(ctx, name)
if err != nil {
return nil, errors.Annotate(err, "get DUT %q", name).Err()
}
dut.ProvisionedInfo, err = localinfo.ReadProvisionInfo(ctx, dut.Name)
return dut, errors.Annotate(err, "get dut").Err()
}
// Version provides versions for requested device and type of versions.
func (c *tlwClient) Version(ctx context.Context, req *tlw.VersionRequest) (*tlw.VersionResponse, error) {
if req == nil || req.Resource == "" {
return nil, errors.Reason("version: request is not provided").Err()
}
// Create a cache key for versions based on the targeted hostname.
versionKey := fmt.Sprintf("%s|%s", req.GetType(), req.Resource)
if v, ok := c.versionMap[versionKey]; ok {
log.Debugf(ctx, "Received version %q (cache): %#v", req.GetType(), v)
return v, nil
}
dut, err := c.getDevice(ctx, req.Resource)
if err != nil {
return nil, errors.Annotate(err, "version").Err()
}
var res *tlw.VersionResponse
switch req.GetType() {
case tlw.VersionRequest_CROS:
if sv, err := c.getCrosStableVersion(ctx, dut); err != nil {
log.Infof(ctx, "version: failed to receive stable-version for %q. Error: %s", dut.Name, err)
} else {
res = sv
}
case tlw.VersionRequest_WIFI_ROUTER:
// TODO: needs implementation.
res = &tlw.VersionResponse{
Value: map[string]string{
"os_image": "gale-test-ap-tryjob/R92-13982.81.0-b4959409",
},
}
}
log.Debugf(ctx, "Received version %q: %#v", req.GetType(), res)
c.versionMap[versionKey] = res
return res, nil
}
// getDevice receives device from inventory.
func (c *tlwClient) getDevice(ctx context.Context, name string) (*tlw.Dut, error) {
if dutName, ok := c.hostToParents[name]; ok {
// The name refers to a peripheral host previously mapped to its parent DUT.
name = dutName
}
// First check if device is already in the cache.
if d, ok := c.devices[name]; ok {
log.Debugf(ctx, "Get device %q: received from cache.", name)
return d, nil
}
// Read the inventory and then get the device from the cache.
// If it is still not in the cache then the name refers to a unit, not a DUT.
if _, err := c.readInventory(ctx, name); err != nil {
return nil, errors.Annotate(err, "get device").Err()
}
if d, ok := c.devices[name]; ok {
log.Debugf(ctx, "Get device %q: received from cache.", name)
return d, nil
}
return nil, errors.Reason("get device: unexpected error").Err()
}
// readInventory reads the inventory and returns resource names.
// As a side effect, received devices are cached.
// Please check the cache before calling this method.
func (c *tlwClient) readInventory(ctx context.Context, name string) (resourceNames []string, rErr error) {
ddrsp, err := c.ufsClient.GetDeviceData(ctx, &ufsAPI.GetDeviceDataRequest{Hostname: name})
if err != nil {
return resourceNames, errors.Annotate(err, "read inventory %q", name).Err()
}
var dut *tlw.Dut
switch ddrsp.GetResourceType() {
case ufsAPI.GetDeviceDataResponse_RESOURCE_TYPE_ATTACHED_DEVICE:
attachedDevice := ddrsp.GetAttachedDeviceData()
dut, err = dutinfo.ConvertAttachedDeviceToTlw(attachedDevice)
if err != nil {
return resourceNames, errors.Annotate(err, "read inventory %q: attached device", name).Err()
}
c.cacheDevice(dut)
resourceNames = []string{dut.Name}
case ufsAPI.GetDeviceDataResponse_RESOURCE_TYPE_CHROMEOS_DEVICE:
dd := ddrsp.GetChromeOsDeviceData()
dut, err = dutinfo.ConvertDut(dd)
if err != nil {
return resourceNames, errors.Annotate(err, "get device %q: chromeos device", name).Err()
}
c.cacheDevice(dut)
resourceNames = []string{dut.Name}
case ufsAPI.GetDeviceDataResponse_RESOURCE_TYPE_SCHEDULING_UNIT:
su := ddrsp.GetSchedulingUnit()
for _, hostname := range su.GetMachineLSEs() {
resourceNames = append(resourceNames, hostname)
}
default:
return resourceNames, errors.Reason("get device %q: unsupported type %q", name, ddrsp.GetResourceType()).Err()
}
return resourceNames, nil
}
// cacheDevice puts the device into the local cache and records the host names known for the DUT.
func (c *tlwClient) cacheDevice(dut *tlw.Dut) {
if dut == nil {
// Skip as DUT not found.
return
}
name := dut.Name
c.devices[name] = dut
c.hostToParents[name] = name
c.hostTypes[dut.Name] = hostTypeCros
if dut.ServoHost != nil && dut.ServoHost.Name != "" {
c.hostTypes[dut.ServoHost.Name] = hostTypeServo
c.hostToParents[dut.ServoHost.Name] = name
}
for _, bt := range dut.BluetoothPeerHosts {
if bt.Name != "" {
c.hostTypes[bt.Name] = hostTypeBtPeer
c.hostToParents[bt.Name] = name
}
}
for _, router := range dut.WifiRouterHosts {
if router != nil && router.GetName() != "" {
c.hostTypes[router.GetName()] = hostTypeRouter
c.hostToParents[router.GetName()] = name
}
}
if dut.ChameleonHost != nil && dut.ChameleonHost.Name != "" {
c.hostTypes[dut.ChameleonHost.Name] = hostTypeChameleon
c.hostToParents[dut.ChameleonHost.Name] = name
}
}
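// Illustrative sketch (not part of the original file; hostnames are
// hypothetical): caching a DUT "dut-1" with servo-host "servo-1" results in
//   c.devices["dut-1"]         = dut
//   c.hostTypes["dut-1"]       = hostTypeCros
//   c.hostTypes["servo-1"]     = hostTypeServo
//   c.hostToParents["servo-1"] = "dut-1"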
// unCacheDevice removes the device from the local cache along with the host names recorded for the DUT.
func (c *tlwClient) unCacheDevice(dut *tlw.Dut) {
if dut == nil {
// Skip as DUT not provided.
return
}
name := dut.Name
delete(c.hostToParents, name)
delete(c.hostTypes, name)
if dut.ServoHost != nil && dut.ServoHost.Name != "" {
delete(c.hostTypes, dut.ServoHost.Name)
delete(c.hostToParents, dut.ServoHost.Name)
}
for _, bt := range dut.BluetoothPeerHosts {
if bt.Name != "" {
delete(c.hostTypes, bt.Name)
delete(c.hostToParents, bt.Name)
}
}
// Remove wifi router hosts registered by cacheDevice to keep the cache symmetric.
for _, router := range dut.WifiRouterHosts {
if router != nil && router.GetName() != "" {
delete(c.hostTypes, router.GetName())
delete(c.hostToParents, router.GetName())
}
}
if dut.ChameleonHost != nil && dut.ChameleonHost.Name != "" {
delete(c.hostTypes, dut.ChameleonHost.Name)
delete(c.hostToParents, dut.ChameleonHost.Name)
}
delete(c.devices, name)
}
// isServoHost tells if host is servo-host.
func (c *tlwClient) isServoHost(host string) bool {
if v, ok := c.hostTypes[host]; ok {
return v == hostTypeServo
}
return false
}
// getCrosStableVersion receives stable versions for ChromeOS device.
func (c *tlwClient) getCrosStableVersion(ctx context.Context, dut *tlw.Dut) (*tlw.VersionResponse, error) {
req := &fleet.GetStableVersionRequest{Hostname: dut.Name}
res, err := c.csaClient.GetStableVersion(ctx, req)
if err != nil {
if status.Code(err) == codes.NotFound {
return nil, errors.Reason("get stable-version %q: record not found", dut.Name).Err()
}
return nil, errors.Annotate(err, "get stable-version %q", dut.Name).Err()
}
if res.GetCrosVersion() == "" {
return nil, errors.Reason("get stable-version %q: version is empty", dut.Name).Err()
}
return &tlw.VersionResponse{
Value: map[string]string{
"os_image": fmt.Sprintf("%s-release/%s", dut.Board, res.GetCrosVersion()),
"fw_image": res.GetFaftVersion(),
"fw_version": res.GetFirmwareVersion(),
},
}, nil
}
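// Illustrative sketch (not part of the original file; board and version are
// hypothetical): for Board "eve" and a stable CrosVersion "R100-14526.89.0"
// the response contains
//   "os_image": "eve-release/R100-14526.89.0"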
// UpdateDut updates DUT info into inventory.
func (c *tlwClient) UpdateDut(ctx context.Context, dut *tlw.Dut) error {
if dut == nil {
return errors.Reason("update DUT: DUT is not provided").Err()
}
dut, err := c.getDevice(ctx, dut.Name)
if err != nil {
return errors.Annotate(err, "update DUT %q", dut.Name).Err()
}
req, err := dutinfo.CreateUpdateDutRequest(dut.Id, dut)
if err != nil {
return errors.Annotate(err, "update DUT %q", dut.Name).Err()
}
log.Debugf(ctx, "Update DUT: update request: %s", req)
rsp, err := c.ufsClient.UpdateDeviceRecoveryData(ctx, req)
if err != nil {
return errors.Annotate(err, "update DUT %q", dut.Name).Err()
}
log.Debugf(ctx, "Update DUT: update response: %s", rsp)
c.unCacheDevice(dut)
// Update provisioning data on the execution env.
err = localinfo.UpdateProvisionInfo(ctx, dut)
return errors.Annotate(err, "udpate dut").Err()
}
// Provision triggers provisioning of the device.
func (c *tlwClient) Provision(ctx context.Context, req *tlw.ProvisionRequest) error {
if req == nil {
return errors.Reason("provision: request is empty").Err()
}
if req.GetResource() == "" {
return errors.Reason("provision: resource is not specified").Err()
}
if req.GetSystemImagePath() == "" {
return errors.Reason("provision: system image path is not specified").Err()
}
log.Debugf(ctx, "Started provisioning by TLS: %s", req)
addr := fmt.Sprintf("0.0.0.0:%d", tlsPort)
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return errors.Annotate(err, "provision: connect to TLS").Err()
}
defer func() { conn.Close() }()
err = TLSProvision(ctx, conn, req)
return errors.Annotate(err, "provision").Err()
}
// dockerClient provides a docker client used to work with servod containers.
func (c *tlwClient) dockerClient(ctx context.Context) (docker.Client, error) {
d, err := docker.NewClient(ctx)
return d, errors.Annotate(err, "docker client").Err()
}
// isServodContainer checks if the DUT is using a servod container.
// For now this is just a simple check of whether a servod container name is provided.
// Later we need to distinguish whether the container runs on the same host or a remote one.
func isServodContainer(d *tlw.Dut) bool {
return servoContainerName(d) != ""
}
// servoContainerName returns container name specified for servo-host.
func servoContainerName(d *tlw.Dut) string {
if d == nil || d.ServoHost == nil {
return ""
}
return d.ServoHost.ContainerName
}
| [
"\"DOCKER_DEFAULT_NETWORK\""
]
| []
| [
"DOCKER_DEFAULT_NETWORK"
]
| [] | ["DOCKER_DEFAULT_NETWORK"] | go | 1 | 0 | |
pkg/k8s/conn.go | package k8s
import (
"fmt"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"os"
)
func AuthenticateInCluster() (*kubernetes.Clientset, error) {
// creates the in-cluster config
config, err := rest.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("failed to get client config: %v", err)
}
// creates the clientset
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to generate client set: %v", err)
}
return clientSet, nil
}
func AuthenticateOutOfCluster() (*kubernetes.Clientset, error) {
kubeconfig := os.Getenv("KUBECONFIG")
// use the current context in kubeconfig
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, fmt.Errorf("failed to get client config: %v", err)
}
// create the clientset
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to generate client set: %v", err)
}
return clientSet, nil
}
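// Usage sketch (assumption, not part of the original file): prefer the
// in-cluster config when running inside a pod and fall back to the
// KUBECONFIG-based config otherwise.
//   clientSet, err := AuthenticateInCluster()
//   if err != nil {
//       clientSet, err = AuthenticateOutOfCluster()
//   }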
| [
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG"
]
| [] | ["KUBECONFIG"] | go | 1 | 0 | |
minebd/src/main/java/io/minebox/nbd/SystemdUtil.java | package io.minebox.nbd;
import info.faljse.SDNotify.SDNotify;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SystemdUtil {
private static final boolean hasEnv;
private final static Logger LOGGER = LoggerFactory.getLogger(SystemdUtil.class);
static {
final String notifySocket = System.getenv().get("NOTIFY_SOCKET");
hasEnv = !(notifySocket == null || notifySocket.length() == 0);
if (!hasEnv) {
LOGGER.info("we appear to run outside systemd");
} else {
LOGGER.info("we appear to run inside systemd");
}
}
void sendStopping() {
LOGGER.info("sendStopping");
if (hasEnv) {
SDNotify.sendStopping();
}
}
void sendError(int errno) {
LOGGER.info("sendErrno {}", errno);
if (hasEnv) {
SDNotify.sendErrno(errno);
}
}
void sendNotify() {
LOGGER.info("sendNotify");
if (hasEnv) {
SDNotify.sendNotify();
}
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
config/config.go | package config
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
)
var (
// Default sections
sectionDefaults = []string{"global_tags", "agent", "outputs",
"processors", "aggregators", "inputs"}
// Default input plugins
inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
"processes", "disk", "diskio"}
// Default output plugins
outputDefaults = []string{"influxdb"}
// envVarRe is a regex to find environment variables in the config file
envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`)
envVarEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
httpLoadConfigRetryInterval = 10 * time.Second
// fetchURLRe is a regex to determine whether the requested file should
// be fetched from a remote or read from the filesystem.
fetchURLRe = regexp.MustCompile(`^\w+://`)
)
// Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has
// specified
type Config struct {
toml *toml.Config
errs []error // config load errors.
UnusedFields map[string]bool
Tags map[string]string
InputFilters []string
OutputFilters []string
Agent *AgentConfig
Inputs []*models.RunningInput
Outputs []*models.RunningOutput
Aggregators []*models.RunningAggregator
// Processors have a slice wrapper type because they need to be sorted
Processors models.RunningProcessors
AggProcessors models.RunningProcessors
}
// NewConfig creates a new struct to hold the Telegraf config.
// For historical reasons, it holds the actual instances of the running plugins
// once the configuration is parsed.
func NewConfig() *Config {
c := &Config{
UnusedFields: map[string]bool{},
// Agent defaults:
Agent: &AgentConfig{
Interval: Duration(10 * time.Second),
RoundInterval: true,
FlushInterval: Duration(10 * time.Second),
LogTarget: "file",
LogfileRotationMaxArchives: 5,
},
Tags: make(map[string]string),
Inputs: make([]*models.RunningInput, 0),
Outputs: make([]*models.RunningOutput, 0),
Processors: make([]*models.RunningProcessor, 0),
AggProcessors: make([]*models.RunningProcessor, 0),
InputFilters: make([]string, 0),
OutputFilters: make([]string, 0),
}
tomlCfg := &toml.Config{
NormFieldName: toml.DefaultConfig.NormFieldName,
FieldToKey: toml.DefaultConfig.FieldToKey,
MissingField: c.missingTomlField,
}
c.toml = tomlCfg
return c
}
// AgentConfig defines configuration that will be used by the Telegraf agent
type AgentConfig struct {
// Interval at which to gather information
Interval Duration
// RoundInterval rounds collection interval to 'interval'.
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
RoundInterval bool
// By default or when set to "0s", precision will be set to the same
// timestamp order as the collection interval, with the maximum being 1s.
// ie, when interval = "10s", precision will be "1s"
// when interval = "250ms", precision will be "1ms"
// Precision will NOT be used for service inputs. It is up to each individual
// service input to set the timestamp at the appropriate precision.
Precision Duration
// CollectionJitter is used to jitter the collection by a random amount.
// Each plugin will sleep for a random time within jitter before collecting.
// This can be used to avoid many plugins querying things like sysfs at the
// same time, which can have a measurable effect on the system.
CollectionJitter Duration
// FlushInterval is the Interval at which to flush data
FlushInterval Duration
// FlushJitter Jitters the flush interval by a random amount.
// This is primarily to avoid large write spikes for users running a large
// number of telegraf instances.
// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
FlushJitter Duration
// MetricBatchSize is the maximum number of metrics that are written to an
// output plugin in one call.
MetricBatchSize int
// MetricBufferLimit is the max number of metrics that each output plugin
// will cache. The buffer is cleared when a successful write occurs. When
// full, the oldest metrics will be overwritten. This number should be a
// multiple of MetricBatchSize. Due to current implementation, this could
// not be less than 2 times MetricBatchSize.
MetricBufferLimit int
// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
// it fills up, regardless of FlushInterval. Setting this option to true
// does _not_ deactivate FlushInterval.
FlushBufferWhenFull bool // deprecated in 0.13; has no effect
// TODO(cam): Remove UTC and parameter, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatibility
UTC bool `toml:"utc"` // deprecated in 1.0.0; has no effect
// Debug is the option for running in debug mode
Debug bool `toml:"debug"`
// Quiet is the option for running in quiet mode
Quiet bool `toml:"quiet"`
// Log target controls the destination for logs and can be one of "file",
// "stderr" or, on Windows, "eventlog". When set to "file", the output file
// is determined by the "logfile" setting.
LogTarget string `toml:"logtarget"`
// Name of the file to be logged to when using the "file" logtarget. If set to
// the empty string then logs are written to stderr.
Logfile string `toml:"logfile"`
// The file will be rotated after the time interval specified. When set
// to 0 no time based rotation is performed.
LogfileRotationInterval Duration `toml:"logfile_rotation_interval"`
// The logfile will be rotated when it becomes larger than the specified
// size. When set to 0 no size based rotation is performed.
LogfileRotationMaxSize Size `toml:"logfile_rotation_max_size"`
// Maximum number of rotated archives to keep, any older logs are deleted.
// If set to -1, no archives are removed.
LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"`
// Pick a timezone to use when logging or type 'local' for local time.
LogWithTimezone string `toml:"log_with_timezone"`
Hostname string
OmitHostname bool
}
// InputNames returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
var name []string
for _, input := range c.Inputs {
name = append(name, input.Config.Name)
}
return PluginNameCounts(name)
}
// AggregatorNames returns a list of strings of the configured aggregators.
func (c *Config) AggregatorNames() []string {
var name []string
for _, aggregator := range c.Aggregators {
name = append(name, aggregator.Config.Name)
}
return PluginNameCounts(name)
}
// ProcessorNames returns a list of strings of the configured processors.
func (c *Config) ProcessorNames() []string {
var name []string
for _, processor := range c.Processors {
name = append(name, processor.Config.Name)
}
return PluginNameCounts(name)
}
// OutputNames returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
var name []string
for _, output := range c.Outputs {
name = append(name, output.Config.Name)
}
return PluginNameCounts(name)
}
// PluginNameCounts returns a list of sorted plugin names and their count
func PluginNameCounts(plugins []string) []string {
names := make(map[string]int)
for _, plugin := range plugins {
names[plugin]++
}
var namecount []string
for name, count := range names {
if count == 1 {
namecount = append(namecount, name)
} else {
namecount = append(namecount, fmt.Sprintf("%s (%dx)", name, count))
}
}
sort.Strings(namecount)
return namecount
}
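// Illustrative sketch (not part of the original file): PluginNameCounts
// collapses duplicates and sorts the result, e.g.
//   []string{"cpu", "mem", "cpu"} -> []string{"cpu (2x)", "mem"}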
// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
var tags []string
for k, v := range c.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(tags)
return strings.Join(tags, " ")
}
var header = `# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
`
var globalTagsConfig = `
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
`
var agentConfig = `
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log target controls the destination for logs and can be one of "file",
## "stderr" or, on Windows, "eventlog". When set to "file", the output file
## is determined by the "logfile" setting.
# logtarget = "file"
## Name of the file to be logged to when using the "file" logtarget. If set to
## the empty string then logs are written to stderr.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0d"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
`
var outputHeader = `
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
`
var processorHeader = `
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
`
var aggregatorHeader = `
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
`
var inputHeader = `
###############################################################################
# INPUT PLUGINS #
###############################################################################
`
var serviceInputHeader = `
###############################################################################
# SERVICE INPUT PLUGINS #
###############################################################################
`
// PrintSampleConfig prints the sample config
func PrintSampleConfig(
sectionFilters []string,
inputFilters []string,
outputFilters []string,
aggregatorFilters []string,
processorFilters []string,
) {
// print headers
fmt.Printf(header)
if len(sectionFilters) == 0 {
sectionFilters = sectionDefaults
}
printFilteredGlobalSections(sectionFilters)
// print output plugins
if sliceContains("outputs", sectionFilters) {
if len(outputFilters) != 0 {
if len(outputFilters) >= 3 && outputFilters[1] != "none" {
fmt.Printf(outputHeader)
}
printFilteredOutputs(outputFilters, false)
} else {
fmt.Printf(outputHeader)
printFilteredOutputs(outputDefaults, false)
// Print non-default outputs, commented
var pnames []string
for pname := range outputs.Outputs {
if !sliceContains(pname, outputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredOutputs(pnames, true)
}
}
// print processor plugins
if sliceContains("processors", sectionFilters) {
if len(processorFilters) != 0 {
if len(processorFilters) >= 3 && processorFilters[1] != "none" {
fmt.Printf(processorHeader)
}
printFilteredProcessors(processorFilters, false)
} else {
fmt.Printf(processorHeader)
pnames := []string{}
for pname := range processors.Processors {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredProcessors(pnames, true)
}
}
// print aggregator plugins
if sliceContains("aggregators", sectionFilters) {
if len(aggregatorFilters) != 0 {
if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" {
fmt.Printf(aggregatorHeader)
}
printFilteredAggregators(aggregatorFilters, false)
} else {
fmt.Printf(aggregatorHeader)
pnames := []string{}
for pname := range aggregators.Aggregators {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredAggregators(pnames, true)
}
}
// print input plugins
if sliceContains("inputs", sectionFilters) {
if len(inputFilters) != 0 {
if len(inputFilters) >= 3 && inputFilters[1] != "none" {
fmt.Printf(inputHeader)
}
printFilteredInputs(inputFilters, false)
} else {
fmt.Printf(inputHeader)
printFilteredInputs(inputDefaults, false)
// Print non-default inputs, commented
var pnames []string
for pname := range inputs.Inputs {
if !sliceContains(pname, inputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredInputs(pnames, true)
}
}
}
func printFilteredProcessors(processorFilters []string, commented bool) {
// Filter processors
var pnames []string
for pname := range processors.Processors {
if sliceContains(pname, processorFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// Print Outputs
for _, pname := range pnames {
creator := processors.Processors[pname]
output := creator()
printConfig(pname, output, "processors", commented)
}
}
func printFilteredAggregators(aggregatorFilters []string, commented bool) {
// Filter outputs
var anames []string
for aname := range aggregators.Aggregators {
if sliceContains(aname, aggregatorFilters) {
anames = append(anames, aname)
}
}
sort.Strings(anames)
// Print Outputs
for _, aname := range anames {
creator := aggregators.Aggregators[aname]
output := creator()
printConfig(aname, output, "aggregators", commented)
}
}
func printFilteredInputs(inputFilters []string, commented bool) {
// Filter inputs
var pnames []string
for pname := range inputs.Inputs {
if sliceContains(pname, inputFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// cache service inputs to print them at the end
servInputs := make(map[string]telegraf.ServiceInput)
// for alphabetical looping:
servInputNames := []string{}
// Print Inputs
for _, pname := range pnames {
if pname == "cisco_telemetry_gnmi" {
continue
}
creator := inputs.Inputs[pname]
input := creator()
switch p := input.(type) {
case telegraf.ServiceInput:
servInputs[pname] = p
servInputNames = append(servInputNames, pname)
continue
}
printConfig(pname, input, "inputs", commented)
}
// Print Service Inputs
if len(servInputs) == 0 {
return
}
sort.Strings(servInputNames)
fmt.Printf(serviceInputHeader)
for _, name := range servInputNames {
printConfig(name, servInputs[name], "inputs", commented)
}
}
func printFilteredOutputs(outputFilters []string, commented bool) {
// Filter outputs
var onames []string
for oname := range outputs.Outputs {
if sliceContains(oname, outputFilters) {
onames = append(onames, oname)
}
}
sort.Strings(onames)
// Print Outputs
for _, oname := range onames {
creator := outputs.Outputs[oname]
output := creator()
printConfig(oname, output, "outputs", commented)
}
}
func printFilteredGlobalSections(sectionFilters []string) {
if sliceContains("global_tags", sectionFilters) {
fmt.Printf(globalTagsConfig)
}
if sliceContains("agent", sectionFilters) {
fmt.Printf(agentConfig)
}
}
func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool) {
comment := ""
if commented {
comment = "# "
}
fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment,
op, name)
config := p.SampleConfig()
if config == "" {
fmt.Printf("\n%s # no configuration\n\n", comment)
} else {
lines := strings.Split(config, "\n")
for i, line := range lines {
if i == 0 || i == len(lines)-1 {
fmt.Print("\n")
continue
}
fmt.Print(strings.TrimRight(comment+line, " ") + "\n")
}
}
}
func sliceContains(name string, list []string) bool {
for _, b := range list {
if b == name {
return true
}
}
return false
}
// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig(name string) error {
if creator, ok := inputs.Inputs[name]; ok {
printConfig(name, creator(), "inputs", false)
} else {
return fmt.Errorf("Input %s not found", name)
}
return nil
}
// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error {
if creator, ok := outputs.Outputs[name]; ok {
printConfig(name, creator(), "outputs", false)
} else {
return fmt.Errorf("Output %s not found", name)
}
return nil
}
// LoadDirectory loads all toml config files found in the specified path, recursively.
func (c *Config) LoadDirectory(path string) error {
walkfn := func(thispath string, info os.FileInfo, _ error) error {
if info == nil {
log.Printf("W! Telegraf is not permitted to read %s", thispath)
return nil
}
if info.IsDir() {
if strings.HasPrefix(info.Name(), "..") {
// skip Kubernetes mounts, preventing loading the same config twice
return filepath.SkipDir
}
return nil
}
name := info.Name()
if len(name) < 6 || name[len(name)-5:] != ".conf" {
return nil
}
err := c.LoadConfig(thispath)
if err != nil {
return err
}
return nil
}
return filepath.Walk(path, walkfn)
}
// Try to find a default config file at these locations (in order):
// 1. $TELEGRAF_CONFIG_PATH
// 2. $HOME/.telegraf/telegraf.conf
// 3. /etc/telegraf/telegraf.conf
//
func getDefaultConfigPath() (string, error) {
envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
etcfile := "/etc/telegraf/telegraf.conf"
if runtime.GOOS == "windows" {
programFiles := os.Getenv("ProgramFiles")
if programFiles == "" { // Should never happen
programFiles = `C:\Program Files`
}
etcfile = programFiles + `\Telegraf\telegraf.conf`
}
for _, path := range []string{envfile, homefile, etcfile} {
if _, err := os.Stat(path); err == nil {
log.Printf("I! Using config file: %s", path)
return path, nil
}
}
// if we got here, we didn't find a file in a default location
return "", fmt.Errorf("No config file specified, and could not find one"+
" in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile)
}
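// Illustrative sketch (not part of the original file; path is hypothetical):
// with TELEGRAF_CONFIG_PATH=/opt/telegraf/custom.conf set and the file present,
// getDefaultConfigPath returns that path and never falls through to
// $HOME/.telegraf/telegraf.conf or /etc/telegraf/telegraf.conf.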
// LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error {
var err error
if path == "" {
if path, err = getDefaultConfigPath(); err != nil {
return err
}
}
data, err := loadConfig(path)
if err != nil {
return fmt.Errorf("Error loading config file %s: %w", path, err)
}
if err = c.LoadConfigData(data); err != nil {
return fmt.Errorf("Error loading config file %s: %w", path, err)
}
return nil
}
// LoadConfigData loads TOML-formatted config data
func (c *Config) LoadConfigData(data []byte) error {
tbl, err := parseConfig(data)
if err != nil {
return fmt.Errorf("Error parsing data: %s", err)
}
// Parse tags tables first:
for _, tableName := range []string{"tags", "global_tags"} {
if val, ok := tbl.Fields[tableName]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("invalid configuration, bad table name %q", tableName)
}
if err = c.toml.UnmarshalTable(subTable, c.Tags); err != nil {
return fmt.Errorf("error parsing table name %q: %s", tableName, err)
}
}
}
// Parse agent table:
if val, ok := tbl.Fields["agent"]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("invalid configuration, error parsing agent table")
}
if err = c.toml.UnmarshalTable(subTable, c.Agent); err != nil {
return fmt.Errorf("error parsing [agent]: %w", err)
}
}
if !c.Agent.OmitHostname {
if c.Agent.Hostname == "" {
hostname, err := os.Hostname()
if err != nil {
return err
}
c.Agent.Hostname = hostname
}
c.Tags["host"] = c.Agent.Hostname
}
if len(c.UnusedFields) > 0 {
return fmt.Errorf("line %d: configuration specified the fields %q, but they weren't used", tbl.Line, keys(c.UnusedFields))
}
// Parse all the rest of the plugins:
for name, val := range tbl.Fields {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("invalid configuration, error parsing field %q as table", name)
}
switch name {
case "agent", "global_tags", "tags":
case "outputs":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [outputs.influxdb] support
case *ast.Table:
if err = c.addOutput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("error parsing %s, %w", pluginName, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addOutput(pluginName, t); err != nil {
return fmt.Errorf("error parsing %s array, %w", pluginName, err)
}
}
default:
return fmt.Errorf("unsupported config format: %s",
pluginName)
}
if len(c.UnusedFields) > 0 {
return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
}
}
case "inputs", "plugins":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [inputs.cpu] support
case *ast.Table:
if err = c.addInput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("error parsing %s, %w", pluginName, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addInput(pluginName, t); err != nil {
return fmt.Errorf("error parsing %s, %w", pluginName, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
pluginName)
}
if len(c.UnusedFields) > 0 {
return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
}
}
case "processors":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addProcessor(pluginName, t); err != nil {
return fmt.Errorf("error parsing %s, %w", pluginName, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
pluginName)
}
if len(c.UnusedFields) > 0 {
return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
}
}
case "aggregators":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addAggregator(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", pluginName, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
pluginName)
}
if len(c.UnusedFields) > 0 {
return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
}
}
// Assume it's an input plugin for legacy config file support if no other
// identifiers are present
default:
if err = c.addInput(name, subTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", name, err)
}
}
}
if len(c.Processors) > 1 {
sort.Sort(c.Processors)
}
return nil
}
// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// this is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}
// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
return envVarEscaper.Replace(value)
}
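// Illustrative sketch (not part of the original file): escapeEnv makes an
// environment value safe to splice into a TOML string, e.g.
//   escapeEnv(`C:\Program Files`) -> `C:\\Program Files`
//   escapeEnv(`say "hi"`)         -> `say \"hi\"`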
func loadConfig(config string) ([]byte, error) {
if fetchURLRe.MatchString(config) {
u, err := url.Parse(config)
if err != nil {
return nil, err
}
switch u.Scheme {
case "https", "http":
return fetchConfig(u)
default:
return nil, fmt.Errorf("scheme %q not supported", u.Scheme)
}
}
// If it isn't a https scheme, try it as a file
return ioutil.ReadFile(config)
}
func fetchConfig(u *url.URL) ([]byte, error) {
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
if v, exists := os.LookupEnv("INFLUX_TOKEN"); exists {
req.Header.Add("Authorization", "Token "+v)
}
req.Header.Add("Accept", "application/toml")
req.Header.Set("User-Agent", internal.ProductToken())
retries := 3
for i := 0; i <= retries; i++ {
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, fmt.Errorf("Retry %d of %d failed connecting to HTTP config server %s", i, retries, err)
}
if resp.StatusCode != http.StatusOK {
if i < retries {
log.Printf("Error getting HTTP config. Retry %d of %d in %s. Status=%d", i, retries, httpLoadConfigRetryInterval, resp.StatusCode)
time.Sleep(httpLoadConfigRetryInterval)
continue
}
return nil, fmt.Errorf("Retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status)
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
return nil, nil
}
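// Illustrative sketch (not part of the original file; token value is
// hypothetical): with INFLUX_TOKEN=abc123 exported, fetchConfig adds the header
//   Authorization: Token abc123
// to each request and retries up to 3 times on non-200 responses.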
// parseConfig parses the provided TOML contents and returns the AST produced
// by the TOML parser. While parsing, it will find environment variables
// and replace them.
func parseConfig(contents []byte) (*ast.Table, error) {
contents = trimBOM(contents)
parameters := envVarRe.FindAllSubmatch(contents, -1)
for _, parameter := range parameters {
if len(parameter) != 3 {
continue
}
var envVar []byte
if parameter[1] != nil {
envVar = parameter[1]
} else if parameter[2] != nil {
envVar = parameter[2]
} else {
continue
}
envVal, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$"))
if ok {
envVal = escapeEnv(envVal)
contents = bytes.Replace(contents, parameter[0], []byte(envVal), 1)
}
}
return toml.Parse(contents)
}
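// Illustrative sketch (not part of the original file; variable name and value
// are hypothetical): with MY_HOST=db.example set in the environment, a config
// line such as
//   urls = ["http://${MY_HOST}:8086"]
// is rewritten before TOML parsing to
//   urls = ["http://db.example:8086"]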
func (c *Config) addAggregator(name string, table *ast.Table) error {
creator, ok := aggregators.Aggregators[name]
if !ok {
return fmt.Errorf("Undefined but requested aggregator: %s", name)
}
aggregator := creator()
conf, err := c.buildAggregator(name, table)
if err != nil {
return err
}
if err := c.toml.UnmarshalTable(table, aggregator); err != nil {
return err
}
c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf))
return nil
}
func (c *Config) addProcessor(name string, table *ast.Table) error {
creator, ok := processors.Processors[name]
if !ok {
return fmt.Errorf("Undefined but requested processor: %s", name)
}
processorConfig, err := c.buildProcessor(name, table)
if err != nil {
return err
}
rf, err := c.newRunningProcessor(creator, processorConfig, table)
if err != nil {
return err
}
c.Processors = append(c.Processors, rf)
// save a copy for the aggregator
rf, err = c.newRunningProcessor(creator, processorConfig, table)
if err != nil {
return err
}
c.AggProcessors = append(c.AggProcessors, rf)
return nil
}
func (c *Config) newRunningProcessor(
creator processors.StreamingCreator,
processorConfig *models.ProcessorConfig,
table *ast.Table,
) (*models.RunningProcessor, error) {
processor := creator()
if p, ok := processor.(unwrappable); ok {
if err := c.toml.UnmarshalTable(table, p.Unwrap()); err != nil {
return nil, err
}
} else {
if err := c.toml.UnmarshalTable(table, processor); err != nil {
return nil, err
}
}
rf := models.NewRunningProcessor(processor, processorConfig)
return rf, nil
}
func (c *Config) addOutput(name string, table *ast.Table) error {
if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
return nil
}
creator, ok := outputs.Outputs[name]
if !ok {
return fmt.Errorf("Undefined but requested output: %s", name)
}
output := creator()
// If the output has a SetSerializer function, then this means it can write
// arbitrary types of output, so build the serializer and set it.
switch t := output.(type) {
case serializers.SerializerOutput:
serializer, err := c.buildSerializer(table)
if err != nil {
return err
}
t.SetSerializer(serializer)
}
outputConfig, err := c.buildOutput(name, table)
if err != nil {
return err
}
if err := c.toml.UnmarshalTable(table, output); err != nil {
return err
}
ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
c.Outputs = append(c.Outputs, ro)
return nil
}
func (c *Config) addInput(name string, table *ast.Table) error {
if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
return nil
}
// Legacy support renaming io input to diskio
if name == "io" {
name = "diskio"
}
creator, ok := inputs.Inputs[name]
if !ok {
return fmt.Errorf("Undefined but requested input: %s", name)
}
input := creator()
// If the input has a SetParser function, then this means it can accept
// arbitrary types of input, so build the parser and set it.
if t, ok := input.(parsers.ParserInput); ok {
parser, err := c.buildParser(name, table)
if err != nil {
return err
}
t.SetParser(parser)
}
if t, ok := input.(parsers.ParserFuncInput); ok {
config, err := c.getParserConfig(name, table)
if err != nil {
return err
}
t.SetParserFunc(func() (parsers.Parser, error) {
return parsers.NewParser(config)
})
}
pluginConfig, err := c.buildInput(name, table)
if err != nil {
return err
}
if err := c.toml.UnmarshalTable(table, input); err != nil {
return err
}
rp := models.NewRunningInput(input, pluginConfig)
rp.SetDefaultTags(c.Tags)
c.Inputs = append(c.Inputs, rp)
return nil
}
// buildAggregator parses Aggregator specific items from the ast.Table,
// builds the filter and returns a
// models.AggregatorConfig to be inserted into models.RunningAggregator
func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) {
conf := &models.AggregatorConfig{
Name: name,
Delay: time.Millisecond * 100,
Period: time.Second * 30,
Grace: time.Second * 0,
}
c.getFieldDuration(tbl, "period", &conf.Period)
c.getFieldDuration(tbl, "delay", &conf.Delay)
c.getFieldDuration(tbl, "grace", &conf.Grace)
c.getFieldBool(tbl, "drop_original", &conf.DropOriginal)
c.getFieldString(tbl, "name_prefix", &conf.MeasurementPrefix)
c.getFieldString(tbl, "name_suffix", &conf.MeasurementSuffix)
c.getFieldString(tbl, "name_override", &conf.NameOverride)
c.getFieldString(tbl, "alias", &conf.Alias)
conf.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := c.toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
return nil, fmt.Errorf("could not parse tags for input %s", name)
}
}
}
if c.hasErrs() {
return nil, c.firstErr()
}
var err error
conf.Filter, err = c.buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
// buildProcessor parses Processor specific items from the ast.Table,
// builds the filter and returns a
// models.ProcessorConfig to be inserted into models.RunningProcessor
func (c *Config) buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
conf := &models.ProcessorConfig{Name: name}
c.getFieldInt64(tbl, "order", &conf.Order)
c.getFieldString(tbl, "alias", &conf.Alias)
if c.hasErrs() {
return nil, c.firstErr()
}
var err error
conf.Filter, err = c.buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the models.OutputConfig/models.InputConfig
// to be used for glob filtering on tags and measurements
func (c *Config) buildFilter(tbl *ast.Table) (models.Filter, error) {
f := models.Filter{}
c.getFieldStringSlice(tbl, "namepass", &f.NamePass)
c.getFieldStringSlice(tbl, "namedrop", &f.NameDrop)
c.getFieldStringSlice(tbl, "pass", &f.FieldPass)
c.getFieldStringSlice(tbl, "fieldpass", &f.FieldPass)
c.getFieldStringSlice(tbl, "drop", &f.FieldDrop)
c.getFieldStringSlice(tbl, "fielddrop", &f.FieldDrop)
c.getFieldTagFilter(tbl, "tagpass", &f.TagPass)
c.getFieldTagFilter(tbl, "tagdrop", &f.TagDrop)
c.getFieldStringSlice(tbl, "tagexclude", &f.TagExclude)
c.getFieldStringSlice(tbl, "taginclude", &f.TagInclude)
if c.hasErrs() {
return f, c.firstErr()
}
if err := f.Compile(); err != nil {
return f, err
}
return f, nil
}
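// Illustrative sketch (not part of the original file; plugin name and values
// are hypothetical) of a filter section this function understands:
//   namepass  = ["cpu*"]
//   fielddrop = ["usage_guest"]
//   [inputs.cpu.tagpass]
//     host = ["web-*"]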
// buildInput parses input specific items from the ast.Table,
// builds the filter and returns a
// models.InputConfig to be inserted into models.RunningInput
func (c *Config) buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
cp := &models.InputConfig{Name: name}
c.getFieldDuration(tbl, "interval", &cp.Interval)
c.getFieldDuration(tbl, "precision", &cp.Precision)
c.getFieldDuration(tbl, "collection_jitter", &cp.CollectionJitter)
c.getFieldString(tbl, "name_prefix", &cp.MeasurementPrefix)
c.getFieldString(tbl, "name_suffix", &cp.MeasurementSuffix)
c.getFieldString(tbl, "name_override", &cp.NameOverride)
c.getFieldString(tbl, "alias", &cp.Alias)
cp.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := c.toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
return nil, fmt.Errorf("could not parse tags for input %s", name)
}
}
}
if c.hasErrs() {
return nil, c.firstErr()
}
var err error
cp.Filter, err = c.buildFilter(tbl)
if err != nil {
return cp, err
}
return cp, nil
}
// buildParser grabs the necessary entries from the ast.Table for creating
// a parsers.Parser object, and creates it, which can then be added onto
// an Input object.
func (c *Config) buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
config, err := c.getParserConfig(name, tbl)
if err != nil {
return nil, err
}
parser, err := parsers.NewParser(config)
if err != nil {
return nil, err
}
logger := models.NewLogger("parsers", config.DataFormat, name)
models.SetLoggerOnPlugin(parser, logger)
return parser, nil
}
func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
pc := &parsers.Config{
JSONStrict: true,
}
c.getFieldString(tbl, "data_format", &pc.DataFormat)
// Legacy support, exec plugin originally parsed JSON by default.
if name == "exec" && pc.DataFormat == "" {
pc.DataFormat = "json"
} else if pc.DataFormat == "" {
pc.DataFormat = "influx"
}
c.getFieldString(tbl, "separator", &pc.Separator)
c.getFieldStringSlice(tbl, "templates", &pc.Templates)
c.getFieldStringSlice(tbl, "tag_keys", &pc.TagKeys)
c.getFieldStringSlice(tbl, "json_string_fields", &pc.JSONStringFields)
c.getFieldString(tbl, "json_name_key", &pc.JSONNameKey)
c.getFieldString(tbl, "json_query", &pc.JSONQuery)
c.getFieldString(tbl, "json_time_key", &pc.JSONTimeKey)
c.getFieldString(tbl, "json_time_format", &pc.JSONTimeFormat)
c.getFieldString(tbl, "json_timezone", &pc.JSONTimezone)
c.getFieldBool(tbl, "json_strict", &pc.JSONStrict)
c.getFieldString(tbl, "data_type", &pc.DataType)
c.getFieldString(tbl, "collectd_auth_file", &pc.CollectdAuthFile)
c.getFieldString(tbl, "collectd_security_level", &pc.CollectdSecurityLevel)
c.getFieldString(tbl, "collectd_parse_multivalue", &pc.CollectdSplit)
c.getFieldStringSlice(tbl, "collectd_typesdb", &pc.CollectdTypesDB)
c.getFieldString(tbl, "dropwizard_metric_registry_path", &pc.DropwizardMetricRegistryPath)
c.getFieldString(tbl, "dropwizard_time_path", &pc.DropwizardTimePath)
c.getFieldString(tbl, "dropwizard_time_format", &pc.DropwizardTimeFormat)
c.getFieldString(tbl, "dropwizard_tags_path", &pc.DropwizardTagsPath)
c.getFieldStringMap(tbl, "dropwizard_tag_paths", &pc.DropwizardTagPathsMap)
//for grok data_format
c.getFieldStringSlice(tbl, "grok_named_patterns", &pc.GrokNamedPatterns)
c.getFieldStringSlice(tbl, "grok_patterns", &pc.GrokPatterns)
c.getFieldString(tbl, "grok_custom_patterns", &pc.GrokCustomPatterns)
c.getFieldStringSlice(tbl, "grok_custom_pattern_files", &pc.GrokCustomPatternFiles)
c.getFieldString(tbl, "grok_timezone", &pc.GrokTimezone)
c.getFieldString(tbl, "grok_unique_timestamp", &pc.GrokUniqueTimestamp)
//for csv parser
c.getFieldStringSlice(tbl, "csv_column_names", &pc.CSVColumnNames)
c.getFieldStringSlice(tbl, "csv_column_types", &pc.CSVColumnTypes)
c.getFieldStringSlice(tbl, "csv_tag_columns", &pc.CSVTagColumns)
c.getFieldString(tbl, "csv_timezone", &pc.CSVTimezone)
c.getFieldString(tbl, "csv_delimiter", &pc.CSVDelimiter)
c.getFieldString(tbl, "csv_comment", &pc.CSVComment)
c.getFieldString(tbl, "csv_measurement_column", &pc.CSVMeasurementColumn)
c.getFieldString(tbl, "csv_timestamp_column", &pc.CSVTimestampColumn)
c.getFieldString(tbl, "csv_timestamp_format", &pc.CSVTimestampFormat)
c.getFieldInt(tbl, "csv_header_row_count", &pc.CSVHeaderRowCount)
c.getFieldInt(tbl, "csv_skip_rows", &pc.CSVSkipRows)
c.getFieldInt(tbl, "csv_skip_columns", &pc.CSVSkipColumns)
c.getFieldBool(tbl, "csv_trim_space", &pc.CSVTrimSpace)
c.getFieldStringSlice(tbl, "csv_skip_values", &pc.CSVSkipValues)
c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys)
c.getFieldString(tbl, "value_field_name", &pc.ValueFieldName)
//for XML parser
if node, ok := tbl.Fields["xml"]; ok {
if subtbls, ok := node.([]*ast.Table); ok {
pc.XMLConfig = make([]parsers.XMLConfig, len(subtbls))
for i, subtbl := range subtbls {
subcfg := pc.XMLConfig[i]
c.getFieldString(subtbl, "metric_name", &subcfg.MetricQuery)
c.getFieldString(subtbl, "metric_selection", &subcfg.Selection)
c.getFieldString(subtbl, "timestamp", &subcfg.Timestamp)
c.getFieldString(subtbl, "timestamp_format", &subcfg.TimestampFmt)
c.getFieldStringMap(subtbl, "tags", &subcfg.Tags)
c.getFieldStringMap(subtbl, "fields", &subcfg.Fields)
c.getFieldStringMap(subtbl, "fields_int", &subcfg.FieldsInt)
c.getFieldString(subtbl, "field_selection", &subcfg.FieldSelection)
c.getFieldBool(subtbl, "field_name_expansion", &subcfg.FieldNameExpand)
c.getFieldString(subtbl, "field_name", &subcfg.FieldNameQuery)
c.getFieldString(subtbl, "field_value", &subcfg.FieldValueQuery)
pc.XMLConfig[i] = subcfg
}
}
}
pc.MetricName = name
if c.hasErrs() {
return nil, c.firstErr()
}
return pc, nil
}
// buildSerializer grabs the necessary entries from the ast.Table for creating
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) {
sc := &serializers.Config{TimestampUnits: 1 * time.Second}
c.getFieldString(tbl, "data_format", &sc.DataFormat)
if sc.DataFormat == "" {
sc.DataFormat = "influx"
}
c.getFieldString(tbl, "prefix", &sc.Prefix)
c.getFieldString(tbl, "template", &sc.Template)
c.getFieldStringSlice(tbl, "templates", &sc.Templates)
c.getFieldString(tbl, "carbon2_format", &sc.Carbon2Format)
c.getFieldString(tbl, "carbon2_sanitize_replace_char", &sc.Carbon2SanitizeReplaceChar)
c.getFieldInt(tbl, "influx_max_line_bytes", &sc.InfluxMaxLineBytes)
c.getFieldBool(tbl, "influx_sort_fields", &sc.InfluxSortFields)
c.getFieldBool(tbl, "influx_uint_support", &sc.InfluxUintSupport)
c.getFieldBool(tbl, "graphite_tag_support", &sc.GraphiteTagSupport)
c.getFieldString(tbl, "graphite_tag_sanitize_mode", &sc.GraphiteTagSanitizeMode)
c.getFieldString(tbl, "graphite_separator", &sc.GraphiteSeparator)
c.getFieldDuration(tbl, "json_timestamp_units", &sc.TimestampUnits)
c.getFieldBool(tbl, "splunkmetric_hec_routing", &sc.HecRouting)
c.getFieldBool(tbl, "splunkmetric_multimetric", &sc.SplunkmetricMultiMetric)
c.getFieldStringSlice(tbl, "wavefront_source_override", &sc.WavefrontSourceOverride)
c.getFieldBool(tbl, "wavefront_use_strict", &sc.WavefrontUseStrict)
c.getFieldBool(tbl, "prometheus_export_timestamp", &sc.PrometheusExportTimestamp)
c.getFieldBool(tbl, "prometheus_sort_metrics", &sc.PrometheusSortMetrics)
c.getFieldBool(tbl, "prometheus_string_as_label", &sc.PrometheusStringAsLabel)
if c.hasErrs() {
return nil, c.firstErr()
}
return serializers.NewSerializer(sc)
}
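// Hedged illustration (the TOML table below is made up, not taken from a real
// config): given an output section such as
//
//	[[outputs.file]]
//	  data_format = "json"
//	  json_timestamp_units = "1ms"
//
// buildSerializer reads these keys into serializers.Config and hands the result
// to serializers.NewSerializer.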
// buildOutput parses output-specific items from the ast.Table,
// builds the filter, and returns a models.OutputConfig to be inserted into
// a models.RunningOutput.
// Note: error exists in the return for future calls that might require error
func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
filter, err := c.buildFilter(tbl)
if err != nil {
return nil, err
}
oc := &models.OutputConfig{
Name: name,
Filter: filter,
}
// TODO: support FieldPass/FieldDrop on outputs
c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval)
c.getFieldDuration(tbl, "flush_jitter", &oc.FlushJitter)
c.getFieldInt(tbl, "metric_buffer_limit", &oc.MetricBufferLimit)
c.getFieldInt(tbl, "metric_batch_size", &oc.MetricBatchSize)
c.getFieldString(tbl, "alias", &oc.Alias)
c.getFieldString(tbl, "name_override", &oc.NameOverride)
c.getFieldString(tbl, "name_suffix", &oc.NameSuffix)
c.getFieldString(tbl, "name_prefix", &oc.NamePrefix)
if c.hasErrs() {
return nil, c.firstErr()
}
return oc, nil
}
func (c *Config) missingTomlField(_ reflect.Type, key string) error {
switch key {
case "alias", "carbon2_format", "carbon2_sanitize_replace_char", "collectd_auth_file",
"collectd_parse_multivalue", "collectd_security_level", "collectd_typesdb", "collection_jitter",
"csv_column_names", "csv_column_types", "csv_comment", "csv_delimiter", "csv_header_row_count",
"csv_measurement_column", "csv_skip_columns", "csv_skip_rows", "csv_tag_columns",
"csv_timestamp_column", "csv_timestamp_format", "csv_timezone", "csv_trim_space", "csv_skip_values",
"data_format", "data_type", "delay", "drop", "drop_original", "dropwizard_metric_registry_path",
"dropwizard_tag_paths", "dropwizard_tags_path", "dropwizard_time_format", "dropwizard_time_path",
"fielddrop", "fieldpass", "flush_interval", "flush_jitter", "form_urlencoded_tag_keys",
"grace", "graphite_separator", "graphite_tag_sanitize_mode", "graphite_tag_support",
"grok_custom_pattern_files", "grok_custom_patterns", "grok_named_patterns", "grok_patterns",
"grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields",
"influx_uint_support", "interval", "json_name_key", "json_query", "json_strict",
"json_string_fields", "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone",
"metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix",
"name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision",
"prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label",
"separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys",
"tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates",
"value_field_name", "wavefront_source_override", "wavefront_use_strict", "xml":
// ignore fields that are common to all plugins.
default:
c.UnusedFields[key] = true
}
return nil
}
func (c *Config) getFieldString(tbl *ast.Table, fieldName string, target *string) {
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
*target = str.Value
}
}
}
}
func (c *Config) getFieldDuration(tbl *ast.Table, fieldName string, target interface{}) {
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
d, err := time.ParseDuration(str.Value)
if err != nil {
c.addError(tbl, fmt.Errorf("error parsing duration: %w", err))
return
}
targetVal := reflect.ValueOf(target).Elem()
targetVal.Set(reflect.ValueOf(d))
}
}
}
}
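// Hedged illustration (the TOML line is made up): given
//
//	flush_interval = "10s"
//
// getFieldDuration parses the string with time.ParseDuration and stores
// 10*time.Second into target via reflection; a malformed value such as "10x"
// is recorded with addError instead of being silently dropped.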
func (c *Config) getFieldBool(tbl *ast.Table, fieldName string, target *bool) {
var err error
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
switch t := kv.Value.(type) {
case *ast.Boolean:
*target, err = t.Boolean()
if err != nil {
c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value))
return
}
case *ast.String:
*target, err = strconv.ParseBool(t.Value)
if err != nil {
c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value))
return
}
default:
c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value.Source()))
return
}
}
}
}
func (c *Config) getFieldInt(tbl *ast.Table, fieldName string, target *int) {
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if iAst, ok := kv.Value.(*ast.Integer); ok {
i, err := iAst.Int()
if err != nil {
c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value))
return
}
*target = int(i)
}
}
}
}
func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string, target *int64) {
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if iAst, ok := kv.Value.(*ast.Integer); ok {
i, err := iAst.Int()
if err != nil {
c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value))
return
}
*target = i
}
}
}
}
func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[]string) {
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
*target = append(*target, str.Value)
}
}
} else {
c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format", fieldName))
return
}
}
}
}
func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]models.TagFilter) {
if node, ok := tbl.Fields[fieldName]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
} else {
c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format on each entry", fieldName))
return
}
*target = append(*target, tagfilter)
}
}
}
}
}
func (c *Config) getFieldStringMap(tbl *ast.Table, fieldName string, target *map[string]string) {
*target = map[string]string{}
if node, ok := tbl.Fields[fieldName]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
(*target)[name] = str.Value
}
}
}
}
}
}
func keys(m map[string]bool) []string {
result := []string{}
for k := range m {
result = append(result, k)
}
return result
}
func (c *Config) hasErrs() bool {
return len(c.errs) > 0
}
func (c *Config) firstErr() error {
if len(c.errs) == 0 {
return nil
}
return c.errs[0]
}
func (c *Config) addError(tbl *ast.Table, err error) {
c.errs = append(c.errs, fmt.Errorf("line %d:%d: %w", tbl.Line, tbl.Position, err))
}
// unwrappable lets you retrieve the original telegraf.Processor from the
// StreamingProcessor. This is necessary because the toml Unmarshaller won't
// look inside composed types.
type unwrappable interface {
Unwrap() telegraf.Processor
}
| [
"\"TELEGRAF_CONFIG_PATH\"",
"\"ProgramFiles\""
]
| []
| [
"TELEGRAF_CONFIG_PATH",
"ProgramFiles"
]
| [] | ["TELEGRAF_CONFIG_PATH", "ProgramFiles"] | go | 2 | 0 | |
from_config/run_trainings.py | import os, sys, tqdm, json, shutil, glob, argparse
import os.path as osp
from tensorflow.keras.backend import clear_session
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--f", type=str, required=False)
parser.add_argument("-gpu", "--gpu", type=str, required=False)
parser.add_argument("-cpus", "--cpus", type=str, required=False)
args = parser.parse_args()
if args.gpu and args.gpu != 'all':
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
gpu_devices = tf.config.list_physical_devices('GPU')
if len(gpu_devices) > 0:
print("GPU detected")
for i in range(len(gpu_devices)):
tf.config.experimental.set_memory_growth(gpu_devices[i], True)
exp0_folder = str(args.f)
if args.cpus and args.cpus != 'all':
os.environ['TF_NUM_INTRAOP_THREADS'] = args.cpus
SHUTDOWN = False
##########################################################
# Loop over JSON files and train models #
##########################################################
# Generate list over experiments to run
from dev.utils import list_experiments, clean_done, check_dataset
from dev.train_script import train_model
clean_done(exp0_folder)
exp_folder, exp_list = list_experiments(exp0_folder)
print(f"Starting process with {len(exp_list)} experiments")
print(exp_list)
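# Hedged usage sketch (paths and flag values are assumptions, not from the repo):
#   python run_trainings.py -f experiments -gpu 0 -cpus 8
# where "experiments/" holds one JSON config per run and a "done/" subfolder
# that finished configs are moved into by the loop below.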
# Loop over the experiments
for i, experiment in enumerate(exp_list):
# Load construction dictionary from json file
with open(osp.join(exp_folder, experiment)) as file:
construct_dict = json.load(file)
construct_dict['experiment_name']=experiment[:-5]
# data_exists=check_dataset(construct_dict['data_params']['database'], construct_dict['data_params']['muon'],\
# construct_dict['data_params']['n_data'], construct_dict['data_params']['graph_construction'])
# if data_exists:
# print('No data construction required')
# construct_dict['data_params']['restart']=False
print(f"Starting experiment from {experiment[:-5]}")
epochexit=train_model(construct_dict)
print(f'Exited training after {epochexit} epochs')
shutil.move(osp.join(exp_folder, experiment), osp.join(exp0_folder+"/done", experiment))
print(f"Experiment {experiment[:-5]} done \t {experiment}: {i + 1} / {len(exp_list)}")
clear_session()
# if SHUTDOWN == True:
# os.system("shutdown -h")
# Create a script to go through and test the performance
# test_model(model = construct_dict['Experiment'], data = instructions_to_dataset_name(construct_dict))
# We can setup a shutdown maybe
#os.system("shutdown -h 5")
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"TF_NUM_INTRAOP_THREADS",
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["CUDA_VISIBLE_DEVICES", "TF_NUM_INTRAOP_THREADS", "TF_CPP_MIN_LOG_LEVEL"] | python | 3 | 0 | |
ishell.go | // Package ishell implements an interactive shell.
package ishell
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"log"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"unicode"
"unicode/utf8"
"github.com/abiosoft/readline"
"github.com/fatih/color"
shlex "github.com/flynn-archive/go-shlex"
)
const (
defaultPrompt = ">>> "
defaultMultiPrompt = "... "
)
var (
errNoHandler = errors.New("incorrect input, try 'help'")
errNoInterruptHandler = errors.New("no interrupt handler")
strMultiChoice = " ❯"
strMultiChoiceWin = " >"
strMultiChoiceSpacer = " "
strMultiChoiceOpen = "⬡ "
strMultiChoiceSelect = "⬢ "
)
// Shell is an interactive cli shell.
type Shell struct {
rootCmd *Cmd
generic func(*Context)
interrupt func(*Context, int, string)
interruptCount int
eof func(*Context)
reader *shellReader
writer io.Writer
active bool
activeMutex sync.RWMutex
ignoreCase bool
customCompleter bool
multiChoiceActive bool
haltChan chan struct{}
historyFile string
autoHelp bool
rawArgs []string
progressBar ProgressBar
pager string
pagerArgs []string
contextValues
Actions
}
// New creates a new shell with default settings. Uses standard output and default prompt ">>> ".
func New() *Shell {
return NewWithConfig(&readline.Config{Prompt: defaultPrompt})
}
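// A minimal usage sketch (illustrative only; the command name and help text are
// assumptions, not part of this package):
//
//	shell := ishell.New()
//	shell.AddCmd(&ishell.Cmd{
//		Name: "greet",
//		Help: "print a greeting",
//		Func: func(c *ishell.Context) { c.Println("hello") },
//	})
//	shell.Run()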
// NewWithConfig creates a new shell with custom readline config.
func NewWithConfig(conf *readline.Config) *Shell {
rl, err := readline.NewEx(conf)
if err != nil {
log.Println("Shell or operating system not supported.")
log.Fatal(err)
}
return NewWithReadline(rl)
}
// NewWithReadline creates a new shell with a custom readline instance.
func NewWithReadline(rl *readline.Instance) *Shell {
shell := &Shell{
rootCmd: &Cmd{},
reader: &shellReader{
scanner: rl,
prompt: rl.Config.Prompt,
multiPrompt: defaultMultiPrompt,
showPrompt: true,
buf: &bytes.Buffer{},
completer: readline.NewPrefixCompleter(),
},
writer: rl.Config.Stdout,
autoHelp: true,
}
shell.Actions = &shellActionsImpl{Shell: shell}
shell.progressBar = newProgressBar(shell)
addDefaultFuncs(shell)
return shell
}
// Start starts the shell but does not wait for it to stop.
func (s *Shell) Start() {
s.prepareRun()
go s.run()
}
// Run starts the shell and waits for it to stop.
func (s *Shell) Run() {
s.prepareRun()
s.run()
}
// Wait waits for the shell to stop.
func (s *Shell) Wait() {
<-s.haltChan
}
// MultiChoiceActive returns true if the shell is in the multi choice selection mode
func (s *Shell) MultiChoiceActive() bool {
return s.multiChoiceActive
}
// RootCmd returns the shell's root command.
func (s *Shell) RootCmd() *Cmd {
return s.rootCmd
}
// SetRootCmd sets the shell's root command.
// Use with caution, this may affect the behaviour of the default completer.
func (s *Shell) SetRootCmd(cmd *Cmd) {
s.rootCmd = cmd
}
func (s *Shell) stop() {
if !s.Active() {
return
}
s.activeMutex.Lock()
s.active = false
s.activeMutex.Unlock()
close(s.haltChan)
}
// Close stops the shell (if required) and closes the shell's input.
// This should be called when done with reading inputs.
// Unlike `Stop`, a closed shell cannot be restarted.
func (s *Shell) Close() {
s.stop()
s.reader.scanner.Close()
}
func (s *Shell) prepareRun() {
if s.Active() {
return
}
if !s.customCompleter {
s.initCompleters()
}
s.activeMutex.Lock()
s.active = true
s.activeMutex.Unlock()
s.haltChan = make(chan struct{})
}
func (s *Shell) run() {
shell:
for s.Active() {
var line []string
var err error
read := make(chan struct{})
go func() {
line, err = s.read()
read <- struct{}{}
}()
select {
case <-read:
break
case <-s.haltChan:
continue shell
}
if err == io.EOF {
if s.eof == nil {
fmt.Println("EOF")
break
}
if err := handleEOF(s); err != nil {
s.Println("Error:", err)
continue
}
} else if err != nil && err != readline.ErrInterrupt {
s.Println("Error:", err)
continue
}
if err == readline.ErrInterrupt {
// interrupt received
err = handleInterrupt(s, line)
} else {
// reset interrupt counter
s.interruptCount = 0
// normal flow
if len(line) == 0 {
// no input line
continue
}
err = handleInput(s, line)
}
if err != nil {
s.Println("Error:", err)
}
}
}
// Active tells if the shell is active. i.e. Start is previously called.
func (s *Shell) Active() bool {
s.activeMutex.RLock()
defer s.activeMutex.RUnlock()
return s.active
}
// Process runs shell using args in a non-interactive mode.
func (s *Shell) Process(args ...string) error {
return handleInput(s, args)
}
func handleInput(s *Shell, line []string) error {
handled, err := s.handleCommand(line)
if handled || err != nil {
return err
}
// Generic handler
if s.generic == nil {
return errNoHandler
}
c := newContext(s, nil, line)
s.generic(c)
return c.err
}
func handleInterrupt(s *Shell, line []string) error {
if s.interrupt == nil {
return errNoInterruptHandler
}
c := newContext(s, nil, line)
s.interruptCount++
s.interrupt(c, s.interruptCount, strings.Join(line, " "))
return c.err
}
func handleEOF(s *Shell) error {
c := newContext(s, nil, nil)
s.eof(c)
return c.err
}
func (s *Shell) handleCommand(str []string) (bool, error) {
if s.ignoreCase {
for i := range str {
str[i] = strings.ToLower(str[i])
}
}
cmd, args := s.rootCmd.FindCmd(str)
if cmd == nil {
return false, nil
}
// trigger help if func is not registered or auto help is true
if cmd.Func == nil || (s.autoHelp && len(args) == 1 && args[0] == "help") {
s.Println(cmd.HelpText())
return true, nil
}
c := newContext(s, cmd, args)
cmd.Func(c)
return true, c.err
}
func (s *Shell) readLine() (line string, err error) {
consumer := make(chan lineString)
defer close(consumer)
go s.reader.readLine(consumer)
ls := <-consumer
return ls.line, ls.err
}
func (s *Shell) read() ([]string, error) {
s.rawArgs = nil
heredoc := false
eof := ""
// heredoc multiline
lines, err := s.readMultiLinesFunc(func(line string) bool {
if !heredoc {
if strings.Contains(line, "<<") {
s := strings.SplitN(line, "<<", 2)
if eof = strings.TrimSpace(s[1]); eof != "" {
heredoc = true
return true
}
}
} else {
return line != eof
}
return strings.HasSuffix(strings.TrimSpace(line), "\\")
})
s.rawArgs = strings.Fields(lines)
if heredoc {
s := strings.SplitN(lines, "<<", 2)
args, err1 := shlex.Split(s[0])
arg := strings.TrimSuffix(strings.SplitN(s[1], "\n", 2)[1], eof)
args = append(args, arg)
if err1 != nil {
return args, err1
}
return args, err
}
lines = strings.Replace(lines, "\\\n", " \n", -1)
args, err1 := shlex.Split(lines)
if err1 != nil {
return args, err1
}
return args, err
}
func (s *Shell) readMultiLinesFunc(f func(string) bool) (string, error) {
var lines bytes.Buffer
currentLine := 0
var err error
for {
if currentLine == 1 {
// from second line, enable next line prompt.
s.reader.setMultiMode(true)
}
var line string
line, err = s.readLine()
fmt.Fprint(&lines, line)
if !f(line) || err != nil {
break
}
fmt.Fprintln(&lines)
currentLine++
}
if currentLine > 0 {
// if more than one line is read
// revert to standard prompt.
s.reader.setMultiMode(false)
}
return lines.String(), err
}
func (s *Shell) initCompleters() {
s.setCompleter(iCompleter{cmd: s.rootCmd, disabled: func() bool { return s.multiChoiceActive }})
}
func (s *Shell) setCompleter(completer readline.AutoCompleter) {
config := s.reader.scanner.Config.Clone()
config.AutoComplete = completer
s.reader.scanner.SetConfig(config)
}
// CustomCompleter allows use of custom implementation of readline.Autocompleter.
func (s *Shell) CustomCompleter(completer readline.AutoCompleter) {
s.customCompleter = true
s.setCompleter(completer)
}
// AddCmd adds a new command handler.
// This only adds top level commands.
func (s *Shell) AddCmd(cmd *Cmd) {
s.rootCmd.AddCmd(cmd)
}
// DeleteCmd deletes a top level command.
func (s *Shell) DeleteCmd(name string) {
s.rootCmd.DeleteCmd(name)
}
// NotFound adds a generic function for all inputs.
// It is called if the shell input could not be handled by any of the
// added commands.
func (s *Shell) NotFound(f func(*Context)) {
s.generic = f
}
// AutoHelp sets if ishell should trigger help message if
// a command's arg is "help". Defaults to true.
//
// This can be set to false for more control on how help is
// displayed.
func (s *Shell) AutoHelp(enable bool) {
s.autoHelp = enable
}
// Interrupt adds a function to handle keyboard interrupt (Ctrl-c).
// count is the number of consecutive times that Ctrl-c has been pressed.
// i.e. any input apart from Ctrl-c resets count to 0.
func (s *Shell) Interrupt(f func(c *Context, count int, input string)) {
s.interrupt = f
}
// EOF adds a function to handle End of File input (Ctrl-d).
// This overrides the default behaviour which terminates the shell.
func (s *Shell) EOF(f func(c *Context)) {
s.eof = f
}
// SetHistoryPath sets the readline history file location. Use an empty
// string to disable the history file. It is empty by default.
func (s *Shell) SetHistoryPath(path string) {
// Using scanner.SetHistoryPath doesn't initialize things properly and
// history file is never written. Simpler to just create a new readline
// Instance.
config := s.reader.scanner.Config.Clone()
config.HistoryFile = path
s.reader.scanner, _ = readline.NewEx(config)
}
// SetHomeHistoryPath is a convenience method that sets the history path
// in user's home directory.
func (s *Shell) SetHomeHistoryPath(path string) {
var home string
// Try to get the home directory with user.Current.
// If error occurs, use environment variables
user, err := user.Current()
if err == nil {
home = user.HomeDir
} else {
if runtime.GOOS == "windows" {
home = os.Getenv("USERPROFILE")
} else {
home = os.Getenv("HOME")
}
}
abspath := filepath.Join(home, path)
s.SetHistoryPath(abspath)
}
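// Hedged illustration (the file name is an assumption): on Linux,
// shell.SetHomeHistoryPath(".myapp_history") resolves to $HOME/.myapp_history;
// on Windows the home directory falls back to %USERPROFILE%.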
// SetOut sets the writer to write outputs to.
func (s *Shell) SetOut(writer io.Writer) {
s.writer = writer
}
// SetPager sets the pager and its arguments for paged output
func (s *Shell) SetPager(pager string, args []string) {
s.pager = pager
s.pagerArgs = args
}
func initSelected(init []int, max int) []int {
selectedMap := make(map[int]bool)
for _, i := range init {
if i < max {
selectedMap[i] = true
}
}
selected := make([]int, len(selectedMap))
i := 0
for k := range selectedMap {
selected[i] = k
i++
}
return selected
}
func toggle(selected []int, cur int) []int {
for i, s := range selected {
if s == cur {
return append(selected[:i], selected[i+1:]...)
}
}
return append(selected, cur)
}
func (s *Shell) multiChoice(options []string, text string, init []int, multiResults bool) []int {
s.multiChoiceActive = true
defer func() { s.multiChoiceActive = false }()
conf := s.reader.scanner.Config.Clone()
conf.DisableAutoSaveHistory = true
conf.FuncFilterInputRune = func(r rune) (rune, bool) {
switch r {
case 16:
return -1, true
case 14:
return -2, true
case 32:
return -3, true
}
return r, true
}
var selected []int
if multiResults {
selected = initSelected(init, len(options))
}
s.ShowPrompt(false)
defer s.ShowPrompt(true)
// TODO this may not work on windows.
s.Print("\033[?25l")
defer s.Print("\033[?25h")
cur := 0
if len(selected) > 0 {
cur = selected[len(selected)-1]
}
fd := int(os.Stdout.Fd())
_, maxRows, err := readline.GetSize(fd)
if err != nil {
return nil
}
// move cursor to the top
// TODO: this runs on every update, but without it stray output ends up in the readline history
s.Print("\033[0;0H")
offset := fd
update := func() {
strs := buildOptionsStrings(options, selected, cur)
if len(strs) > maxRows-1 {
strs = strs[offset : maxRows+offset-1]
}
s.Print("\033[0;0H")
// clear from the cursor to the end of the screen
s.Print("\033[0J")
s.Println(text)
s.Print(strings.Join(strs, "\n"))
}
var lastKey rune
refresh := make(chan struct{}, 1)
listener := func(line []rune, pos int, key rune) (newline []rune, newPos int, ok bool) {
lastKey = key
if key == -2 {
cur++
if cur >= maxRows+offset-1 {
offset++
}
if cur >= len(options) {
offset = fd
cur = 0
}
} else if key == -1 {
cur--
if cur < offset {
offset--
}
if cur < 0 {
if len(options) > maxRows-1 {
offset = len(options) - maxRows + 1
} else {
offset = fd
}
cur = len(options) - 1
}
} else if key == -3 {
if multiResults {
selected = toggle(selected, cur)
}
}
refresh <- struct{}{}
return
}
conf.Listener = readline.FuncListener(listener)
oldconf := s.reader.scanner.SetConfig(conf)
stop := make(chan struct{})
defer func() {
stop <- struct{}{}
s.Println()
}()
t := time.NewTicker(time.Millisecond * 200)
defer t.Stop()
go func() {
for {
select {
case <-stop:
return
case <-refresh:
update()
case <-t.C:
_, rows, _ := readline.GetSize(fd)
if maxRows != rows {
maxRows = rows
update()
}
}
}
}()
s.ReadLine()
s.reader.scanner.SetConfig(oldconf)
// only handles Ctrl-c for now
// this can be broadened later
switch lastKey {
// Ctrl-c
case 3:
return []int{-1}
}
if multiResults {
return selected
}
return []int{cur}
}
func buildOptionsStrings(options []string, selected []int, index int) []string {
var strs []string
symbol := strMultiChoice
if runtime.GOOS == "windows" {
symbol = strMultiChoiceWin
}
for i, opt := range options {
mark := strMultiChoiceOpen
if selected == nil {
mark = strMultiChoiceSpacer
}
for _, s := range selected {
if s == i {
mark = strMultiChoiceSelect
}
}
if i == index {
cyan := color.New(color.FgCyan).Add(color.Bold).SprintFunc()
strs = append(strs, cyan(symbol+mark+opt))
} else {
strs = append(strs, strings.Repeat(" ", utf8.RuneCountInString(symbol))+mark+opt)
}
}
return strs
}
// IgnoreCase specifies whether commands should not be case sensitive.
// Defaults to false i.e. commands are case sensitive.
// If true, commands must be registered in lower cases.
func (s *Shell) IgnoreCase(ignore bool) {
s.ignoreCase = ignore
}
// ProgressBar returns the progress bar for the shell.
func (s *Shell) ProgressBar() ProgressBar {
return s.progressBar
}
func newContext(s *Shell, cmd *Cmd, args []string) *Context {
if cmd == nil {
cmd = &Cmd{}
}
return &Context{
Actions: s.Actions,
progressBar: copyShellProgressBar(s),
Args: args,
RawArgs: s.rawArgs,
Cmd: *cmd,
contextValues: func() contextValues {
values := contextValues{}
for k := range s.contextValues {
values[k] = s.contextValues[k]
}
return values
}(),
}
}
func copyShellProgressBar(s *Shell) ProgressBar {
sp := s.progressBar.(*progressBarImpl)
p := newProgressBar(s)
p.Indeterminate(sp.indeterminate)
p.Display(sp.display)
p.Prefix(sp.prefix)
p.Suffix(sp.suffix)
p.Final(sp.final)
p.Interval(sp.interval)
return p
}
func getPosition() (int, int, error) {
fd := int(os.Stdout.Fd())
state, err := readline.MakeRaw(fd)
if err != nil {
return 0, 0, err
}
defer readline.Restore(fd, state)
fmt.Printf("\033[6n")
var out string
reader := bufio.NewReader(os.Stdin)
for {
b, err := reader.ReadByte()
if err != nil || b == 'R' {
break
}
if unicode.IsPrint(rune(b)) {
out += string(b)
}
}
var row, col int
_, err = fmt.Sscanf(out, "[%d;%d", &row, &col)
if err != nil {
return 0, 0, err
}
return col, row, nil
}
| [
"\"USERPROFILE\"",
"\"HOME\""
]
| []
| [
"HOME",
"USERPROFILE"
]
| [] | ["HOME", "USERPROFILE"] | go | 2 | 0 | |
pipeline/installs/tekton-multi-user/sync.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os
import base64
kfp_version = os.environ["KFP_VERSION"]
disable_istio_sidecar = os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
mlpipeline_minio_access_key = base64.b64encode(
bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
mlpipeline_minio_secret_key = base64.b64encode(
bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8')
class Controller(BaseHTTPRequestHandler):
def sync(self, parent, children):
# HACK: Currently using serving.kubeflow.org/inferenceservice to identify
# kubeflow user namespaces.
# TODO: let Kubeflow profile controller add a pipeline specific label to
# user namespaces and use that label instead.
pipeline_enabled = parent.get("metadata", {}).get(
"labels", {}).get("serving.kubeflow.org/inferenceservice")
if not pipeline_enabled:
return {"status": {}, "children": []}
# Compute status based on observed state.
desired_status = {
"kubeflow-pipelines-ready": \
len(children["Secret.v1"]) == 1 and \
len(children["ConfigMap.v1"]) == 1 and \
len(children["Deployment.apps/v1"]) == 2 and \
len(children["Service.v1"]) == 2 and \
len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and \
len(children["ServiceRole.rbac.istio.io/v1alpha1"]) == 1 and \
len(children["ServiceRoleBinding.rbac.istio.io/v1alpha1"]) == 1 and \
"True" or "False"
}
# Generate the desired child object(s).
# parent is a namespace
namespace = parent.get("metadata", {}).get("name")
desired_resources = [
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "metadata-grpc-configmap",
"namespace": namespace,
},
"data": {
"METADATA_GRPC_SERVICE_HOST":
"metadata-grpc-service.kubeflow",
"METADATA_GRPC_SERVICE_PORT": "8080",
},
},
# Visualization server related manifests below
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
},
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"image":
"gcr.io/ml-pipeline/visualization-server:" +
kfp_version,
"imagePullPolicy":
"IfNotPresent",
"name":
"ml-pipeline-visualizationserver",
"ports": [{
"containerPort": 8888
}],
}],
"serviceAccountName":
"default-editor",
},
},
},
},
{
"apiVersion": "networking.istio.io/v1alpha3",
"kind": "DestinationRule",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"host": "ml-pipeline-visualizationserver",
"trafficPolicy": {
"tls": {
"mode": "ISTIO_MUTUAL"
}
}
}
},
{
"apiVersion": "rbac.istio.io/v1alpha1",
"kind": "ServiceRole",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"rules": [{
"services": ["ml-pipeline-visualizationserver.*"]
}]
}
},
{
"apiVersion": "rbac.istio.io/v1alpha1",
"kind": "ServiceRoleBinding",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"subjects": [{
"properties": {
"source.principal":
"cluster.local/ns/kubeflow/sa/ml-pipeline"
}
}],
"roleRef": {
"kind": "ServiceRole",
"name": "ml-pipeline-visualizationserver"
}
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"ports": [{
"name": "http",
"port": 8888,
"protocol": "TCP",
"targetPort": 8888,
}],
"selector": {
"app": "ml-pipeline-visualizationserver",
},
},
},
# Artifact fetcher related resources below.
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-ui-artifact"
}
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"name":
"ml-pipeline-ui-artifact",
"image":
"gcr.io/ml-pipeline/frontend:" + kfp_version,
"imagePullPolicy":
"IfNotPresent",
"ports": [{
"containerPort": 3000
}]
}],
"serviceAccountName":
"default-editor"
}
}
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
"labels": {
"app": "ml-pipeline-ui-artifact"
}
},
"spec": {
"ports": [{
"name":
"http", # name is required to let istio understand request protocol
"port": 80,
"protocol": "TCP",
"targetPort": 3000
}],
"selector": {
"app": "ml-pipeline-ui-artifact"
}
}
},
{
"apiVersion": "tekton.dev/v1alpha1",
"kind": "Condition",
"metadata": {
"name": "super-condition",
"namespace": namespace,
},
"spec": {
"check": {
"image": "python:alpine3.6",
"script": ("python -c 'import sys\ninput1=str.rstrip(sys.argv[1])\n"
"input2=str.rstrip(sys.argv[2])\ntry:\n input1=int(input1)\n"
" input2=int(input2)\nexcept:\n input1=str(input1)\nsys.exit(0)"
" if (input1 $(params.operator) input2) else sys.exit(1)' "
"'$(params.operand1)' '$(params.operand2)'")
},
"params": [
{
"name": "operand1",
"type": "string"
},
{
"name": "operand2",
"type": "string"
},
{
"name": "operator",
"type": "string"
}
]
}
},
]
print('Received request:', parent)
print('Desired resources except secrets:', desired_resources)
# Moved after the print argument because this is sensitive data.
desired_resources.append({
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mlpipeline-minio-artifact",
"namespace": namespace,
},
"data": {
"accesskey": mlpipeline_minio_access_key,
"secretkey": mlpipeline_minio_secret_key,
},
})
return {"status": desired_status, "children": desired_resources}
def do_POST(self):
# Serve the sync() function as a JSON webhook.
observed = json.loads(
self.rfile.read(int(self.headers.get("content-length"))))
desired = self.sync(observed["parent"], observed["children"])
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(desired), 'utf-8'))
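# Hedged illustration (values are made up): the sync hook above expects a POST
# body shaped like
#   {"parent": {"metadata": {"name": "user-ns", "labels": {...}}},
#    "children": {"Secret.v1": [...], "ConfigMap.v1": [...], ...}}
# and replies with {"status": {...}, "children": [...]} as returned by sync().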
HTTPServer(("", 80), Controller).serve_forever()
| []
| []
| [
"KFP_VERSION",
"MINIO_SECRET_KEY",
"DISABLE_ISTIO_SIDECAR",
"MINIO_ACCESS_KEY"
]
| [] | ["KFP_VERSION", "MINIO_SECRET_KEY", "DISABLE_ISTIO_SIDECAR", "MINIO_ACCESS_KEY"] | python | 4 | 0 | |
config/wsgi.py | """
WSGI config for acatto_web project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# acatto_web directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'acatto_web'))
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
test/packetimpact/runner/packetimpact_test.go | // Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The runner starts docker containers and networking for a packetimpact test.
package packetimpact_test
import (
"context"
"flag"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net"
"os"
"os/exec"
"path"
"strings"
"testing"
"time"
"github.com/docker/docker/api/types/mount"
"gvisor.dev/gvisor/pkg/test/dockerutil"
"gvisor.dev/gvisor/test/packetimpact/netdevs"
)
// stringList implements flag.Value.
type stringList []string
// String implements flag.Value.String.
func (l *stringList) String() string {
return strings.Join(*l, ",")
}
// Set implements flag.Value.Set.
func (l *stringList) Set(value string) error {
*l = append(*l, value)
return nil
}
var (
native = flag.Bool("native", false, "whether the test should be run natively")
testbenchBinary = flag.String("testbench_binary", "", "path to the testbench binary")
tshark = flag.Bool("tshark", false, "use more verbose tshark in logs instead of tcpdump")
extraTestArgs = stringList{}
expectFailure = flag.Bool("expect_failure", false, "expect that the test will fail when run")
dutAddr = net.IPv4(0, 0, 0, 10)
testbenchAddr = net.IPv4(0, 0, 0, 20)
)
const ctrlPort = "40000"
// logger implements testutil.Logger.
//
// Labels logs based on their source and formats multi-line logs.
type logger string
// Name implements testutil.Logger.Name.
func (l logger) Name() string {
return string(l)
}
// Logf implements testutil.Logger.Logf.
func (l logger) Logf(format string, args ...interface{}) {
lines := strings.Split(fmt.Sprintf(format, args...), "\n")
log.Printf("%s: %s", l, lines[0])
for _, line := range lines[1:] {
log.Printf("%*s %s", len(l), "", line)
}
}
func TestOne(t *testing.T) {
flag.Var(&extraTestArgs, "extra_test_arg", "extra arguments to pass to the testbench")
flag.Parse()
if *testbenchBinary == "" {
t.Fatal("--testbench_binary is missing")
}
dockerutil.EnsureSupportedDockerVersion()
ctx := context.Background()
// Create the networks needed for the test. One control network is needed for
// the gRPC control packets and one test network on which to transmit the test
// packets.
ctrlNet := dockerutil.NewNetwork(ctx, logger("ctrlNet"))
testNet := dockerutil.NewNetwork(ctx, logger("testNet"))
for _, dn := range []*dockerutil.Network{ctrlNet, testNet} {
for {
if err := createDockerNetwork(ctx, dn); err != nil {
t.Log("creating docker network:", err)
const wait = 100 * time.Millisecond
t.Logf("sleeping %s and will try creating docker network again", wait)
// This can fail if another docker network claimed the same IP so we'll
// just try again.
time.Sleep(wait)
continue
}
break
}
defer func(dn *dockerutil.Network) {
if err := dn.Cleanup(ctx); err != nil {
t.Errorf("unable to cleanup container %s: %s", dn.Name, err)
}
}(dn)
// Sanity check.
inspect, err := dn.Inspect(ctx)
if err != nil {
t.Fatalf("failed to inspect network %s: %v", dn.Name, err)
} else if inspect.Name != dn.Name {
t.Fatalf("name mismatch for network want: %s got: %s", dn.Name, inspect.Name)
}
}
tmpDir, err := ioutil.TempDir("", "container-output")
if err != nil {
t.Fatal("creating temp dir:", err)
}
defer os.RemoveAll(tmpDir)
const testOutputDir = "/tmp/testoutput"
// Create the Docker container for the DUT.
var dut *dockerutil.Container
if *native {
dut = dockerutil.MakeNativeContainer(ctx, logger("dut"))
} else {
dut = dockerutil.MakeContainer(ctx, logger("dut"))
}
runOpts := dockerutil.RunOpts{
Image: "packetimpact",
CapAdd: []string{"NET_ADMIN"},
Mounts: []mount.Mount{mount.Mount{
Type: mount.TypeBind,
Source: tmpDir,
Target: testOutputDir,
ReadOnly: false,
}},
}
const containerPosixServerBinary = "/packetimpact/posix_server"
dut.CopyFiles(&runOpts, "/packetimpact", "/test/packetimpact/dut/posix_server")
conf, hostconf, _ := dut.ConfigsFrom(runOpts, containerPosixServerBinary, "--ip=0.0.0.0", "--port="+ctrlPort)
hostconf.AutoRemove = true
hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"}
if err := dut.CreateFrom(ctx, conf, hostconf, nil); err != nil {
t.Fatalf("unable to create container %s: %v", dut.Name, err)
}
defer dut.CleanUp(ctx)
// Add ctrlNet as eth1 and testNet as eth2.
const testNetDev = "eth2"
if err := addNetworks(ctx, dut, dutAddr, []*dockerutil.Network{ctrlNet, testNet}); err != nil {
t.Fatal(err)
}
if err := dut.Start(ctx); err != nil {
t.Fatalf("unable to start container %s: %s", dut.Name, err)
}
if _, err := dut.WaitForOutput(ctx, "Server listening.*\n", 60*time.Second); err != nil {
t.Fatalf("%s on container %s never listened: %s", containerPosixServerBinary, dut.Name, err)
}
dutTestDevice, dutDeviceInfo, err := deviceByIP(ctx, dut, addressInSubnet(dutAddr, *testNet.Subnet))
if err != nil {
t.Fatal(err)
}
remoteMAC := dutDeviceInfo.MAC
remoteIPv6 := dutDeviceInfo.IPv6Addr
// Netstack as DUT doesn't assign IPv6 addresses automatically so do it if
// needed.
if remoteIPv6 == nil {
if _, err := dut.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "add", netdevs.MACToIP(remoteMAC).String(), "scope", "link", "dev", dutTestDevice); err != nil {
t.Fatalf("unable to ip addr add on container %s: %s", dut.Name, err)
}
// Now try again, to make sure that it worked.
_, dutDeviceInfo, err = deviceByIP(ctx, dut, addressInSubnet(dutAddr, *testNet.Subnet))
if err != nil {
t.Fatal(err)
}
remoteIPv6 = dutDeviceInfo.IPv6Addr
if remoteIPv6 == nil {
t.Fatal("unable to set IPv6 address on container", dut.Name)
}
}
// Create the Docker container for the testbench.
testbench := dockerutil.MakeNativeContainer(ctx, logger("testbench"))
tbb := path.Base(*testbenchBinary)
containerTestbenchBinary := "/packetimpact/" + tbb
runOpts = dockerutil.RunOpts{
Image: "packetimpact",
CapAdd: []string{"NET_ADMIN"},
Mounts: []mount.Mount{mount.Mount{
Type: mount.TypeBind,
Source: tmpDir,
Target: testOutputDir,
ReadOnly: false,
}},
}
testbench.CopyFiles(&runOpts, "/packetimpact", "/test/packetimpact/tests/"+tbb)
// Run tcpdump in the test bench unbuffered, without DNS resolution, just on
// the interface with the test packets.
snifferArgs := []string{
"tcpdump",
"-S", "-vvv", "-U", "-n",
"-i", testNetDev,
"-w", testOutputDir + "/dump.pcap",
}
snifferRegex := "tcpdump: listening.*\n"
if *tshark {
// Run tshark in the test bench unbuffered, without DNS resolution, just on
// the interface with the test packets.
snifferArgs = []string{
"tshark", "-V", "-l", "-n", "-i", testNetDev,
"-o", "tcp.check_checksum:TRUE",
"-o", "udp.check_checksum:TRUE",
}
snifferRegex = "Capturing on.*\n"
}
defer func() {
if err := exec.Command("/bin/cp", "-r", tmpDir, os.Getenv("TEST_UNDECLARED_OUTPUTS_DIR")).Run(); err != nil {
t.Error("unable to copy container output files:", err)
}
}()
conf, hostconf, _ = testbench.ConfigsFrom(runOpts, snifferArgs...)
hostconf.AutoRemove = true
hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"}
if err := testbench.CreateFrom(ctx, conf, hostconf, nil); err != nil {
t.Fatalf("unable to create container %s: %s", testbench.Name, err)
}
defer testbench.CleanUp(ctx)
// Add ctrlNet as eth1 and testNet as eth2.
if err := addNetworks(ctx, testbench, testbenchAddr, []*dockerutil.Network{ctrlNet, testNet}); err != nil {
t.Fatal(err)
}
if err := testbench.Start(ctx); err != nil {
t.Fatalf("unable to start container %s: %s", testbench.Name, err)
}
// Kill so that it will flush output.
defer func() {
time.Sleep(1 * time.Second)
testbench.Exec(ctx, dockerutil.ExecOpts{}, "killall", snifferArgs[0])
}()
if _, err := testbench.WaitForOutput(ctx, snifferRegex, 60*time.Second); err != nil {
t.Fatalf("sniffer on %s never listened: %s", dut.Name, err)
}
// Because the Linux kernel receives the SYN-ACK but didn't send the SYN it
// will issue an RST. To prevent this IPtables can be used to filter out all
// incoming packets. The raw socket that packetimpact tests use will still see
// everything.
for _, bin := range []string{"iptables", "ip6tables"} {
if logs, err := testbench.Exec(ctx, dockerutil.ExecOpts{}, bin, "-A", "INPUT", "-i", testNetDev, "-p", "tcp", "-j", "DROP"); err != nil {
t.Fatalf("unable to Exec %s on container %s: %s, logs from testbench:\n%s", bin, testbench.Name, err, logs)
}
}
// FIXME(b/156449515): Some piece of the system has a race. The old
// bash script version had a sleep, so we have one too. The race should
// be fixed and this sleep removed.
time.Sleep(time.Second)
// Start a packetimpact test on the test bench. The packetimpact test sends
// and receives packets and also sends POSIX socket commands to the
// posix_server to be executed on the DUT.
testArgs := []string{containerTestbenchBinary}
testArgs = append(testArgs, extraTestArgs...)
testArgs = append(testArgs,
"--posix_server_ip", addressInSubnet(dutAddr, *ctrlNet.Subnet).String(),
"--posix_server_port", ctrlPort,
"--remote_ipv4", addressInSubnet(dutAddr, *testNet.Subnet).String(),
"--local_ipv4", addressInSubnet(testbenchAddr, *testNet.Subnet).String(),
"--remote_ipv6", remoteIPv6.String(),
"--remote_mac", remoteMAC.String(),
"--remote_interface_id", fmt.Sprintf("%d", dutDeviceInfo.ID),
"--device", testNetDev,
fmt.Sprintf("--native=%t", *native),
)
testbenchLogs, err := testbench.Exec(ctx, dockerutil.ExecOpts{}, testArgs...)
if (err != nil) != *expectFailure {
var dutLogs string
if logs, err := dut.Logs(ctx); err != nil {
dutLogs = fmt.Sprintf("failed to fetch DUT logs: %s", err)
} else {
dutLogs = logs
}
t.Errorf(`test error: %v, expect failure: %t
====== Begin of DUT Logs ======
%s
====== End of DUT Logs ======
====== Begin of Testbench Logs ======
%s
====== End of Testbench Logs ======`,
err, *expectFailure, dutLogs, testbenchLogs)
}
}
func addNetworks(ctx context.Context, d *dockerutil.Container, addr net.IP, networks []*dockerutil.Network) error {
for _, dn := range networks {
ip := addressInSubnet(addr, *dn.Subnet)
// Connect to the network with the specified IP address.
if err := dn.Connect(ctx, d, ip.String(), ""); err != nil {
return fmt.Errorf("unable to connect container %s to network %s: %w", d.Name, dn.Name, err)
}
}
return nil
}
// addressInSubnet combines the subnet provided with the address and returns a
// new address. The return address bits come from the subnet where the mask is 1
// and from the ip address where the mask is 0.
func addressInSubnet(addr net.IP, subnet net.IPNet) net.IP {
var octets []byte
for i := 0; i < 4; i++ {
octets = append(octets, (subnet.IP.To4()[i]&subnet.Mask[i])+(addr.To4()[i]&(^subnet.Mask[i])))
}
return net.IP(octets)
}
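// Hedged illustration (addresses are made up): with addr 0.0.0.10 and subnet
// 192.168.7.0/24, addressInSubnet returns 192.168.7.10: network bits come from
// the subnet and host bits from addr, which is how dutAddr and testbenchAddr
// are mapped into each randomly generated test network.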
// createDockerNetwork makes a randomly-named network that will start with the
// namePrefix. The network will be a random /24 subnet.
func createDockerNetwork(ctx context.Context, n *dockerutil.Network) error {
randSource := rand.NewSource(time.Now().UnixNano())
r1 := rand.New(randSource)
	// Class C, 192.0.0.0 to 223.255.255.255, traditionally has mask 24.
ip := net.IPv4(byte(r1.Intn(224-192)+192), byte(r1.Intn(256)), byte(r1.Intn(256)), 0)
n.Subnet = &net.IPNet{
IP: ip,
Mask: ip.DefaultMask(),
}
return n.Create(ctx)
}
// deviceByIP finds a deviceInfo and device name from an IP address.
func deviceByIP(ctx context.Context, d *dockerutil.Container, ip net.IP) (string, netdevs.DeviceInfo, error) {
out, err := d.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "show")
if err != nil {
return "", netdevs.DeviceInfo{}, fmt.Errorf("listing devices on %s container: %w", d.Name, err)
}
devs, err := netdevs.ParseDevices(out)
if err != nil {
return "", netdevs.DeviceInfo{}, fmt.Errorf("parsing devices from %s container: %w", d.Name, err)
}
testDevice, deviceInfo, err := netdevs.FindDeviceByIP(ip, devs)
if err != nil {
return "", netdevs.DeviceInfo{}, fmt.Errorf("can't find deviceInfo for container %s: %w", d.Name, err)
}
return testDevice, deviceInfo, nil
}
| [
"\"TEST_UNDECLARED_OUTPUTS_DIR\""
]
| []
| [
"TEST_UNDECLARED_OUTPUTS_DIR"
]
| [] | ["TEST_UNDECLARED_OUTPUTS_DIR"] | go | 1 | 0 | |
jira/config.go | package jira
import (
"encoding/base64"
"fmt"
"net/url"
"os"
"strconv"
"strings"
"time"
)
type Config struct {
BaseURL string
Query string
Filter string
FieldNames string
MaxResult int
ApiVersion string
TimeUnit string
HoursPerDay int
DaysPerMonth int
Worklog bool
TargetYearMonth string
clock func() time.Time
}
const (
maxWorkerSize = 10
defaultMaxResult = 50
defaultHoursPerDay = 8
defaultDaysPerMonth = 24
defaultJiraRestApiVersion = "3"
usageText = `Usage of jira-timespent-report (v%s):
$ jira-timespent-report [options]
Example:
# get csv report by cli
$ AUTH_USER=yyyy AUTH_TOKEN=aaaabbbb jira-timespent-report -url https://your-jira.atlassian.net -maxresult 10 -unit dd -query "status = Closed" -targetym 2020-08
# get csv report by http server
$ AUTH_USER=yyyy AUTH_TOKEN=aaaabbbb jira-timespent-report -server &
$ curl localhost:8080/?url=https://your-jira.atlassian.net&maxresult=10&unit=dd&query=status+%%3DClosed&targetym=2020-08
Options:
`
)
var (
config = &Config{clock: time.Now}
defaultFieldText = map[string]string{
"summary": "概要",
"status": "ステータス",
"timeoriginalestimate": "初期見積もり",
"timespent": "消費時間",
"aggregatetimeoriginalestimate": "Σ初期見積もり",
"aggregatetimespent": "Σ消費時間",
"started": "開始日時",
"author.displayname": "表示名",
"author.emailaddress": "メールアドレス",
"timespentseconds": "消費時間",
}
)
func (c *Config) SetQueryParams(queryParams url.Values) {
for key, vs := range queryParams {
if len(vs) == 0 {
continue
}
value := vs[0]
switch strings.ToLower(key) {
case "baseurl":
c.BaseURL = value
case "query":
c.Query = value
case "filter":
c.Filter = value
case "fieldnames":
c.FieldNames = value
case "maxresult":
i, _ := strconv.Atoi(value)
c.MaxResult = i
case "apiversion":
c.ApiVersion = value
case "timeunit":
c.TimeUnit = value
case "hoursperday":
i, _ := strconv.Atoi(value)
c.HoursPerDay = i
case "dayspermonth":
i, _ := strconv.Atoi(value)
c.DaysPerMonth = i
case "worklog":
b, _ := strconv.ParseBool(value)
c.Worklog = b
case "targetyearmonth":
c.TargetYearMonth = value
}
}
}
func (c *Config) fields() []string {
if c.Worklog {
return []string{
"started",
"author.displayname",
"author.emailaddress",
"timespentseconds",
}
}
return strings.Split(c.FieldNames, ",")
}
func (c *Config) checkAuthEnv() error {
user := os.Getenv("AUTH_USER")
token := os.Getenv("AUTH_TOKEN")
if len(user) == 0 || len(token) == 0 {
return fmt.Errorf("環境変数 AUTH_USER/AUTH_TOKEN が未定義")
}
return nil
}
func (c *Config) basicAuthorization() string {
if err := c.checkAuthEnv(); err != nil {
panic(err)
}
user := os.Getenv("AUTH_USER")
token := os.Getenv("AUTH_TOKEN")
return fmt.Sprintf("Basic %s", base64.URLEncoding.EncodeToString([]byte(user+":"+token)))
}
func (c *Config) dateCondition() (string, bool) {
targetTime, err := time.Parse("2006-01-02", c.TargetYearMonth+"-01")
if err != nil {
return "", false
}
currentTime := c.clock()
if targetTime.Year() > currentTime.Year() {
return "", false
}
offset := 0
if targetTime.Year() == currentTime.Year() {
if targetTime.Month() > currentTime.Month() {
return "", false
}
offset = int(targetTime.Month() - currentTime.Month())
} else {
monthDiff := 12 - int(targetTime.Month()) + int(currentTime.Month())
yearDiff := (currentTime.Year() - targetTime.Year() - 1) * 12
offset = -monthDiff - yearDiff
}
if c.Worklog {
return fmt.Sprintf("worklogDate >= startOfMonth(%d) AND worklogDate <= endOfMonth(%d)", offset, offset), true
}
return fmt.Sprintf("updated >= startOfMonth(%d) AND updated <= endOfMonth(%d)", offset, offset), true
}
func (c *Config) FilterURL(filterID string) (*url.URL, error) {
u, err := url.Parse(c.BaseURL)
if err != nil {
return nil, fmt.Errorf("url.Parse error: %v\nBaseURL=[%v]", err, c.BaseURL)
}
u.Path = fmt.Sprintf("/rest/api/%s/filter/%s", c.ApiVersion, filterID)
return u, nil
}
func (c *Config) SearchURL() (*url.URL, error) {
u, err := url.Parse(c.BaseURL)
if err != nil {
return nil, fmt.Errorf("url.Parse error: %v\nBaseURL=[%v]", err, c.BaseURL)
}
u.Path = fmt.Sprintf("/rest/api/%s/search", c.ApiVersion)
return u, nil
}
func (c *Config) WorklogURL(key string, queryParams url.Values) (*url.URL, error) {
u, err := url.Parse(c.BaseURL)
if err != nil {
return nil, fmt.Errorf("url.Parse error: %v\nBaseURL=[%v]", err, c.BaseURL)
}
u.Path = fmt.Sprintf("/rest/api/%s/issue/%s/worklog", c.ApiVersion, key)
u.RawQuery = queryParams.Encode()
return u, nil
}
func (c *Config) WithTimeUnit(second int) float32 {
switch strings.ToLower(c.TimeUnit) {
case "h", "hh":
return float32(second) / float32(60*60)
case "d", "dd":
return float32(second) / float32(60*60*c.HoursPerDay)
case "m", "mm":
return float32(second) / float32(60*60*c.HoursPerDay*c.DaysPerMonth)
default:
return 0.0
}
}
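// Hedged illustration (values are made up): with TimeUnit "dd" and
// HoursPerDay 8, WithTimeUnit(4*3600) returns 0.5 (half a working day);
// with TimeUnit "h" the same input returns 4.0.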
func (c *Config) TargetMonth() (*time.Time, error) {
if len(c.TargetYearMonth) > 0 {
t, err := time.Parse("2006-01-02", c.TargetYearMonth+"-01")
if err != nil {
return nil, fmt.Errorf("TargetMonth: error %v", err)
}
return &t, nil
}
t := time.Now().AddDate(0, -1, 0)
return &t, nil
}
func (c *Config) StartedAfter() string {
t, err := c.TargetMonth()
if err != nil {
return ""
}
startOfMonth := time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
return fmt.Sprintf("%d", startOfMonth.UnixNano()/int64(time.Millisecond))
}
| [
"\"AUTH_USER\"",
"\"AUTH_TOKEN\"",
"\"AUTH_USER\"",
"\"AUTH_TOKEN\""
]
| []
| [
"AUTH_USER",
"AUTH_TOKEN"
]
| [] | ["AUTH_USER", "AUTH_TOKEN"] | go | 2 | 0 | |
openchain/container/util/write.go | package util
import (
"archive/tar"
"bufio"
"fmt"
"github.com/op/go-logging"
"io"
"os"
"path/filepath"
"strings"
"time"
)
var vmLogger = logging.MustGetLogger("container")
func WriteGopathSrc(tw *tar.Writer, excludeDir string) error {
gopath := os.Getenv("GOPATH")
	// Strip a trailing separator from GOPATH if present.
	if strings.LastIndex(gopath, "/") == len(gopath)-1 {
		gopath = gopath[:len(gopath)-1]
	}
rootDirectory := fmt.Sprintf("%s%s%s", os.Getenv("GOPATH"), string(os.PathSeparator), "src")
vmLogger.Info("rootDirectory = %s", rootDirectory)
//append "/" if necessary
if excludeDir != "" && strings.LastIndex(excludeDir, "/") < len(excludeDir) - 1 {
excludeDir = excludeDir + "/"
}
rootDirLen := len(rootDirectory)
walkFn := func(path string, info os.FileInfo, err error) error {
// If path includes .git, ignore
if strings.Contains(path, ".git") {
return nil
}
if info.Mode().IsDir() {
return nil
}
//exclude any files with excludeDir prefix. They should already be in the tar
if excludeDir != "" && strings.Index(path, excludeDir) == rootDirLen + 1 {
//1 for "/"
return nil
}
// Because of scoping we can reference the external rootDirectory variable
newPath := fmt.Sprintf("src%s", path[rootDirLen:])
//newPath := path[len(rootDirectory):]
if len(newPath) == 0 {
return nil
}
err = WriteFileToPackage(path, newPath, tw)
if err != nil {
return fmt.Errorf("Error writing file to package: %s", err)
}
return nil
}
if err := filepath.Walk(rootDirectory, walkFn); err != nil {
vmLogger.Info("Error walking rootDirectory: %s", err)
return err
}
// Write the tar file out
if err := tw.Close(); err != nil {
return err
}
//ioutil.WriteFile("/tmp/chaincode_deployment.tar", inputbuf.Bytes(), 0644)
return nil
}
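// Hedged usage sketch (the buffer and exclude path are assumptions, not part of
// this package):
//
//	var buf bytes.Buffer
//	tw := tar.NewWriter(&buf)
//	// Files under $GOPATH/src/github.com/example/chaincode are skipped.
//	if err := WriteGopathSrc(tw, "github.com/example/chaincode"); err != nil {
//		// handle error; on success WriteGopathSrc closes tw itself.
//	}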
func WriteFileToPackage(localpath string, packagepath string, tw *tar.Writer) error {
fd, err := os.Open(localpath)
if err != nil {
return fmt.Errorf("%s: %s", localpath, err)
}
defer fd.Close()
is := bufio.NewReader(fd)
return WriteStreamToPackage(is, localpath, packagepath, tw)
}
func WriteStreamToPackage(is io.Reader, localpath string, packagepath string, tw *tar.Writer) error {
info, err := os.Stat(localpath)
if err != nil {
return fmt.Errorf("%s: %s", localpath, err)
}
header, err := tar.FileInfoHeader(info, localpath)
if err != nil {
return fmt.Errorf("Error getting FileInfoHeader: %s", err)
}
//Let's take the variance out of the tar, make headers identical by using zero time
oldname := header.Name
var zeroTime time.Time
header.AccessTime = zeroTime
header.ModTime = zeroTime
header.ChangeTime = zeroTime
header.Name = packagepath
if err = tw.WriteHeader(header); err != nil {
return fmt.Errorf("Error write header for (path: %s, oldname:%s,newname:%s,sz:%d) : %s", localpath, oldname, packagepath, header.Size, err)
}
if _, err := io.Copy(tw, is); err != nil {
return fmt.Errorf("Error copy (path: %s, oldname:%s,newname:%s,sz:%d) : %s", localpath, oldname, packagepath, header.Size, err)
}
return nil
}
| [
"\"GOPATH\"",
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
certbot/certbot/_internal/client.py | """Certbot client API."""
import datetime
import logging
import platform
from typing import Optional
from cryptography.hazmat.backends import default_backend
# See https://github.com/pyca/cryptography/issues/4275
from cryptography.hazmat.primitives.asymmetric.rsa import generate_private_key # type: ignore
import josepy as jose
import OpenSSL
import zope.component
from acme import client as acme_client
from acme import crypto_util as acme_crypto_util
from acme import errors as acme_errors
from acme import messages
import certbot
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot import util
from certbot._internal import account
from certbot._internal import auth_handler
from certbot._internal import cli
from certbot._internal import constants
from certbot._internal import eff
from certbot._internal import error_handler
from certbot._internal import storage
from certbot._internal.plugins import selection as plugin_selection
from certbot.compat import os
from certbot.display import ops as display_ops
logger = logging.getLogger(__name__)
def acme_from_config_key(config, key, regr=None):
"Wrangle ACME client construction"
# TODO: Allow for other alg types besides RS256
net = acme_client.ClientNetwork(key, account=regr, verify_ssl=(not config.no_verify_ssl),
user_agent=determine_user_agent(config))
return acme_client.BackwardsCompatibleClientV2(net, key, config.server)
def determine_user_agent(config):
"""
Set a user_agent string in the config based on the choice of plugins.
(this wasn't knowable at construction time)
:returns: the client's User-Agent string
:rtype: `str`
"""
# WARNING: To ensure changes are in line with Certbot's privacy
# policy, talk to a core Certbot team member before making any
# changes here.
if config.user_agent is None:
ua = ("CertbotACMEClient/{0} ({1}; {2}{8}) Authenticator/{3} Installer/{4} "
"({5}; flags: {6}) Py/{7}")
if os.environ.get("CERTBOT_DOCS") == "1":
cli_command = "certbot(-auto)"
os_info = "OS_NAME OS_VERSION"
python_version = "major.minor.patchlevel"
else:
cli_command = cli.cli_command
os_info = util.get_os_info_ua()
python_version = platform.python_version()
ua = ua.format(certbot.__version__, cli_command, os_info,
config.authenticator, config.installer, config.verb,
ua_flags(config), python_version,
"; " + config.user_agent_comment if config.user_agent_comment else "")
else:
ua = config.user_agent
return ua
def ua_flags(config):
"Turn some very important CLI flags into clues in the user agent."
if isinstance(config, DummyConfig):
return "FLAGS"
flags = []
if config.duplicate:
flags.append("dup")
if config.renew_by_default:
flags.append("frn")
if config.allow_subset_of_names:
flags.append("asn")
if config.noninteractive_mode:
flags.append("n")
hook_names = ("pre", "post", "renew", "manual_auth", "manual_cleanup")
hooks = [getattr(config, h + "_hook") for h in hook_names]
if any(hooks):
flags.append("hook")
return " ".join(flags)
class DummyConfig:
"Shim for computing a sample user agent."
def __init__(self):
self.authenticator = "XXX"
self.installer = "YYY"
self.user_agent = None
self.verb = "SUBCOMMAND"
def __getattr__(self, name):
"Any config properties we might have are None."
return None
def sample_user_agent():
"Document what this Certbot's user agent string will be like."
return determine_user_agent(DummyConfig())
def register(config, account_storage, tos_cb=None):
"""Register new account with an ACME CA.
This function takes care of generating fresh private key,
registering the account, optionally accepting CA Terms of Service
and finally saving the account. It should be called prior to
initialization of `Client`, unless account has already been created.
:param .IConfig config: Client configuration.
:param .AccountStorage account_storage: Account storage where newly
registered account will be saved to. Save happens only after TOS
acceptance step, so any account private keys or
`.RegistrationResource` will not be persisted if `tos_cb`
returns ``False``.
:param tos_cb: If ACME CA requires the user to accept a Terms of
Service before registering account, client action is
necessary. For example, a CLI tool would prompt the user
acceptance. `tos_cb` must be a callable that should accept
`.RegistrationResource` and return a `bool`: ``True`` iff the
Terms of Service present in the contained
`.Registration.terms_of_service` is accepted by the client, and
``False`` otherwise. ``tos_cb`` will be called only if the
client action is necessary, i.e. when ``terms_of_service is not
None``. This argument is optional, if not supplied it will
default to automatic acceptance!
:raises certbot.errors.Error: In case of any client problems, in
particular registration failure, or unaccepted Terms of Service.
:raises acme.errors.Error: In case of any protocol problems.
:returns: Newly registered and saved account, as well as protocol
API handle (should be used in `Client` initialization).
:rtype: `tuple` of `.Account` and `acme.client.Client`
"""
# Log non-standard actions, potentially wrong API calls
if account_storage.find_all():
logger.info("There are already existing accounts for %s", config.server)
if config.email is None:
if not config.register_unsafely_without_email:
msg = ("No email was provided and "
"--register-unsafely-without-email was not present.")
logger.warning(msg)
raise errors.Error(msg)
if not config.dry_run:
logger.debug("Registering without email!")
# If --dry-run is used, and there is no staging account, create one with no email.
if config.dry_run:
config.email = None
# Each new registration shall use a fresh new key
rsa_key = generate_private_key(
public_exponent=65537,
key_size=config.rsa_key_size,
backend=default_backend())
key = jose.JWKRSA(key=jose.ComparableRSAKey(rsa_key))
acme = acme_from_config_key(config, key)
# TODO: add phone?
regr = perform_registration(acme, config, tos_cb)
acc = account.Account(regr, key)
account_storage.save(acc, acme)
eff.prepare_subscription(config, acc)
return acc, acme
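# Usage sketch (illustrative, not part of the original module): a caller with a
# prepared IConfig-like `config` typically registers through an account storage
# such as account.AccountFileStorage; the tos_cb shown here simply accepts the
# Terms of Service and is an assumption for the example.
#
#   account_storage = account.AccountFileStorage(config)
#   acc, acme = register(config, account_storage, tos_cb=lambda regr: True)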
def perform_registration(acme, config, tos_cb):
"""
Actually register new account, trying repeatedly if there are email
problems
:param acme.client.Client client: ACME client object.
:param .IConfig config: Client configuration.
:param Callable tos_cb: a callback to handle Term of Service agreement.
:returns: Registration Resource.
:rtype: `acme.messages.RegistrationResource`
"""
eab_credentials_supplied = config.eab_kid and config.eab_hmac_key
if eab_credentials_supplied:
account_public_key = acme.client.net.key.public_key()
eab = messages.ExternalAccountBinding.from_data(account_public_key=account_public_key,
kid=config.eab_kid,
hmac_key=config.eab_hmac_key,
directory=acme.client.directory)
else:
eab = None
if acme.external_account_required():
if not eab_credentials_supplied:
msg = ("Server requires external account binding."
" Please use --eab-kid and --eab-hmac-key.")
raise errors.Error(msg)
try:
newreg = messages.NewRegistration.from_data(email=config.email,
external_account_binding=eab)
return acme.new_account_and_tos(newreg, tos_cb)
except messages.Error as e:
if e.code == "invalidEmail" or e.code == "invalidContact":
if config.noninteractive_mode:
msg = ("The ACME server believes %s is an invalid email address. "
"Please ensure it is a valid email and attempt "
"registration again." % config.email)
raise errors.Error(msg)
config.email = display_ops.get_email(invalid=True)
return perform_registration(acme, config, tos_cb)
raise
class Client:
"""Certbot's client.
:ivar .IConfig config: Client configuration.
:ivar .Account account: Account registered with `register`.
:ivar .AuthHandler auth_handler: Authorizations handler that will
dispatch DV challenges to appropriate authenticators
(providing `.IAuthenticator` interface).
:ivar .IAuthenticator auth: Prepared (`.IAuthenticator.prepare`)
authenticator that can solve ACME challenges.
:ivar .IInstaller installer: Installer.
:ivar acme.client.BackwardsCompatibleClientV2 acme: Optional ACME
client API handle. You might already have one from `register`.
"""
def __init__(self, config, account_, auth, installer, acme=None):
"""Initialize a client."""
self.config = config
self.account = account_
self.auth = auth
self.installer = installer
# Initialize ACME if account is provided
if acme is None and self.account is not None:
acme = acme_from_config_key(config, self.account.key, self.account.regr)
self.acme = acme
if auth is not None:
self.auth_handler = auth_handler.AuthHandler(
auth, self.acme, self.account, self.config.pref_challs)
else:
self.auth_handler = None
def obtain_certificate_from_csr(self, csr, orderr=None):
"""Obtain certificate.
:param .util.CSR csr: PEM-encoded Certificate Signing
Request. The key used to generate this CSR can be different
than `authkey`.
:param acme.messages.OrderResource orderr: contains authzrs
:returns: certificate and chain as PEM byte strings
:rtype: tuple
"""
if self.auth_handler is None:
msg = ("Unable to obtain certificate because authenticator is "
"not set.")
logger.warning(msg)
raise errors.Error(msg)
if self.account.regr is None:
raise errors.Error("Please register with the ACME server first.")
logger.debug("CSR: %s", csr)
if orderr is None:
orderr = self._get_order_and_authorizations(csr.data, best_effort=False)
deadline = datetime.datetime.now() + datetime.timedelta(seconds=90)
get_alt_chains = self.config.preferred_chain is not None
orderr = self.acme.finalize_order(orderr, deadline,
fetch_alternative_chains=get_alt_chains)
fullchain = orderr.fullchain_pem
if get_alt_chains and orderr.alternative_fullchains_pem:
fullchain = crypto_util.find_chain_with_issuer([fullchain] + \
orderr.alternative_fullchains_pem,
self.config.preferred_chain,
not self.config.dry_run)
cert, chain = crypto_util.cert_and_chain_from_fullchain(fullchain)
return cert.encode(), chain.encode()
def obtain_certificate(self, domains, old_keypath=None):
"""Obtains a certificate from the ACME server.
`.register` must be called before `.obtain_certificate`
:param list domains: domains to get a certificate
:returns: certificate as PEM string, chain as PEM string,
newly generated private key (`.util.Key`), and DER-encoded
Certificate Signing Request (`.util.CSR`).
:rtype: tuple
"""
# We need to determine the key path, key PEM data, CSR path,
# and CSR PEM data. For a dry run, the paths are None because
# they aren't permanently saved to disk. For a lineage with
# --reuse-key, the key path and PEM data are derived from an
# existing file.
if old_keypath is not None:
# We've been asked to reuse a specific existing private key.
# Therefore, we'll read it now and not generate a new one in
# either case below.
#
# We read in bytes here because the type of `key.pem`
# created below is also bytes.
with open(old_keypath, "rb") as f:
keypath = old_keypath
keypem = f.read()
key: Optional[util.Key] = util.Key(file=keypath, pem=keypem)
logger.info("Reusing existing private key from %s.", old_keypath)
else:
# The key is set to None here but will be created below.
key = None
key_size = self.config.rsa_key_size
elliptic_curve = None
# key-type defaults to a list, but we are only handling 1 currently
if isinstance(self.config.key_type, list):
self.config.key_type = self.config.key_type[0]
if self.config.elliptic_curve and self.config.key_type == 'ecdsa':
elliptic_curve = self.config.elliptic_curve
self.config.auth_chain_path = "./chain-ecdsa.pem"
self.config.auth_cert_path = "./cert-ecdsa.pem"
self.config.key_path = "./key-ecdsa.pem"
elif self.config.rsa_key_size and self.config.key_type.lower() == 'rsa':
key_size = self.config.rsa_key_size
# Create CSR from names
if self.config.dry_run:
key = key or util.Key(
file=None,
pem=crypto_util.make_key(
bits=key_size,
elliptic_curve=elliptic_curve,
key_type=self.config.key_type,
),
)
csr = util.CSR(file=None, form="pem",
data=acme_crypto_util.make_csr(
key.pem, domains, self.config.must_staple))
else:
key = key or crypto_util.init_save_key(
key_size=key_size,
key_dir=self.config.key_dir,
key_type=self.config.key_type,
elliptic_curve=elliptic_curve,
)
csr = crypto_util.init_save_csr(key, domains, self.config.csr_dir)
orderr = self._get_order_and_authorizations(csr.data, self.config.allow_subset_of_names)
authzr = orderr.authorizations
auth_domains = set(a.body.identifier.value for a in authzr)
successful_domains = [d for d in domains if d in auth_domains]
# allow_subset_of_names is currently disabled for wildcard
# certificates. The reason for this, and for checking allow_subset_of_names
# below, is that successful_domains == domains is never true if
# domains contains a wildcard, because the ACME spec forbids identifiers
# in authzs from containing a wildcard character.
if self.config.allow_subset_of_names and successful_domains != domains:
if not self.config.dry_run:
os.remove(key.file)
os.remove(csr.file)
return self.obtain_certificate(successful_domains)
else:
cert, chain = self.obtain_certificate_from_csr(csr, orderr)
return cert, chain, key, csr
def _get_order_and_authorizations(self, csr_pem: str,
best_effort: bool) -> messages.OrderResource:
"""Request a new order and complete its authorizations.
:param str csr_pem: A CSR in PEM format.
:param bool best_effort: True if failing to complete all
authorizations should not raise an exception
:returns: order resource containing its completed authorizations
:rtype: acme.messages.OrderResource
"""
try:
orderr = self.acme.new_order(csr_pem)
except acme_errors.WildcardUnsupportedError:
raise errors.Error("The currently selected ACME CA endpoint does"
" not support issuing wildcard certificates.")
# For a dry run, ensure we have an order with fresh authorizations
if orderr and self.config.dry_run:
deactivated, failed = self.auth_handler.deactivate_valid_authorizations(orderr)
if deactivated:
logger.debug("Recreating order after authz deactivations")
orderr = self.acme.new_order(csr_pem)
if failed:
logger.warning("Certbot was unable to obtain fresh authorizations for every domain"
". The dry run will continue, but results may not be accurate.")
authzr = self.auth_handler.handle_authorizations(orderr, best_effort)
return orderr.update(authorizations=authzr)
def obtain_and_enroll_certificate(self, domains, certname):
"""Obtain and enroll certificate.
Get a new certificate for the specified domains using the specified
authenticator and installer, and then create a new renewable lineage
containing it.
:param domains: domains to request a certificate for
:type domains: `list` of `str`
:param certname: requested name of lineage
:type certname: `str` or `None`
:returns: A new :class:`certbot._internal.storage.RenewableCert` instance
referred to the enrolled cert lineage, False if the cert could not
be obtained, or None if doing a successful dry run.
"""
cert, chain, key, _ = self.obtain_certificate(domains)
if (self.config.config_dir != constants.CLI_DEFAULTS["config_dir"] or
self.config.work_dir != constants.CLI_DEFAULTS["work_dir"]):
logger.info(
"Non-standard path(s), might not work with crontab installed "
"by your operating system package manager")
new_name = self._choose_lineagename(domains, certname)
if self.config.dry_run:
logger.debug("Dry run: Skipping creating new lineage for %s",
new_name)
return None
return storage.RenewableCert.new_lineage(
new_name, cert,
key.pem, chain,
self.config)
def _choose_lineagename(self, domains, certname):
"""Chooses a name for the new lineage.
:param domains: domains in certificate request
:type domains: `list` of `str`
:param certname: requested name of lineage
:type certname: `str` or `None`
:returns: lineage name that should be used
:rtype: str
"""
if certname:
return certname
elif util.is_wildcard_domain(domains[0]):
# Don't make files and directories starting with *.
return domains[0][2:]
return domains[0]
def save_certificate(self, cert_pem, chain_pem,
cert_path, chain_path, fullchain_path):
"""Saves the certificate received from the ACME server.
:param str cert_pem:
:param str chain_pem:
:param str cert_path: Candidate path to a certificate.
:param str chain_path: Candidate path to a certificate chain.
:param str fullchain_path: Candidate path to a full cert chain.
:returns: cert_path, chain_path, and fullchain_path as absolute
paths to the actual files
:rtype: `tuple` of `str`
:raises IOError: If unable to find room to write the cert files
"""
for path in cert_path, chain_path, fullchain_path:
util.make_or_verify_dir(os.path.dirname(path), 0o755, self.config.strict_permissions)
cert_file, abs_cert_path = _open_pem_file('cert_path', cert_path)
try:
cert_file.write(cert_pem)
finally:
cert_file.close()
logger.info("Server issued certificate; certificate written to %s",
abs_cert_path)
chain_file, abs_chain_path =\
_open_pem_file('chain_path', chain_path)
fullchain_file, abs_fullchain_path =\
_open_pem_file('fullchain_path', fullchain_path)
_save_chain(chain_pem, chain_file)
_save_chain(cert_pem + chain_pem, fullchain_file)
return abs_cert_path, abs_chain_path, abs_fullchain_path
def deploy_certificate(self, domains, privkey_path,
cert_path, chain_path, fullchain_path):
"""Install certificate
:param list domains: list of domains to install the certificate
:param str privkey_path: path to certificate private key
:param str cert_path: certificate file path (optional)
:param str chain_path: chain file path
"""
if self.installer is None:
logger.warning("No installer specified, client is unable to deploy "
"the certificate")
raise errors.Error("No installer available")
chain_path = None if chain_path is None else os.path.abspath(chain_path)
msg = ("Unable to install the certificate")
with error_handler.ErrorHandler(self._recovery_routine_with_msg, msg):
for dom in domains:
self.installer.deploy_cert(
domain=dom, cert_path=os.path.abspath(cert_path),
key_path=os.path.abspath(privkey_path),
chain_path=chain_path,
fullchain_path=fullchain_path)
self.installer.save() # needed by the Apache plugin
self.installer.save("Deployed ACME Certificate")
msg = ("We were unable to install your certificate, "
"however, we successfully restored your "
"server to its prior configuration.")
with error_handler.ErrorHandler(self._rollback_and_restart, msg):
# sites may have been enabled / final cleanup
self.installer.restart()
def enhance_config(self, domains, chain_path, redirect_default=True):
"""Enhance the configuration.
:param list domains: list of domains to configure
:param chain_path: chain file path
:type chain_path: `str` or `None`
:param redirect_default: boolean value that the "redirect" flag should default to
:raises .errors.Error: if no installer is specified in the
client.
"""
if self.installer is None:
logger.warning("No installer is specified, there isn't any "
"configuration to enhance.")
raise errors.Error("No installer available")
enhanced = False
enhancement_info = (
("hsts", "ensure-http-header", "Strict-Transport-Security"),
("redirect", "redirect", None),
("staple", "staple-ocsp", chain_path),
("uir", "ensure-http-header", "Upgrade-Insecure-Requests"),)
supported = self.installer.supported_enhancements()
for config_name, enhancement_name, option in enhancement_info:
config_value = getattr(self.config, config_name)
if enhancement_name in supported:
if config_name == "redirect" and config_value is None:
config_value = redirect_default
if config_value:
self.apply_enhancement(domains, enhancement_name, option)
enhanced = True
elif config_value:
logger.warning(
"Option %s is not supported by the selected installer. "
"Skipping enhancement.", config_name)
msg = ("We were unable to restart web server")
if enhanced:
with error_handler.ErrorHandler(self._rollback_and_restart, msg):
self.installer.restart()
def apply_enhancement(self, domains, enhancement, options=None):
"""Applies an enhancement on all domains.
:param list domains: list of ssl_vhosts (as strings)
:param str enhancement: name of enhancement, e.g. ensure-http-header
:param str options: options to enhancement, e.g. Strict-Transport-Security
.. note:: When more `options` are needed, make options a list.
:raises .errors.PluginError: If Enhancement is not supported, or if
there is any other problem with the enhancement.
"""
msg = ("We were unable to set up enhancement %s for your server, "
"however, we successfully installed your certificate."
% (enhancement))
with error_handler.ErrorHandler(self._recovery_routine_with_msg, msg):
for dom in domains:
try:
self.installer.enhance(dom, enhancement, options)
except errors.PluginEnhancementAlreadyPresent:
if enhancement == "ensure-http-header":
logger.warning("Enhancement %s was already set.",
options)
else:
logger.warning("Enhancement %s was already set.",
enhancement)
except errors.PluginError:
logger.warning("Unable to set enhancement %s for %s",
enhancement, dom)
raise
self.installer.save("Add enhancement %s" % (enhancement))
def _recovery_routine_with_msg(self, success_msg):
"""Calls the installer's recovery routine and prints success_msg
:param str success_msg: message to show on successful recovery
"""
self.installer.recovery_routine()
reporter = zope.component.getUtility(interfaces.IReporter)
reporter.add_message(success_msg, reporter.HIGH_PRIORITY)
def _rollback_and_restart(self, success_msg):
"""Rollback the most recent checkpoint and restart the webserver
:param str success_msg: message to show on successful rollback
"""
logger.critical("Rolling back to previous server configuration...")
reporter = zope.component.getUtility(interfaces.IReporter)
try:
self.installer.rollback_checkpoints()
self.installer.restart()
except:
reporter.add_message(
"An error occurred and we failed to restore your config and "
"restart your server. Please post to "
"https://community.letsencrypt.org/c/help "
"with details about your configuration and this error you received.",
reporter.HIGH_PRIORITY)
raise
reporter.add_message(success_msg, reporter.HIGH_PRIORITY)
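# Usage sketch (illustrative, not part of the original module): a minimal
# end-to-end flow wiring register() and Client together. The `config`, `auth`
# and `installer` objects, and the *_path attributes on config, are assumptions
# for the example.
#
#   acc, acme = register(config, account_storage)
#   cb_client = Client(config, acc, auth, installer, acme=acme)
#   cert, chain, key, _ = cb_client.obtain_certificate(domains)
#   cert_path, chain_path, fullchain_path = cb_client.save_certificate(
#       cert, chain, config.cert_path, config.chain_path, config.fullchain_path)
#   cb_client.deploy_certificate(domains, key.file, cert_path, chain_path,
#                                fullchain_path)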
def validate_key_csr(privkey, csr=None):
"""Validate Key and CSR files.
Verifies that the client key and csr arguments are valid and correspond to
one another. This does not currently check the names in the CSR due to
the inability to read SANs from CSRs in python crypto libraries.
If csr is left as None, only the key will be validated.
:param privkey: Key associated with CSR
:type privkey: :class:`certbot.util.Key`
:param .util.CSR csr: CSR
:raises .errors.Error: when validation fails
"""
# TODO: Handle all of these problems appropriately
# The client can eventually do things like prompt the user
# and allow the user to take more appropriate actions
# Key must be readable and valid.
if privkey.pem and not crypto_util.valid_privkey(privkey.pem):
raise errors.Error("The provided key is not a valid key")
if csr:
if csr.form == "der":
csr_obj = OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, csr.data)
cert_buffer = OpenSSL.crypto.dump_certificate_request(
OpenSSL.crypto.FILETYPE_PEM, csr_obj
)
csr = util.CSR(csr.file, cert_buffer, "pem")
# If CSR is provided, it must be readable and valid.
if csr.data and not crypto_util.valid_csr(csr.data):
raise errors.Error("The provided CSR is not a valid CSR")
# If both CSR and key are provided, the key must be the same key used
# in the CSR.
if csr.data and privkey.pem:
if not crypto_util.csr_matches_pubkey(
csr.data, privkey.pem):
raise errors.Error("The key and CSR do not match")
def rollback(default_installer, checkpoints, config, plugins):
"""Revert configuration the specified number of checkpoints.
:param int checkpoints: Number of checkpoints to revert.
:param config: Configuration.
:type config: :class:`certbot.interfaces.IConfig`
"""
# Misconfigurations are only a slight problem... allow the user to roll back
installer = plugin_selection.pick_installer(
config, default_installer, plugins, question="Which installer "
"should be used for rollback?")
# No Errors occurred during init... proceed normally
# If installer is None... couldn't find an installer... there shouldn't be
# anything to rollback
if installer is not None:
installer.rollback_checkpoints(checkpoints)
installer.restart()
def _open_pem_file(cli_arg_path, pem_path):
"""Open a pem file.
If cli_arg_path was set by the client, open that.
Otherwise, uniquify the file path.
:param str cli_arg_path: the cli arg name, e.g. cert_path
:param str pem_path: the pem file path to open
:returns: a tuple of file object and its absolute file path
"""
if cli.set_by_cli(cli_arg_path):
return util.safe_open(pem_path, chmod=0o644, mode="wb"),\
os.path.abspath(pem_path)
uniq = util.unique_file(pem_path, 0o644, "wb")
return uniq[0], os.path.abspath(uniq[1])
def _save_chain(chain_pem, chain_file):
"""Saves chain_pem at a unique path based on chain_path.
:param str chain_pem: certificate chain in PEM format
:param str chain_file: chain file object
"""
try:
chain_file.write(chain_pem)
finally:
chain_file.close()
logger.info("Cert chain written to %s", chain_file.name)
| []
| []
| [
"CERTBOT_DOCS"
]
| [] | ["CERTBOT_DOCS"] | python | 1 | 0 | |
src/sentry/utils/pytest/sentry.py | from __future__ import absolute_import
from copy import deepcopy
import mock
import os
from django.conf import settings
from sentry_sdk import Hub
TEST_ROOT = os.path.normpath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, "tests")
)
def pytest_configure(config):
# HACK: Only needed for testing!
os.environ.setdefault("_SENTRY_SKIP_CONFIGURATION", "1")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sentry.conf.server")
# override docs which are typically synchronized from an upstream server
# to ensure tests are consistent
os.environ.setdefault(
"INTEGRATION_DOC_FOLDER", os.path.join(TEST_ROOT, "fixtures", "integration-docs")
)
from sentry.utils import integrationdocs
integrationdocs.DOC_FOLDER = os.environ["INTEGRATION_DOC_FOLDER"]
if not settings.configured:
# only configure the db if its not already done
test_db = os.environ.get("DB", "postgres")
if test_db == "postgres":
settings.DATABASES["default"].update(
{
"ENGINE": "sentry.db.postgres",
"USER": "postgres",
"NAME": "sentry",
"HOST": "127.0.0.1",
}
)
# postgres requires running full migration all the time
# since it has to install stored functions which come from
# an actual migration.
else:
raise RuntimeError("oops, wrong database: %r" % test_db)
# Disable static compiling in tests
settings.STATIC_BUNDLES = {}
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + ("tests",)
# Need a predictable key for tests that involve checking signatures
settings.SENTRY_PUBLIC = False
if not settings.SENTRY_CACHE:
settings.SENTRY_CACHE = "sentry.cache.django.DjangoCache"
settings.SENTRY_CACHE_OPTIONS = {}
# This speeds up the tests considerably; pbkdf2 is, by design, slow.
settings.PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
settings.AUTH_PASSWORD_VALIDATORS = []
# Replace real sudo middleware with our mock sudo middleware
# to assert that the user is always in sudo mode
middleware = list(settings.MIDDLEWARE_CLASSES)
sudo = middleware.index("sentry.middleware.sudo.SudoMiddleware")
middleware[sudo] = "sentry.testutils.middleware.SudoMiddleware"
settings.MIDDLEWARE_CLASSES = tuple(middleware)
settings.SENTRY_OPTIONS["cloudflare.secret-key"] = "cloudflare-secret-key"
# enable draft features
settings.SENTRY_OPTIONS["mail.enable-replies"] = True
settings.SENTRY_ALLOW_ORIGIN = "*"
settings.SENTRY_TSDB = "sentry.tsdb.inmemory.InMemoryTSDB"
settings.SENTRY_TSDB_OPTIONS = {}
if settings.SENTRY_NEWSLETTER == "sentry.newsletter.base.Newsletter":
settings.SENTRY_NEWSLETTER = "sentry.newsletter.dummy.DummyNewsletter"
settings.SENTRY_NEWSLETTER_OPTIONS = {}
settings.BROKER_BACKEND = "memory"
settings.BROKER_URL = None
settings.CELERY_ALWAYS_EAGER = False
settings.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
settings.DEBUG_VIEWS = True
settings.SENTRY_ENCRYPTION_SCHEMES = ()
settings.DISABLE_RAVEN = True
settings.CACHES = {"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
if os.environ.get("USE_SNUBA", False):
settings.SENTRY_SEARCH = "sentry.search.snuba.EventsDatasetSnubaSearchBackend"
settings.SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"
settings.SENTRY_EVENTSTREAM = "sentry.eventstream.snuba.SnubaEventStream"
# Use the synchronous executor to make multiple backends easier to test
eventstore_options = deepcopy(settings.SENTRY_EVENTSTORE_OPTIONS)
eventstore_options["backends"]["snuba_discover"]["executor"][
"path"
] = "sentry.utils.concurrent.SynchronousExecutor"
settings.SENTRY_EVENTSTORE_OPTIONS = eventstore_options
if not hasattr(settings, "SENTRY_OPTIONS"):
settings.SENTRY_OPTIONS = {}
settings.SENTRY_OPTIONS.update(
{
"redis.clusters": {"default": {"hosts": {0: {"db": 9}}}},
"mail.backend": "django.core.mail.backends.locmem.EmailBackend",
"system.url-prefix": "http://testserver",
"slack.client-id": "slack-client-id",
"slack.client-secret": "slack-client-secret",
"slack.verification-token": "slack-verification-token",
"github-app.name": "sentry-test-app",
"github-app.client-id": "github-client-id",
"github-app.client-secret": "github-client-secret",
"vsts.client-id": "vsts-client-id",
"vsts.client-secret": "vsts-client-secret",
}
)
# django mail uses socket.getfqdn which doesn't play nice if our
# networking isn't stable
patcher = mock.patch("socket.getfqdn", return_value="localhost")
patcher.start()
if not settings.MIGRATIONS_TEST_MIGRATE:
# Migrations for the "sentry" app take a long time to run, which makes test startup time slow in dev.
# This is a hack to force django to sync the database state from the models rather than use migrations.
settings.MIGRATION_MODULES["sentry"] = None
from sentry.runner.initializer import (
bind_cache_to_option_store,
bootstrap_options,
configure_structlog,
initialize_receivers,
monkeypatch_model_unpickle,
monkeypatch_django_migrations,
setup_services,
)
bootstrap_options(settings)
configure_structlog()
monkeypatch_model_unpickle()
import django
django.setup()
monkeypatch_django_migrations()
bind_cache_to_option_store()
initialize_receivers()
setup_services()
register_extensions()
from sentry.utils.redis import clusters
with clusters.get("default").all() as client:
client.flushdb()
# force celery registration
from sentry.celery import app # NOQA
# disable DISALLOWED_IPS
from sentry import http
http.DISALLOWED_IPS = set()
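# Example invocation (illustrative; the exact test paths are assumptions): the
# configuration above is driven by environment variables, e.g.
#   DB=postgres py.test tests/sentry
#   USE_SNUBA=1 py.test tests/snuba
# to select the database backend and the Snuba-backed search/TSDB/eventstream.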
def register_extensions():
from sentry.plugins.base import plugins
from sentry.plugins.utils import TestIssuePlugin2
plugins.register(TestIssuePlugin2)
from sentry import integrations
from sentry.integrations.bitbucket import BitbucketIntegrationProvider
from sentry.integrations.example import (
ExampleIntegrationProvider,
AliasedIntegrationProvider,
ExampleRepositoryProvider,
)
from sentry.integrations.github import GitHubIntegrationProvider
from sentry.integrations.github_enterprise import GitHubEnterpriseIntegrationProvider
from sentry.integrations.gitlab import GitlabIntegrationProvider
from sentry.integrations.jira import JiraIntegrationProvider
from sentry.integrations.jira_server import JiraServerIntegrationProvider
from sentry.integrations.slack import SlackIntegrationProvider
from sentry.integrations.vsts import VstsIntegrationProvider
from sentry.integrations.vsts_extension import VstsExtensionIntegrationProvider
from sentry.integrations.pagerduty.integration import PagerDutyIntegrationProvider
integrations.register(BitbucketIntegrationProvider)
integrations.register(ExampleIntegrationProvider)
integrations.register(AliasedIntegrationProvider)
integrations.register(GitHubIntegrationProvider)
integrations.register(GitHubEnterpriseIntegrationProvider)
integrations.register(GitlabIntegrationProvider)
integrations.register(JiraIntegrationProvider)
integrations.register(JiraServerIntegrationProvider)
integrations.register(SlackIntegrationProvider)
integrations.register(VstsIntegrationProvider)
integrations.register(VstsExtensionIntegrationProvider)
integrations.register(PagerDutyIntegrationProvider)
from sentry.plugins.base import bindings
from sentry.plugins.providers.dummy import DummyRepositoryProvider
bindings.add("repository.provider", DummyRepositoryProvider, id="dummy")
bindings.add(
"integration-repository.provider", ExampleRepositoryProvider, id="integrations:example"
)
def pytest_runtest_teardown(item):
if not os.environ.get("USE_SNUBA", False):
from sentry import tsdb
# TODO(dcramer): this only works if this is the correct tsdb backend
tsdb.flush()
# XXX(dcramer): only works with DummyNewsletter
from sentry import newsletter
if hasattr(newsletter.backend, "clear"):
newsletter.backend.clear()
from sentry.utils.redis import clusters
with clusters.get("default").all() as client:
client.flushdb()
from celery.task.control import discard_all
discard_all()
from sentry.models import OrganizationOption, ProjectOption, UserOption
for model in (OrganizationOption, ProjectOption, UserOption):
model.objects.clear_local_cache()
Hub.main.bind_client(None)
| []
| []
| [
"DB",
"INTEGRATION_DOC_FOLDER",
"USE_SNUBA"
]
| [] | ["DB", "INTEGRATION_DOC_FOLDER", "USE_SNUBA"] | python | 3 | 0 | |
tests/functional/tests.py | # coding: utf-8
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that walk through U-MOOC pages."""
__author__ = 'Sean Lip'
import __builtin__
import copy
import csv
import datetime
import logging
import os
import re
import shutil
import time
import urllib
import zipfile
import appengine_config
from controllers import lessons
from controllers import sites
from controllers import utils
from controllers.utils import XsrfTokenManager
from models import config
from models import courses
from models import jobs
from models import models
from models import transforms
from models import vfs
from models.courses import Course
import modules.admin.admin
from modules.announcements.announcements import AnnouncementEntity
from tools import verify
from tools.etl import etl
from tools.etl import remote
import actions
from actions import assert_contains
from actions import assert_contains_all_of
from actions import assert_does_not_contain
from actions import assert_equals
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
# A number of data files in a test course.
COURSE_FILE_COUNT = 70
# Base filesystem location for test data.
TEST_DATA_BASE = '/tmp/experimental/coursebuilder/test-data/'
# There is an expectation in our tests of automatic import of data/*.csv files,
# which is achieved below by selecting an alternative factory method.
courses.Course.create_new_default_course = (
courses.Course.custom_new_default_course_for_test)
class InfrastructureTest(actions.TestBase):
"""Test core infrastructure classes agnostic to specific user roles."""
def test_response_content_type_is_application_json_in_utf_8(self):
response = self.testapp.get(
'/rest/config/item?key=gcb_config_update_interval_sec')
self.assertEqual(
'application/json, charset=utf-8', response.headers['Content-Type'])
def test_xsrf_token_manager(self):
"""Test XSRF token operations."""
# os.environ['AUTH_DOMAIN'] = 'test_domain'
# os.environ['APPLICATION_ID'] = 'test app'
# Issues and verify anonymous user token.
action = 'test-action'
token = utils.XsrfTokenManager.create_xsrf_token(action)
assert '/' in token
assert utils.XsrfTokenManager.is_xsrf_token_valid(token, action)
# Impersonate real user.
os.environ['USER_EMAIL'] = 'test_email'
os.environ['USER_ID'] = 'test_id'
# Issues and verify real user token.
action = 'test-action'
token = utils.XsrfTokenManager.create_xsrf_token(action)
assert '/' in token
assert utils.XsrfTokenManager.is_xsrf_token_valid(token, action)
# Check forged time stamp invalidates token.
parts = token.split('/')
assert len(parts) == 2
forgery = '%s/%s' % (long(parts[0]) + 1000, parts[1])
assert not forgery == token
assert not utils.XsrfTokenManager.is_xsrf_token_valid(forgery, action)
# Check token properly expires.
action = 'test-action'
time_in_the_past = long(
time.time() - utils.XsrfTokenManager.XSRF_TOKEN_AGE_SECS)
# pylint: disable-msg=protected-access
old_token = utils.XsrfTokenManager._create_token(
action, time_in_the_past)
assert not utils.XsrfTokenManager.is_xsrf_token_valid(old_token, action)
# Clean up.
# del os.environ['APPLICATION_ID']
# del os.environ['AUTH_DOMAIN']
del os.environ['USER_EMAIL']
del os.environ['USER_ID']
def test_import_course(self):
"""Tests importing one course into another."""
# Setup courses.
sites.setup_courses('course:/a::ns_a, course:/b::ns_b, course:/:/')
# Validate the courses before import.
all_courses = sites.get_all_courses()
dst_app_context_a = all_courses[0]
dst_app_context_b = all_courses[1]
src_app_context = all_courses[2]
dst_course_a = courses.Course(None, app_context=dst_app_context_a)
dst_course_b = courses.Course(None, app_context=dst_app_context_b)
src_course = courses.Course(None, app_context=src_app_context)
assert not dst_course_a.get_units()
assert not dst_course_b.get_units()
assert 11 == len(src_course.get_units())
# Import 1.2 course into 1.3.
errors = []
src_course_out, dst_course_out_a = dst_course_a.import_from(
src_app_context, errors)
if errors:
raise Exception(errors)
assert len(
src_course.get_units()) == len(src_course_out.get_units())
assert len(
src_course_out.get_units()) == len(dst_course_out_a.get_units())
# Import 1.3 course into 1.3.
errors = []
src_course_out_a, dst_course_out_b = dst_course_b.import_from(
dst_app_context_a, errors)
if errors:
raise Exception(errors)
assert src_course_out_a.get_units() == dst_course_out_b.get_units()
# Test delete.
units_to_delete = dst_course_a.get_units()
deleted_count = 0
for unit in units_to_delete:
assert dst_course_a.delete_unit(unit)
deleted_count += 1
dst_course_a.save()
assert deleted_count == len(units_to_delete)
assert not dst_course_a.get_units()
assert not dst_course_a.app_context.fs.list(os.path.join(
dst_course_a.app_context.get_home(), 'assets/js/'))
# Clean up.
sites.reset_courses()
def test_create_new_course(self):
"""Tests creating a new course."""
# Setup courses.
sites.setup_courses('course:/test::ns_test, course:/:/')
# Add several units.
course = courses.Course(None, app_context=sites.get_all_courses()[0])
link = course.add_link()
unit = course.add_unit()
assessment = course.add_assessment()
course.save()
assert course.find_unit_by_id(link.unit_id)
assert course.find_unit_by_id(unit.unit_id)
assert course.find_unit_by_id(assessment.unit_id)
assert 3 == len(course.get_units())
assert assessment.unit_id == 3
# Check unit can be found.
assert unit == course.find_unit_by_id(unit.unit_id)
assert not course.find_unit_by_id(999)
# Update unit.
unit.title = 'Test Title'
course.update_unit(unit)
course.save()
assert 'Test Title' == course.find_unit_by_id(unit.unit_id).title
# Update assessment.
assessment_content = open(os.path.join(
appengine_config.BUNDLE_ROOT,
'assets/js/assessment-Pre.js'), 'rb').readlines()
assessment_content = u''.join(assessment_content)
errors = []
course.set_assessment_content(assessment, assessment_content, errors)
course.save()
assert not errors
assessment_content_stored = course.app_context.fs.get(os.path.join(
course.app_context.get_home(),
course.get_assessment_filename(assessment.unit_id)))
assert assessment_content == assessment_content_stored
# Test adding lessons.
lesson_a = course.add_lesson(unit)
lesson_b = course.add_lesson(unit)
lesson_c = course.add_lesson(unit)
course.save()
assert [lesson_a, lesson_b, lesson_c] == course.get_lessons(
unit.unit_id)
assert lesson_c.lesson_id == 6
# Reorder lessons.
new_order = [
{'id': link.unit_id},
{
'id': unit.unit_id,
'lessons': [
{'id': lesson_b.lesson_id},
{'id': lesson_a.lesson_id},
{'id': lesson_c.lesson_id}]},
{'id': assessment.unit_id}]
course.reorder_units(new_order)
course.save()
assert [lesson_b, lesson_a, lesson_c] == course.get_lessons(
unit.unit_id)
# Move lesson to another unit.
another_unit = course.add_unit()
course.move_lesson_to(lesson_b, another_unit)
course.save()
assert [lesson_a, lesson_c] == course.get_lessons(unit.unit_id)
assert [lesson_b] == course.get_lessons(another_unit.unit_id)
course.delete_unit(another_unit)
course.save()
# Make the course available.
get_environ_old = sites.ApplicationContext.get_environ
def get_environ_new(self):
environ = get_environ_old(self)
environ['course']['now_available'] = True
return environ
sites.ApplicationContext.get_environ = get_environ_new
# Test public/private assessment.
assessment_url = (
'/test/' + course.get_assessment_filename(assessment.unit_id))
assert not assessment.now_available
response = self.get(assessment_url, expect_errors=True)
assert_equals(response.status_int, 403)
assessment = course.find_unit_by_id(assessment.unit_id)
assessment.now_available = True
course.update_unit(assessment)
course.save()
response = self.get(assessment_url)
assert_equals(response.status_int, 200)
# Check delayed assessment deletion.
course.delete_unit(assessment)
response = self.get(assessment_url) # note: file is still available
assert_equals(response.status_int, 200)
course.save()
response = self.get(assessment_url, expect_errors=True)
assert_equals(response.status_int, 404)
# Test public/private activity.
lesson_a = course.find_lesson_by_id(None, lesson_a.lesson_id)
lesson_a.now_available = False
lesson_a.has_activity = True
course.update_lesson(lesson_a)
errors = []
course.set_activity_content(lesson_a, u'var activity = []', errors)
assert not errors
activity_url = (
'/test/' + course.get_activity_filename(None, lesson_a.lesson_id))
response = self.get(activity_url, expect_errors=True)
assert_equals(response.status_int, 403)
lesson_a = course.find_lesson_by_id(None, lesson_a.lesson_id)
lesson_a.now_available = True
course.update_lesson(lesson_a)
course.save()
response = self.get(activity_url)
assert_equals(response.status_int, 200)
# Check delayed activity.
course.delete_lesson(lesson_a)
response = self.get(activity_url) # note: file is still available
assert_equals(response.status_int, 200)
course.save()
response = self.get(activity_url, expect_errors=True)
assert_equals(response.status_int, 404)
# Test deletes removes all child objects.
course.delete_unit(link)
course.delete_unit(unit)
assert not course.delete_unit(assessment)
course.save()
assert not course.get_units()
assert not course.app_context.fs.list(os.path.join(
course.app_context.get_home(), 'assets/js/'))
# Clean up.
sites.ApplicationContext.get_environ = get_environ_old
sites.reset_courses()
def test_unit_lesson_not_available(self):
"""Tests that unavailable units and lessons behave correctly."""
# Setup a new course.
sites.setup_courses('course:/test::ns_test, course:/:/')
config.Registry.test_overrides[
models.CAN_USE_MEMCACHE.name] = True
app_context = sites.get_all_courses()[0]
course = courses.Course(None, app_context=app_context)
# Add a unit that is not available.
unit_1 = course.add_unit()
unit_1.now_available = False
lesson_1_1 = course.add_lesson(unit_1)
lesson_1_1.title = 'Lesson 1.1'
course.update_unit(unit_1)
# Add a unit with some lessons available and some lessons not available.
unit_2 = course.add_unit()
unit_2.now_available = True
lesson_2_1 = course.add_lesson(unit_2)
lesson_2_1.title = 'Lesson 2.1'
lesson_2_1.now_available = False
lesson_2_2 = course.add_lesson(unit_2)
lesson_2_2.title = 'Lesson 2.2'
lesson_2_2.now_available = True
course.update_unit(unit_2)
# Add a unit with all lessons not available.
unit_3 = course.add_unit()
unit_3.now_available = True
lesson_3_1 = course.add_lesson(unit_3)
lesson_3_1.title = 'Lesson 3.1'
lesson_3_1.now_available = False
course.update_unit(unit_3)
# Add a unit that is available.
unit_4 = course.add_unit()
unit_4.now_available = True
lesson_4_1 = course.add_lesson(unit_4)
lesson_4_1.title = 'Lesson 4.1'
lesson_4_1.now_available = True
course.update_unit(unit_4)
course.save()
assert [lesson_1_1] == course.get_lessons(unit_1.unit_id)
assert [lesson_2_1, lesson_2_2] == course.get_lessons(unit_2.unit_id)
assert [lesson_3_1] == course.get_lessons(unit_3.unit_id)
# Make the course available.
get_environ_old = sites.ApplicationContext.get_environ
def get_environ_new(self):
environ = get_environ_old(self)
environ['course']['now_available'] = True
return environ
sites.ApplicationContext.get_environ = get_environ_new
private_tag = 'id="lesson-title-private"'
# Simulate a student traversing the course.
email = '[email protected]'
name = 'Test Unit Lesson Not Available'
actions.login(email, is_admin=False)
actions.register(self, name)
# Accessing a unit that is not available redirects to the main page.
response = self.get('/test/unit?unit=%s' % unit_1.unit_id)
assert_equals(response.status_int, 302)
response = self.get('/test/unit?unit=%s' % unit_2.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 2.1', response.body)
assert_contains('This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
response = self.get('/test/unit?unit=%s&lesson=%s' % (
unit_2.unit_id, lesson_2_2.lesson_id))
assert_equals(response.status_int, 200)
assert_contains('Lesson 2.2', response.body)
assert_does_not_contain('This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
response = self.get('/test/unit?unit=%s' % unit_3.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 3.1', response.body)
assert_contains('This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
response = self.get('/test/unit?unit=%s' % unit_4.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 4.1', response.body)
assert_does_not_contain('This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
actions.logout()
# Simulate an admin traversing the course.
email = '[email protected]_admin'
name = 'Test Unit Lesson Not Available Admin'
actions.login(email, is_admin=True)
actions.register(self, name)
# The course admin can access a unit that is not available.
response = self.get('/test/unit?unit=%s' % unit_1.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 1.1', response.body)
response = self.get('/test/unit?unit=%s' % unit_2.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 2.1', response.body)
assert_does_not_contain('This lesson is not available.', response.body)
assert_contains(private_tag, response.body)
response = self.get('/test/unit?unit=%s&lesson=%s' % (
unit_2.unit_id, lesson_2_2.lesson_id))
assert_equals(response.status_int, 200)
assert_contains('Lesson 2.2', response.body)
assert_does_not_contain('This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
response = self.get('/test/unit?unit=%s' % unit_3.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 3.1', response.body)
assert_does_not_contain('This lesson is not available.', response.body)
assert_contains(private_tag, response.body)
response = self.get('/test/unit?unit=%s' % unit_4.unit_id)
assert_equals(response.status_int, 200)
assert_contains('Lesson 4.1', response.body)
assert_does_not_contain('This lesson is not available.', response.body)
assert_does_not_contain(private_tag, response.body)
actions.logout()
# Clean up app_context.
sites.ApplicationContext.get_environ = get_environ_old
def test_custom_assessments(self):
"""Tests that custom assessments are evaluated correctly."""
# Setup a new course.
sites.setup_courses('course:/test::ns_test, course:/:/')
config.Registry.test_overrides[
models.CAN_USE_MEMCACHE.name] = True
app_context = sites.get_all_courses()[0]
course = courses.Course(None, app_context=app_context)
email = '[email protected]'
name = 'Test Assessments'
assessment_1 = course.add_assessment()
assessment_1.title = 'first'
assessment_1.now_available = True
assessment_1.weight = 0
assessment_2 = course.add_assessment()
assessment_2.title = 'second'
assessment_2.now_available = True
assessment_2.weight = 0
course.save()
assert course.find_unit_by_id(assessment_1.unit_id)
assert course.find_unit_by_id(assessment_2.unit_id)
assert 2 == len(course.get_units())
# Make the course available.
get_environ_old = sites.ApplicationContext.get_environ
def get_environ_new(self):
environ = get_environ_old(self)
environ['course']['now_available'] = True
return environ
sites.ApplicationContext.get_environ = get_environ_new
first = {'score': '1.00', 'assessment_type': assessment_1.unit_id}
second = {'score': '3.00', 'assessment_type': assessment_2.unit_id}
# Update assessment 1.
assessment_1_content = open(os.path.join(
appengine_config.BUNDLE_ROOT,
'assets/js/assessment-Pre.js'), 'rb').readlines()
assessment_1_content = u''.join(assessment_1_content)
errors = []
course.set_assessment_content(
assessment_1, assessment_1_content, errors)
course.save()
assert not errors
# Update assessment 2.
assessment_2_content = open(os.path.join(
appengine_config.BUNDLE_ROOT,
'assets/js/assessment-Mid.js'), 'rb').readlines()
assessment_2_content = u''.join(assessment_2_content)
errors = []
course.set_assessment_content(
assessment_2, assessment_2_content, errors)
course.save()
assert not errors
# Register.
actions.login(email)
actions.register(self, name)
# Submit assessment 1.
actions.submit_assessment(
self, assessment_1.unit_id, first, base='/test')
student = models.Student.get_enrolled_student_by_email(email)
student_scores = course.get_all_scores(student)
assert len(student_scores) == 2
assert student_scores[0]['id'] == str(assessment_1.unit_id)
assert student_scores[0]['score'] == 1
assert student_scores[0]['title'] == 'first'
assert student_scores[0]['weight'] == 0
assert student_scores[1]['id'] == str(assessment_2.unit_id)
assert student_scores[1]['score'] == 0
assert student_scores[1]['title'] == 'second'
assert student_scores[1]['weight'] == 0
# The overall score is None if there are no weights assigned to any of
# the assessments.
overall_score = course.get_overall_score(student)
assert overall_score is None
# View the student profile page.
response = self.get('/test/student/home')
assert_does_not_contain('Overall course score', response.body)
# Add a weight to the first assessment.
assessment_1.weight = 10
overall_score = course.get_overall_score(student)
assert overall_score == 1
# Submit assessment 2.
actions.submit_assessment(
self, assessment_2.unit_id, second, base='/test')
# We need to reload the student instance, because its properties have
# changed.
student = models.Student.get_enrolled_student_by_email(email)
student_scores = course.get_all_scores(student)
assert len(student_scores) == 2
assert student_scores[1]['score'] == 3
overall_score = course.get_overall_score(student)
assert overall_score == 1
# Change the weight of assessment 2.
assessment_2.weight = 30
overall_score = course.get_overall_score(student)
assert overall_score == int((1 * 10 + 3 * 30) / 40)
# Save all changes.
course.save()
# View the student profile page.
response = self.get('/test/student/home')
assert_contains('assessment-score-first">1</span>', response.body)
assert_contains('assessment-score-second">3</span>', response.body)
assert_contains('Overall course score', response.body)
assert_contains('assessment-score-overall">2</span>', response.body)
# Submitting a lower score for any assessment does not change any of
# the scores, since the system records the maximum score that has ever
# been achieved on any assessment.
first_retry = {'score': '0', 'assessment_type': assessment_1.unit_id}
actions.submit_assessment(
self, assessment_1.unit_id, first_retry, base='/test')
student = models.Student.get_enrolled_student_by_email(email)
student_scores = course.get_all_scores(student)
assert len(student_scores) == 2
assert student_scores[0]['id'] == str(assessment_1.unit_id)
assert student_scores[0]['score'] == 1
overall_score = course.get_overall_score(student)
assert overall_score == int((1 * 10 + 3 * 30) / 40)
actions.logout()
# Clean up app_context.
sites.ApplicationContext.get_environ = get_environ_old
def test_datastore_backed_file_system(self):
"""Tests datastore-backed file system operations."""
fs = vfs.AbstractFileSystem(vfs.DatastoreBackedFileSystem('', '/'))
# Check binary file.
src = os.path.join(appengine_config.BUNDLE_ROOT, 'course.yaml')
dst = os.path.join('/', 'course.yaml')
fs.put(dst, open(src, 'rb'))
stored = fs.open(dst)
assert stored.metadata.size == len(open(src, 'rb').read())
assert not stored.metadata.is_draft
assert stored.read() == open(src, 'rb').read()
# Check draft.
fs.put(dst, open(src, 'rb'), is_draft=True)
stored = fs.open(dst)
assert stored.metadata.is_draft
# Check text files with non-ASCII characters and encoding.
foo_js = os.path.join('/', 'assets/js/foo.js')
foo_text = u'This is a test text (тест данные).'
fs.put(foo_js, vfs.string_to_stream(foo_text))
stored = fs.open(foo_js)
assert vfs.stream_to_string(stored) == foo_text
# Check delete.
del_file = os.path.join('/', 'memcache.test')
fs.put(del_file, vfs.string_to_stream(u'test'))
assert fs.isfile(del_file)
fs.delete(del_file)
assert not fs.isfile(del_file)
# Check that open or delete of a non-existent file does not fail.
assert not fs.open('/foo/bar/baz')
assert not fs.delete('/foo/bar/baz')
# Check new content fully overrides old (with and without memcache).
test_file = os.path.join('/', 'memcache.test')
fs.put(test_file, vfs.string_to_stream(u'test text'))
stored = fs.open(test_file)
assert u'test text' == vfs.stream_to_string(stored)
fs.delete(test_file)
# Check file existence.
assert not fs.isfile('/foo/bar')
assert fs.isfile('/course.yaml')
assert fs.isfile('/assets/js/foo.js')
# Check file listing.
bar_js = os.path.join('/', 'assets/js/bar.js')
fs.put(bar_js, vfs.string_to_stream(foo_text))
baz_js = os.path.join('/', 'assets/js/baz.js')
fs.put(baz_js, vfs.string_to_stream(foo_text))
assert fs.list('/') == sorted([
u'/course.yaml',
u'/assets/js/foo.js', u'/assets/js/bar.js', u'/assets/js/baz.js'])
assert fs.list('/assets') == sorted([
u'/assets/js/foo.js', u'/assets/js/bar.js', u'/assets/js/baz.js'])
assert not fs.list('/foo/bar')
def test_utf8_datastore(self):
"""Test writing to and reading from datastore using UTF-8 content."""
event = models.EventEntity()
event.source = 'test-source'
event.user_id = 'test-user-id'
event.data = u'Test Data (тест данные)'
event.put()
stored_event = models.EventEntity().get_by_id([event.key().id()])
assert 1 == len(stored_event)
assert event.data == stored_event[0].data
def assert_queriable(self, entity, name, date_type=datetime.datetime):
"""Create some entities and check that single-property queries work."""
for i in range(1, 32):
item = entity(
key_name='%s_%s' % (date_type.__class__.__name__, i))
setattr(item, name, date_type(2012, 1, i))
item.put()
# Descending order.
items = entity.all().order('-%s' % name).fetch(1000)
assert len(items) == 31
assert getattr(items[0], name) == date_type(2012, 1, 31)
# Ascending order.
items = entity.all().order('%s' % name).fetch(1000)
assert len(items) == 31
assert getattr(items[0], name) == date_type(2012, 1, 1)
def test_indexed_properties(self):
"""Test whether entities support specific query types."""
# A 'DateProperty' or 'DateTimeProperty' of each persistent entity must
# be indexed. This is true even if the application doesn't execute any
# queries relying on the index. The index is still critically important
# for managing data, for example, for bulk data download or for
# incremental computations. Using the index, the entire table can be
# processed in daily, weekly, etc. chunks and it is easy to query for
# new data. If we did not have an index, chunking would have to be done
# by the primary index, where it is impossible to separate recently
# added/modified rows from the rest of the data. Having this index adds
# to the cost of datastore writes, but we believe it is important to
# have it. Below we check that all persistent date/datetime properties
# are indexed.
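# For example (illustrative only), incremental processing would rely on such
# an index to fetch only recently recorded rows:
#   models.EventEntity.all().filter('recorded_on >', cutoff).order('recorded_on')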
self.assert_queriable(AnnouncementEntity, 'date', datetime.date)
self.assert_queriable(models.EventEntity, 'recorded_on')
self.assert_queriable(models.Student, 'enrolled_on')
self.assert_queriable(models.StudentAnswersEntity, 'updated_on')
self.assert_queriable(jobs.DurableJobEntity, 'updated_on')
def test_assets_and_date(self):
"""Verify semantics of all asset and data files."""
def echo(unused_message):
pass
warnings, errors = verify.Verifier().load_and_verify_model(echo)
assert not errors and not warnings
def test_config_visible_from_any_namespace(self):
"""Test that ConfigProperty is visible from any namespace."""
assert (
config.UPDATE_INTERVAL_SEC.value ==
config.UPDATE_INTERVAL_SEC.default_value)
new_value = config.UPDATE_INTERVAL_SEC.default_value + 5
# Add datastore override for known property.
prop = config.ConfigPropertyEntity(
key_name=config.UPDATE_INTERVAL_SEC.name)
prop.value = str(new_value)
prop.is_draft = False
prop.put()
# Check visible from default namespace.
config.Registry.last_update_time = 0
assert config.UPDATE_INTERVAL_SEC.value == new_value
# Check visible from another namespace.
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(
'ns-test_config_visible_from_any_namespace')
config.Registry.last_update_time = 0
assert config.UPDATE_INTERVAL_SEC.value == new_value
finally:
namespace_manager.set_namespace(old_namespace)
class AdminAspectTest(actions.TestBase):
"""Test site from the Admin perspective."""
def test_courses_page_for_multiple_courses(self):
"""Tests /admin page showing multiple courses."""
# Setup courses.
sites.setup_courses('course:/aaa::ns_a, course:/bbb::ns_b, course:/:/')
config.Registry.test_overrides[
models.CAN_USE_MEMCACHE.name] = True
# Validate the courses before import.
all_courses = sites.get_all_courses()
dst_app_context_a = all_courses[0]
dst_app_context_b = all_courses[1]
src_app_context = all_courses[2]
# This test requires a read-write file system; if it runs on a
# read-only one, skip it.
if (not dst_app_context_a.fs.is_read_write() or
not dst_app_context_b.fs.is_read_write()):
return
course_a = courses.Course(None, app_context=dst_app_context_a)
course_b = courses.Course(None, app_context=dst_app_context_b)
unused_course, course_a = course_a.import_from(src_app_context)
unused_course, course_b = course_b.import_from(src_app_context)
# Rename courses.
dst_app_context_a.fs.put(
dst_app_context_a.get_config_filename(),
vfs.string_to_stream(u'course:\n title: \'Course AAA\''))
dst_app_context_b.fs.put(
dst_app_context_b.get_config_filename(),
vfs.string_to_stream(u'course:\n title: \'Course BBB\''))
# Login.
email = '[email protected]'
actions.login(email, True)
# Check the course listing page.
response = self.testapp.get('/admin')
assert_contains_all_of([
'Course AAA',
'/aaa/dashboard',
'Course BBB',
'/bbb/dashboard'], response.body)
# Clean up.
sites.reset_courses()
def test_python_console(self):
"""Test access rights to the Python console."""
email = '[email protected]'
# The default is that the console should be turned off
self.assertFalse(modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED)
# Test the console when it is enabled
modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED = True
# Check normal user has no access.
actions.login(email)
response = self.testapp.get('/admin?action=console')
assert_equals(response.status_int, 302)
response = self.testapp.post('/admin?action=console')
assert_equals(response.status_int, 302)
# Check delegated admin has no access.
os.environ['gcb_admin_user_emails'] = '[%s]' % email
actions.login(email)
response = self.testapp.get('/admin?action=console')
assert_equals(response.status_int, 200)
assert_contains(
'You must be an actual admin user to continue.', response.body)
del os.environ['gcb_admin_user_emails']
# Check actual admin has access.
actions.login(email, True)
response = self.testapp.get('/admin?action=console')
assert_equals(response.status_int, 200)
response.form.set('code', 'print "foo" + "bar"')
response = self.submit(response.form)
assert_contains('foobar', response.body)
# Finally, test that the console is not found when it is disabled
modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED = False
actions.login(email, True)
self.testapp.get('/admin?action=console', status=404)
self.testapp.post('/admin?action=console_run', status=404)
def test_non_admin_has_no_access(self):
"""Test non admin has no access to pages or REST endpoints."""
email = '[email protected]'
actions.login(email)
# Add datastore override.
prop = config.ConfigPropertyEntity(
key_name='gcb_config_update_interval_sec')
prop.value = '5'
prop.is_draft = False
prop.put()
# Check user has no access to specific pages and actions.
response = self.testapp.get('/admin?action=settings')
assert_equals(response.status_int, 302)
response = self.testapp.get(
'/admin?action=config_edit&name=gcb_admin_user_emails')
assert_equals(response.status_int, 302)
response = self.testapp.post(
'/admin?action=config_reset&name=gcb_admin_user_emails')
assert_equals(response.status_int, 302)
# Check user has no rights to GET verb.
response = self.testapp.get(
'/rest/config/item?key=gcb_config_update_interval_sec')
assert_equals(response.status_int, 200)
json_dict = transforms.loads(response.body)
assert json_dict['status'] == 401
assert json_dict['message'] == 'Access denied.'
# Here are the endpoints we want to test: (uri, xsrf_action_name).
endpoints = [
('/rest/config/item', 'config-property-put'),
('/rest/courses/item', 'add-course-put')]
# Check user has no rights to PUT verb.
payload_dict = {}
payload_dict['value'] = '666'
payload_dict['is_draft'] = False
request = {}
request['key'] = 'gcb_config_update_interval_sec'
request['payload'] = transforms.dumps(payload_dict)
for uri, unused_action in endpoints:
response = self.testapp.put(uri + '?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert_contains('"status": 403', response.body)
# Check user still has no rights to PUT verb even if he somehow
# obtained a valid XSRF token.
for uri, action in endpoints:
request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(action)
response = self.testapp.put(uri + '?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
json_dict = transforms.loads(response.body)
assert json_dict['status'] == 401
assert json_dict['message'] == 'Access denied.'
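# Note: as the assertions above show, the REST handlers report
# authorization failures inside the JSON body ("status" 401 or 403)
# while the HTTP status code stays 200; the 302 redirects apply only to
# the regular admin pages.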
def test_admin_list(self):
"""Test delegation of admin access to another user."""
email = '[email protected]'
actions.login(email)
# Add environment variable override.
os.environ['gcb_admin_user_emails'] = '[%s]' % email
# Add datastore override.
prop = config.ConfigPropertyEntity(
key_name='gcb_config_update_interval_sec')
prop.value = '5'
prop.is_draft = False
prop.put()
# Check user has access now.
response = self.testapp.get('/admin?action=settings')
assert_equals(response.status_int, 200)
# Check overrides are active and have proper management actions.
assert_contains('gcb_admin_user_emails', response.body)
assert_contains('[[email protected]]', response.body)
assert_contains(
'/admin?action=config_override&name=gcb_admin_user_emails',
response.body)
assert_contains(
'/admin?action=config_edit&name=gcb_config_update_interval_sec',
response.body)
# Check editor page has proper actions.
response = self.testapp.get(
'/admin?action=config_edit&name=gcb_config_update_interval_sec')
assert_equals(response.status_int, 200)
assert_contains('/admin?action=config_reset', response.body)
assert_contains('name=gcb_config_update_interval_sec', response.body)
# Remove override.
del os.environ['gcb_admin_user_emails']
# Check user has no access.
response = self.testapp.get('/admin?action=settings')
assert_equals(response.status_int, 302)
def test_access_to_admin_pages(self):
"""Test access to admin pages."""
# assert anonymous user has no access
response = self.testapp.get('/admin?action=settings')
assert_equals(response.status_int, 302)
# assert admin user has access
email = '[email protected]'
name = 'Test Access to Admin Pages'
actions.login(email, True)
actions.register(self, name)
response = self.testapp.get('/admin')
assert_contains('Power Searching with Google', response.body)
assert_contains('All Courses', response.body)
response = self.testapp.get('/admin?action=settings')
assert_contains('gcb_admin_user_emails', response.body)
assert_contains('gcb_config_update_interval_sec', response.body)
assert_contains('All Settings', response.body)
response = self.testapp.get('/admin?action=perf')
assert_contains('gcb-admin-uptime-sec:', response.body)
assert_contains('In-process Performance Counters', response.body)
response = self.testapp.get('/admin?action=deployment')
assert_contains('application_id: testbed-test', response.body)
assert_contains('About the Application', response.body)
actions.unregister(self)
actions.logout()
# assert not-admin user has no access
actions.login(email)
actions.register(self, name)
response = self.testapp.get('/admin?action=settings')
assert_equals(response.status_int, 302)
def test_multiple_courses(self):
"""Test courses admin page with two courses configured."""
sites.setup_courses(
'course:/foo:/foo-data, course:/bar:/bar-data:nsbar')
email = '[email protected]'
actions.login(email, True)
response = self.testapp.get('/admin')
assert_contains('U-MOOC > Admin > Courses', response.body)
assert_contains('Total: 2 item(s)', response.body)
# Check course URLs.
assert_contains('<a href="/foo/dashboard">', response.body)
assert_contains('<a href="/bar/dashboard">', response.body)
# Check content locations.
assert_contains('/foo-data', response.body)
assert_contains('/bar-data', response.body)
# Check namespaces.
assert_contains('gcb-course-foo-data', response.body)
assert_contains('nsbar', response.body)
# Clean up.
sites.reset_courses()
def test_add_course(self):
"""Tests adding a new course entry."""
if not self.supports_editing:
return
email = '[email protected]'
actions.login(email, True)
# Prepare request data.
payload_dict = {
'name': 'add_new',
'title': u'new course (тест данные)', 'admin_email': '[email protected]'}
request = {}
request['payload'] = transforms.dumps(payload_dict)
request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(
'add-course-put')
# Execute action.
response = self.testapp.put('/rest/courses/item?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
# Check response.
json_dict = transforms.loads(transforms.loads(response.body)['payload'])
assert 'course:/add_new::ns_add_new' == json_dict.get('entry')
# Re-execute action; should fail as this would create a duplicate.
response = self.testapp.put('/rest/courses/item?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert_equals(412, transforms.loads(response.body)['status'])
# Load the course and check its title.
new_app_context = sites.get_all_courses(
'course:/add_new::ns_add_new')[0]
assert_equals(u'new course (тест данные)', new_app_context.get_title())
new_course = courses.Course(None, app_context=new_app_context)
assert not new_course.get_units()
class CourseAuthorAspectTest(actions.TestBase):
"""Tests the site from the Course Author perspective."""
def test_dashboard(self):
"""Test course dashboard."""
email = '[email protected]'
name = 'Test Dashboard'
# Non-admin doesn't have access.
actions.login(email)
response = self.get('dashboard')
assert_equals(response.status_int, 302)
actions.register(self, name)
assert_equals(response.status_int, 302)
actions.logout()
# Admin has access.
actions.login(email, True)
response = self.get('dashboard')
assert_contains('Google > Dashboard > Outline', response.body)
# Tests outline view.
response = self.get('dashboard')
assert_contains('Unit 3 - Advanced techniques', response.body)
assert_contains('data/lesson.csv', response.body)
# Check editability.
if self.supports_editing:
assert_contains('Add Assessment', response.body)
else:
assert_does_not_contain('Add Assessment', response.body)
# Test assets view.
response = self.get('dashboard?action=assets')
assert_contains('Google > Dashboard > Assets', response.body)
assert_contains('assets/css/main.css', response.body)
assert_contains('assets/img/Image1.5.png', response.body)
assert_contains('assets/js/activity-3.2.js', response.body)
# Test settings view.
response = self.get('dashboard?action=settings')
assert_contains(
'Google > Dashboard > Settings', response.body)
assert_contains('course.yaml', response.body)
assert_contains(
'title: \'Power Searching with Google\'', response.body)
assert_contains('locale: \'en_US\'', response.body)
# Check editability.
if self.supports_editing:
assert_contains('create_or_edit_settings', response.body)
else:
assert_does_not_contain('create_or_edit_settings', response.body)
# Tests student statistics view.
response = self.get('dashboard?action=students')
assert_contains(
'Google > Dashboard > Students', response.body)
assert_contains('have not been calculated yet', response.body)
compute_form = response.forms['gcb-compute-student-stats']
response = self.submit(compute_form)
assert_equals(response.status_int, 302)
assert len(self.taskq.GetTasks('default')) == 1
response = self.get('dashboard?action=students')
assert_contains('is running', response.body)
self.execute_all_deferred_tasks()
response = self.get('dashboard?action=students')
assert_contains('were last updated on', response.body)
assert_contains('currently enrolled: 1', response.body)
assert_contains('total: 1', response.body)
# Tests assessment statistics.
old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace(self.namespace)
try:
for i in range(5):
student = models.Student(key_name='key-%s' % i)
student.is_enrolled = True
student.scores = transforms.dumps({'test-assessment': i})
student.put()
finally:
namespace_manager.set_namespace(old_namespace)
response = self.get('dashboard?action=students')
compute_form = response.forms['gcb-compute-student-stats']
response = self.submit(compute_form)
self.execute_all_deferred_tasks()
response = self.get('dashboard?action=students')
assert_contains('currently enrolled: 6', response.body)
assert_contains(
'test-assessment: completed 5, average score 2.0', response.body)
def test_trigger_sample_announcements(self):
"""Test course author can trigger adding sample announcements."""
email = '[email protected]'
name = 'Test Announcements'
actions.login(email, True)
actions.register(self, name)
response = actions.view_announcements(self)
assert_contains('Example Announcement', response.body)
assert_contains('Welcome to the final class!', response.body)
assert_does_not_contain('No announcements yet.', response.body)
def test_manage_announcements(self):
"""Test course author can manage announcements."""
email = '[email protected]'
name = 'Test Announcements'
actions.login(email, True)
actions.register(self, name)
# add new
response = actions.view_announcements(self)
add_form = response.forms['gcb-add-announcement']
response = self.submit(add_form)
assert_equals(response.status_int, 302)
# check edit form rendering
response = self.testapp.get(response.location)
assert_equals(response.status_int, 200)
assert_contains('/rest/announcements/item?key=', response.body)
# check added
response = actions.view_announcements(self)
assert_contains('Sample Announcement (Draft)', response.body)
# delete draft
response = actions.view_announcements(self)
delete_form = response.forms['gcb-delete-announcement-1']
response = self.submit(delete_form)
assert_equals(response.status_int, 302)
# check deleted
assert_does_not_contain('Welcome to the final class!', response.body)
def test_announcements_rest(self):
"""Test REST access to announcements."""
email = '[email protected]'
name = 'Test Announcements Rest'
actions.login(email, True)
actions.register(self, name)
response = actions.view_announcements(self)
assert_does_not_contain('My Test Title', response.body)
# REST GET existing item
items = AnnouncementEntity.all().fetch(1)
for item in items:
response = self.get('rest/announcements/item?key=%s' % item.key())
json_dict = transforms.loads(response.body)
assert json_dict['status'] == 200
assert 'message' in json_dict
assert 'payload' in json_dict
payload_dict = transforms.loads(json_dict['payload'])
assert 'title' in payload_dict
assert 'date' in payload_dict
# REST PUT item
payload_dict['title'] = u'My Test Title Мой заголовок теста'
payload_dict['date'] = '2012/12/31'
payload_dict['is_draft'] = True
request = {}
request['key'] = str(item.key())
request['payload'] = transforms.dumps(payload_dict)
# Check XSRF is required.
response = self.put('rest/announcements/item?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert_contains('"status": 403', response.body)
# Check PUT works.
request['xsrf_token'] = json_dict['xsrf_token']
response = self.put('rest/announcements/item?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert_contains('"status": 200', response.body)
# Confirm change is visible on the page.
response = self.get('announcements')
assert_contains(
u'My Test Title Мой заголовок теста (Draft)', response.body)
# REST GET not-existing item
response = self.get('rest/announcements/item?key=not_existent_key')
json_dict = transforms.loads(response.body)
assert json_dict['status'] == 404
class StudentAspectTest(actions.TestBase):
"""Test the site from the Student perspective."""
def test_view_announcements(self):
"""Test student aspect of announcements."""
email = '[email protected]'
name = 'Test Announcements'
actions.login(email)
actions.register(self, name)
# Check no announcements yet.
response = actions.view_announcements(self)
assert_does_not_contain('Example Announcement', response.body)
assert_does_not_contain('Welcome to the final class!', response.body)
assert_contains('No announcements yet.', response.body)
actions.logout()
# Login as admin and add announcements.
actions.login('[email protected]', True)
actions.register(self, 'admin')
response = actions.view_announcements(self)
actions.logout()
# Check we can see non-draft announcements.
actions.login(email)
response = actions.view_announcements(self)
assert_contains('Example Announcement', response.body)
assert_does_not_contain('Welcome to the final class!', response.body)
assert_does_not_contain('No announcements yet.', response.body)
# Check there is no access to draft announcements via the REST handler.
items = AnnouncementEntity.all().fetch(1000)
for item in items:
response = self.get('rest/announcements/item?key=%s' % item.key())
if item.is_draft:
json_dict = transforms.loads(response.body)
assert json_dict['status'] == 401
else:
assert_equals(response.status_int, 200)
def test_registration(self):
"""Test student registration."""
email = '[email protected]'
name1 = 'Test Student'
name2 = 'John Smith'
name3 = u'Pavel Simakov (тест данные)'
actions.login(email)
actions.register(self, name1)
actions.check_profile(self, name1)
actions.change_name(self, name2)
actions.unregister(self)
actions.register(self, name3)
actions.check_profile(self, name3)
def test_course_not_available(self):
"""Tests course is only accessible to author when incomplete."""
email = '[email protected]'
name = 'Test Course Not Available'
actions.login(email)
actions.register(self, name)
# Check preview and static resources are available.
response = self.get('course')
assert_equals(response.status_int, 200)
response = self.get('assets/js/activity-1.4.js')
assert_equals(response.status_int, 200)
# Override course.yaml settings by patching app_context.
get_environ_old = sites.ApplicationContext.get_environ
def get_environ_new(self):
environ = get_environ_old(self)
environ['course']['now_available'] = False
return environ
sites.ApplicationContext.get_environ = get_environ_new
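# Monkey-patching sites.ApplicationContext.get_environ is how these
# tests override course.yaml settings without touching the file itself;
# the original method is restored at the end of the test.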
# Check preview and static resources are not available to Student.
response = self.get('course', expect_errors=True)
assert_equals(response.status_int, 404)
response = self.get('assets/js/activity-1.4.js', expect_errors=True)
assert_equals(response.status_int, 404)
# Check preview and static resources are still available to author.
actions.login(email, True)
response = self.get('course')
assert_equals(response.status_int, 200)
response = self.get('assets/js/activity-1.4.js')
assert_equals(response.status_int, 200)
# Clean up app_context.
sites.ApplicationContext.get_environ = get_environ_old
def test_registration_closed(self):
"""Test student registration when course is full."""
email = '[email protected]'
name = 'Test Registration Closed'
# Override course.yaml settings by patching app_context.
get_environ_old = sites.ApplicationContext.get_environ
def get_environ_new(self):
environ = get_environ_old(self)
environ['reg_form']['can_register'] = False
return environ
sites.ApplicationContext.get_environ = get_environ_new
# Try to login and register.
actions.login(email)
try:
actions.register(self, name)
raise actions.ShouldHaveFailedByNow(
'Expected to fail: new registrations should not be allowed '
'when registration is closed.')
except actions.ShouldHaveFailedByNow as e:
raise e
except:
pass
# Clean up app_context.
sites.ApplicationContext.get_environ = get_environ_old
def test_permissions(self):
"""Test student permissions, and which pages they can view."""
email = '[email protected]'
name = 'Test Permissions'
actions.login(email)
actions.register(self, name)
actions.Permissions.assert_enrolled(self)
actions.unregister(self)
actions.Permissions.assert_unenrolled(self)
actions.register(self, name)
actions.Permissions.assert_enrolled(self)
def test_login_and_logout(self):
"""Test if login and logout behave as expected."""
email = '[email protected]'
actions.Permissions.assert_logged_out(self)
actions.login(email)
actions.Permissions.assert_unenrolled(self)
actions.logout()
actions.Permissions.assert_logged_out(self)
def test_lesson_activity_navigation(self):
"""Test navigation between lesson/activity pages."""
email = '[email protected]'
name = 'Test Lesson Activity Navigation'
actions.login(email)
actions.register(self, name)
response = self.get('unit?unit=1&lesson=1')
assert_does_not_contain('Previous Page', response.body)
assert_contains('Next Page', response.body)
response = self.get('unit?unit=2&lesson=3')
assert_contains('Previous Page', response.body)
assert_contains('Next Page', response.body)
response = self.get('unit?unit=3&lesson=5')
assert_contains('Previous Page', response.body)
assert_does_not_contain('Next Page', response.body)
assert_contains('End', response.body)
def test_attempt_activity_event(self):
"""Test activity attempt generates event."""
email = '[email protected]'
name = 'Test Attempt Activity Event'
actions.login(email)
actions.register(self, name)
# Enable event recording.
config.Registry.test_overrides[
lessons.CAN_PERSIST_ACTIVITY_EVENTS.name] = True
# Prepare event.
request = {}
request['source'] = 'test-source'
request['payload'] = transforms.dumps({'Alice': u'Bob (тест данные)'})
# Check XSRF token is required.
response = self.post('rest/events?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert_contains('"status": 403', response.body)
# Check PUT works.
request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(
'event-post')
response = self.post('rest/events?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}), {})
assert_equals(response.status_int, 200)
assert not response.body
# Check event is properly recorded.
old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace(self.namespace)
try:
events = models.EventEntity.all().fetch(1000)
assert 1 == len(events)
assert_contains(
u'Bob (тест данные)',
transforms.loads(events[0].data)['Alice'])
finally:
namespace_manager.set_namespace(old_namespace)
# Clean up.
config.Registry.test_overrides = {}
def test_two_students_dont_see_each_other_pages(self):
"""Test a user can't see another user pages."""
email1 = '[email protected]'
name1 = 'User 1'
email2 = '[email protected]'
name2 = 'User 2'
# Login as one user and view 'unit' and other pages, which are not
# cached.
actions.login(email1)
actions.register(self, name1)
actions.Permissions.assert_enrolled(self)
response = actions.view_unit(self)
assert_contains(email1, response.body)
actions.logout()
# Login as another user and check that 'unit' and other pages show
# the correct new email.
actions.login(email2)
actions.register(self, name2)
actions.Permissions.assert_enrolled(self)
response = actions.view_unit(self)
assert_contains(email2, response.body)
actions.logout()
def test_xsrf_defence(self):
"""Test defense against XSRF attack."""
email = '[email protected]'
name = 'Test Xsrf Defence'
actions.login(email)
actions.register(self, name)
response = self.get('student/home')
response.form.set('name', 'My New Name')
response.form.set('xsrf_token', 'bad token')
response = response.form.submit(expect_errors=True)
assert_equals(response.status_int, 403)
def test_response_headers(self):
"""Test dynamically-generated responses use proper headers."""
email = '[email protected]'
name = 'Test Response Headers'
actions.login(email)
actions.register(self, name)
response = self.get('student/home')
assert_equals(response.status_int, 200)
assert_contains('must-revalidate', response.headers['Cache-Control'])
assert_contains('no-cache', response.headers['Cache-Control'])
assert_contains('no-cache', response.headers['Pragma'])
assert_contains('Mon, 01 Jan 1990', response.headers['Expires'])
class StaticHandlerTest(actions.TestBase):
"""Check serving of static resources."""
def test_static_files_cache_control(self):
"""Test static/zip handlers use proper Cache-Control headers."""
# Check static handler.
response = self.get('/assets/css/main.css')
assert_equals(response.status_int, 200)
assert_contains('max-age=600', response.headers['Cache-Control'])
assert_contains('public', response.headers['Cache-Control'])
assert_does_not_contain('no-cache', response.headers['Cache-Control'])
# Check zip file handler.
response = self.testapp.get(
'/static/inputex-3.1.0/src/inputex/assets/skins/sam/inputex.css')
assert_equals(response.status_int, 200)
assert_contains('max-age=600', response.headers['Cache-Control'])
assert_contains('public', response.headers['Cache-Control'])
assert_does_not_contain('no-cache', response.headers['Cache-Control'])
class ActivityTest(actions.TestBase):
"""Test for activities."""
def get_activity(self, unit_id, lesson_id, args):
"""Retrieve the activity page for a given unit and lesson id."""
response = self.get('activity?unit=%s&lesson=%s' % (unit_id, lesson_id))
assert_equals(response.status_int, 200)
assert_contains(
'<script src="assets/lib/activity-generic-%s.%s.js"></script>' %
(unit_id, lesson_id), response.body)
js_response = self.get('assets/lib/activity-generic-1.2.js')
assert_equals(js_response.status_int, 200)
# Extract XSRF token from the page.
match = re.search(r'eventXsrfToken = [\']([^\']+)', response.body)
assert match
xsrf_token = match.group(1)
args['xsrf_token'] = xsrf_token
return response, args
def test_activities(self):
"""Test that activity submissions are handled and recorded correctly."""
email = '[email protected]'
name = 'Test Activities'
unit_id = 1
lesson_id = 2
activity_submissions = {
'1.2': {
'index': 3,
'type': 'activity-choice',
'value': 3,
'correct': True,
},
}
# Register.
actions.login(email)
actions.register(self, name)
# Enable event recording.
config.Registry.test_overrides[
lessons.CAN_PERSIST_ACTIVITY_EVENTS.name] = True
# Navigate to the course overview page, and check that the unit shows
# no progress yet.
response = self.get('course')
assert_equals(response.status_int, 200)
assert_contains(u'id="progress-notstarted-%s"' % unit_id, response.body)
old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace(self.namespace)
try:
response, args = self.get_activity(unit_id, lesson_id, {})
# Check that the current activity shows no progress yet.
assert_contains(
u'id="progress-notstarted-%s"' % lesson_id, response.body)
# Prepare activity submission event.
args['source'] = 'attempt-activity'
lesson_key = '%s.%s' % (unit_id, lesson_id)
assert lesson_key in activity_submissions
args['payload'] = activity_submissions[lesson_key]
args['payload']['location'] = (
'http://localhost:8080/activity?unit=%s&lesson=%s' %
(unit_id, lesson_id))
args['payload'] = transforms.dumps(args['payload'])
# Submit the request to the backend.
response = self.post('rest/events?%s' % urllib.urlencode(
{'request': transforms.dumps(args)}), {})
assert_equals(response.status_int, 200)
assert not response.body
# Check that the current activity shows partial progress.
response, args = self.get_activity(unit_id, lesson_id, {})
assert_contains(
u'id="progress-inprogress-%s"' % lesson_id, response.body)
# Navigate to the course overview page and check that the unit shows
# partial progress.
response = self.get('course')
assert_equals(response.status_int, 200)
assert_contains(
u'id="progress-inprogress-%s"' % unit_id, response.body)
finally:
namespace_manager.set_namespace(old_namespace)
def test_progress(self):
"""Test student activity progress in detail, using the sample course."""
class FakeHandler(object):
def __init__(self, app_context):
self.app_context = app_context
course = Course(FakeHandler(sites.get_all_courses()[0]))
tracker = course.get_progress_tracker()
student = models.Student(key_name='key-test-student')
# Initially, all progress entries should be set to zero.
unit_progress = tracker.get_unit_progress(student)
for key in unit_progress:
assert unit_progress[key] == 0
lesson_progress = tracker.get_lesson_progress(student, 1)
for key in lesson_progress:
assert lesson_progress[key] == 0
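# The integer progress values asserted below appear to encode the same
# three states used in the page element ids elsewhere in these tests:
# 0 == not started, 1 == in progress, 2 == completed.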
# The blocks in Lesson 1.2 with activities are blocks 3 and 6.
# Submitting block 3 should trigger an in-progress update.
tracker.put_block_completed(student, 1, 2, 3)
assert tracker.get_unit_progress(student)['1'] == 1
assert tracker.get_lesson_progress(student, 1)[2] == 1
# Submitting block 6 should trigger a completion update for Lesson 1.2.
tracker.put_block_completed(student, 1, 2, 6)
assert tracker.get_unit_progress(student)['1'] == 1
assert tracker.get_lesson_progress(student, 1)[2] == 2
# Test a lesson with no interactive blocks in its activity. It should
# change its status to 'completed' once it is accessed.
tracker.put_activity_accessed(student, 2, 1)
assert tracker.get_unit_progress(student)['2'] == 1
assert tracker.get_lesson_progress(student, 2)[1] == 2
# Test that a lesson without activities (Lesson 1.1) doesn't count.
# Complete lessons 1.3, 1.4, 1.5 and 1.6; unit 1 should then be marked
# as 'completed' even though we have no events associated with
# Lesson 1.1.
tracker.put_activity_completed(student, 1, 3)
assert tracker.get_unit_progress(student)['1'] == 1
tracker.put_activity_completed(student, 1, 4)
assert tracker.get_unit_progress(student)['1'] == 1
tracker.put_activity_completed(student, 1, 5)
assert tracker.get_unit_progress(student)['1'] == 1
tracker.put_activity_completed(student, 1, 6)
assert tracker.get_unit_progress(student)['1'] == 2
# Test that a unit is not completed until all activity pages have been,
# at least, visited. Unit 6 has 3 lessons; the last one has no
# activity block.
tracker.put_activity_completed(student, 6, 1)
tracker.put_activity_completed(student, 6, 2)
assert tracker.get_unit_progress(student)['6'] == 1
tracker.put_activity_accessed(student, 6, 3)
assert tracker.get_unit_progress(student)['6'] == 2
# Test assessment counters.
pre_id = 'Pre'
tracker.put_assessment_completed(student, pre_id)
progress = tracker.get_or_create_progress(student)
assert tracker.is_assessment_completed(progress, pre_id)
assert tracker.get_assessment_status(progress, pre_id) == 1
tracker.put_assessment_completed(student, pre_id)
progress = tracker.get_or_create_progress(student)
assert tracker.is_assessment_completed(progress, pre_id)
assert tracker.get_assessment_status(progress, pre_id) == 2
tracker.put_assessment_completed(student, pre_id)
progress = tracker.get_or_create_progress(student)
assert tracker.is_assessment_completed(progress, pre_id)
assert tracker.get_assessment_status(progress, pre_id) == 3
# Test that invalid keys do not lead to any updates.
# Invalid assessment id.
fake_id = 'asdf'
tracker.put_assessment_completed(student, fake_id)
progress = tracker.get_or_create_progress(student)
assert not tracker.is_assessment_completed(progress, fake_id)
assert tracker.get_assessment_status(progress, fake_id) is None
# Invalid unit id.
tracker.put_activity_completed(student, fake_id, 1)
progress = tracker.get_or_create_progress(student)
assert tracker.get_activity_status(progress, fake_id, 1) is None
# Invalid lesson id.
fake_numeric_id = 22
tracker.put_activity_completed(student, 1, fake_numeric_id)
progress = tracker.get_or_create_progress(student)
assert tracker.get_activity_status(progress, 1, fake_numeric_id) is None
# Invalid block id.
tracker.put_block_completed(student, 5, 2, fake_numeric_id)
progress = tracker.get_or_create_progress(student)
assert not tracker.is_block_completed(
progress, 5, 2, fake_numeric_id)
class AssessmentTest(actions.TestBase):
"""Test for assessments."""
def test_course_pass(self):
"""Test student passing final exam."""
email = '[email protected]'
name = 'Test Pass'
post = {'assessment_type': 'Fin', 'score': '100.00'}
# Register.
actions.login(email)
actions.register(self, name)
# Submit answer.
response = actions.submit_assessment(self, 'Fin', post)
assert_equals(response.status_int, 200)
assert_contains('your overall course score of 70%', response.body)
assert_contains('you have passed the course', response.body)
# Check that the result shows up on the profile page.
response = actions.check_profile(self, name)
assert_contains('70', response.body)
assert_contains('100', response.body)
def test_assessments(self):
"""Test assessment scores are properly submitted and summarized."""
course = courses.Course(None, app_context=sites.get_all_courses()[0])
email = '[email protected]'
name = 'Test Assessments'
pre_answers = [{'foo': 'bar'}, {'Alice': u'Bob (тест данные)'}]
pre = {
'assessment_type': 'Pre', 'score': '1.00',
'answers': transforms.dumps(pre_answers)}
mid = {'assessment_type': 'Mid', 'score': '2.00'}
fin = {'assessment_type': 'Fin', 'score': '3.00'}
second_mid = {'assessment_type': 'Mid', 'score': '1.00'}
second_fin = {'assessment_type': 'Fin', 'score': '100000'}
# Register.
actions.login(email)
actions.register(self, name)
# Navigate to the course overview page.
response = self.get('course')
assert_equals(response.status_int, 200)
assert_does_not_contain(u'id="progress-completed-Mid', response.body)
assert_contains(u'id="progress-notstarted-Mid', response.body)
old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace(self.namespace)
try:
student = models.Student.get_enrolled_student_by_email(email)
# Check that three score objects (corresponding to the Pre, Mid and
# Fin assessments) exist right now, and that they all have zero
# score.
student_scores = course.get_all_scores(student)
assert len(student_scores) == 3
for assessment in student_scores:
assert assessment['score'] == 0
# Submit assessments and check that the score is updated.
actions.submit_assessment(self, 'Pre', pre)
student = models.Student.get_enrolled_student_by_email(email)
student_scores = course.get_all_scores(student)
assert len(student_scores) == 3
for assessment in student_scores:
if assessment['id'] == 'Pre':
assert assessment['score'] > 0
else:
assert assessment['score'] == 0
actions.submit_assessment(self, 'Mid', mid)
student = models.Student.get_enrolled_student_by_email(email)
# Navigate to the course overview page.
response = self.get('course')
assert_equals(response.status_int, 200)
assert_contains(u'id="progress-completed-Pre', response.body)
assert_contains(u'id="progress-completed-Mid', response.body)
assert_contains(u'id="progress-notstarted-Fin', response.body)
# Submit the final assessment.
actions.submit_assessment(self, 'Fin', fin)
student = models.Student.get_enrolled_student_by_email(email)
# Navigate to the course overview page.
response = self.get('course')
assert_equals(response.status_int, 200)
assert_contains(u'id="progress-completed-Fin', response.body)
# Check that the overall-score is non-zero.
assert course.get_overall_score(student)
# Check assessment answers.
answers = transforms.loads(
models.StudentAnswersEntity.get_by_key_name(
student.user_id).data)
assert pre_answers == answers['Pre']
# pylint: disable-msg=g-explicit-bool-comparison
assert [] == answers['Mid']
assert [] == answers['Fin']
# pylint: enable-msg=g-explicit-bool-comparison
# Check that scores are recorded properly.
student = models.Student.get_enrolled_student_by_email(email)
assert int(course.get_score(student, 'Pre')) == 1
assert int(course.get_score(student, 'Mid')) == 2
assert int(course.get_score(student, 'Fin')) == 3
assert (int(course.get_overall_score(student)) ==
int((0.30 * 2) + (0.70 * 3)))
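# Worked example for the assertion above, assuming the 30%/70%
# midcourse/final weighting used in this test matches the sample course
# configuration: 0.30 * 2 + 0.70 * 3 == 2.7, which int() truncates to 2.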
# Try posting a new midcourse exam with a lower score;
# nothing should change.
actions.submit_assessment(self, 'Mid', second_mid)
student = models.Student.get_enrolled_student_by_email(email)
assert int(course.get_score(student, 'Pre')) == 1
assert int(course.get_score(student, 'Mid')) == 2
assert int(course.get_score(student, 'Fin')) == 3
assert (int(course.get_overall_score(student)) ==
int((0.30 * 2) + (0.70 * 3)))
# Now try posting a postcourse exam with a higher score and note
# the changes.
actions.submit_assessment(self, 'Fin', second_fin)
student = models.Student.get_enrolled_student_by_email(email)
assert int(course.get_score(student, 'Pre')) == 1
assert int(course.get_score(student, 'Mid')) == 2
assert int(course.get_score(student, 'Fin')) == 100000
assert (int(course.get_overall_score(student)) ==
int((0.30 * 2) + (0.70 * 100000)))
finally:
namespace_manager.set_namespace(old_namespace)
def remove_dir(dir_name):
"""Delete a directory."""
logging.info('removing folder: %s', dir_name)
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
if os.path.exists(dir_name):
raise Exception('Failed to delete directory: %s' % dir_name)
def clean_dir(dir_name):
"""Clean a directory."""
remove_dir(dir_name)
logging.info('creating folder: %s', dir_name)
os.makedirs(dir_name)
if not os.path.exists(dir_name):
raise Exception('Failed to create directory: %s' % dir_name)
def clone_canonical_course_data(src, dst):
"""Makes a copy of canonical course content."""
clean_dir(dst)
def copytree(name):
shutil.copytree(
os.path.join(src, name),
os.path.join(dst, name))
copytree('assets')
copytree('data')
copytree('views')
shutil.copy(
os.path.join(src, 'course.yaml'),
os.path.join(dst, 'course.yaml'))
# Make all files writable.
for root, unused_dirs, files in os.walk(dst):
for afile in files:
fname = os.path.join(root, afile)
os.chmod(fname, 0o777)
class GeneratedCourse(object):
"""A helper class for a dynamically generated course content."""
@classmethod
def set_data_home(cls, test):
"""All data for this test will be placed here."""
cls.data_home = os.path.join(TEST_DATA_BASE, test.__class__.__name__)
def __init__(self, ns):
self.path = ns
@property
def namespace(self):
return 'ns%s' % self.path
@property
def title(self):
return u'Power Searching with Google title-%s (тест данные)' % self.path
@property
def unit_title(self):
return u'Interpreting results unit-title-%s (тест данные)' % self.path
@property
def lesson_title(self):
return u'Word order matters lesson-title-%s (тест данные)' % self.path
@property
def head(self):
return '<!-- head-%s -->' % self.path
@property
def css(self):
return '<!-- css-%s -->' % self.path
@property
def home(self):
return os.path.join(self.data_home, 'data-%s' % self.path)
@property
def email(self):
return 'walk_the_course_named_%[email protected]' % self.path
@property
def name(self):
return 'Walk The Course Named %s' % self.path
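# Illustrative usage (not executed): GeneratedCourse('a') yields the
# namespace 'nsa', a content folder named 'data-a' under
# GeneratedCourse.data_home, and email/title/unit/lesson strings that
# embed the 'a' suffix, as exercised by MultipleCoursesTestBase below.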
class MultipleCoursesTestBase(actions.TestBase):
"""Configures several courses for running concurrently."""
def modify_file(self, filename, find, replace):
"""Read, modify and write back the file."""
text = open(filename, 'r').read().decode('utf-8')
# Make sure target text is not in the file.
assert replace not in text
text = text.replace(find, replace)
assert replace in text
open(filename, 'w').write(text.encode('utf-8'))
def modify_canonical_course_data(self, course):
"""Modify canonical content by adding unique bits to it."""
self.modify_file(
os.path.join(course.home, 'course.yaml'),
'title: \'Power Searching with Google\'',
'title: \'%s\'' % course.title)
self.modify_file(
os.path.join(course.home, 'data/unit.csv'),
',Interpreting results,',
',%s,' % course.unit_title)
self.modify_file(
os.path.join(course.home, 'data/lesson.csv'),
',Word order matters,',
',%s,' % course.lesson_title)
self.modify_file(
os.path.join(course.home, 'data/lesson.csv'),
',Interpreting results,',
',%s,' % course.unit_title)
self.modify_file(
os.path.join(course.home, 'views/base.html'),
'<head>',
'<head>\n%s' % course.head)
self.modify_file(
os.path.join(course.home, 'assets/css/main.css'),
'html {',
'%s\nhtml {' % course.css)
def prepare_course_data(self, course):
"""Create unique course content for a course."""
clone_canonical_course_data(self.bundle_root, course.home)
self.modify_canonical_course_data(course)
def setUp(self): # pylint: disable-msg=g-bad-name
"""Configure the test."""
super(MultipleCoursesTestBase, self).setUp()
GeneratedCourse.set_data_home(self)
self.course_a = GeneratedCourse('a')
self.course_b = GeneratedCourse('b')
self.course_ru = GeneratedCourse('ru')
# Override BUNDLE_ROOT.
self.bundle_root = appengine_config.BUNDLE_ROOT
appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home
# Prepare course content.
clean_dir(GeneratedCourse.data_home)
self.prepare_course_data(self.course_a)
self.prepare_course_data(self.course_b)
self.prepare_course_data(self.course_ru)
# Setup one course for I18N.
self.modify_file(
os.path.join(self.course_ru.home, 'course.yaml'),
'locale: \'en_US\'',
'locale: \'ru\'')
# Configure courses.
sites.setup_courses('%s, %s, %s' % (
'course:/courses/a:/data-a:nsa',
'course:/courses/b:/data-b:nsb',
'course:/courses/ru:/data-ru:nsru'))
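# The rule strings above appear to follow the pattern
# course:<url_prefix>:<content_folder>:<namespace>, judging by the other
# sites.setup_courses() calls in this file; an empty folder component
# (as in 'course:/::ns_test') seems to select a datastore-backed file
# system instead of a local folder.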
def tearDown(self): # pylint: disable-msg=g-bad-name
"""Clean up."""
sites.reset_courses()
appengine_config.BUNDLE_ROOT = self.bundle_root
super(MultipleCoursesTestBase, self).tearDown()
def walk_the_course(
self, course, first_time=True, is_admin=False, logout=True):
"""Visit a course as a Student would."""
# Check normal user has no access.
actions.login(course.email, is_admin)
# Test schedule.
if first_time:
response = self.testapp.get('/courses/%s/preview' % course.path)
else:
response = self.testapp.get('/courses/%s/course' % course.path)
assert_contains(course.title, response.body)
assert_contains(course.unit_title, response.body)
assert_contains(course.head, response.body)
# Tests static resource.
response = self.testapp.get(
'/courses/%s/assets/css/main.css' % course.path)
assert_contains(course.css, response.body)
if first_time:
# Test registration.
response = self.get('/courses/%s/register' % course.path)
assert_contains(course.title, response.body)
assert_contains(course.head, response.body)
response.form.set('form01', course.name)
response.form.action = '/courses/%s/register' % course.path
response = self.submit(response.form)
assert_contains(course.title, response.body)
assert_contains(course.head, response.body)
assert_contains(course.title, response.body)
assert_contains(
'//groups.google.com/group/My-Course-Announce', response.body)
assert_contains(
'//groups.google.com/group/My-Course', response.body)
# Check lesson page.
response = self.testapp.get(
'/courses/%s/unit?unit=1&lesson=5' % course.path)
assert_contains(course.title, response.body)
assert_contains(course.lesson_title, response.body)
assert_contains(course.head, response.body)
# Check activity page.
response = self.testapp.get(
'/courses/%s/activity?unit=1&lesson=5' % course.path)
assert_contains(course.title, response.body)
assert_contains(course.lesson_title, response.body)
assert_contains(course.head, response.body)
if logout:
actions.logout()
class MultipleCoursesTest(MultipleCoursesTestBase):
"""Test several courses running concurrently."""
def test_courses_are_isolated(self):
"""Test each course serves its own assets, views and data."""
# Pretend students visit courses.
self.walk_the_course(self.course_a)
self.walk_the_course(self.course_b)
self.walk_the_course(self.course_a, False)
self.walk_the_course(self.course_b, False)
# Check course namespaced data.
self.validate_course_data(self.course_a)
self.validate_course_data(self.course_b)
# Check default namespace.
assert (
namespace_manager.get_namespace() ==
appengine_config.DEFAULT_NAMESPACE_NAME)
assert not models.Student.all().fetch(1000)
def validate_course_data(self, course):
"""Check course data is valid."""
old_namespace = namespace_manager.get_namespace()
namespace_manager.set_namespace(course.namespace)
try:
students = models.Student.all().fetch(1000)
assert len(students) == 1
for student in students:
assert_equals(course.email, student.key().name())
assert_equals(course.name, student.name)
finally:
namespace_manager.set_namespace(old_namespace)
class I18NTest(MultipleCoursesTestBase):
"""Test courses running in different locales and containing I18N content."""
def test_csv_supports_utf8(self):
"""Test UTF-8 content in CSV file is handled correctly."""
title_ru = u'Найди факты быстрее'
csv_file = os.path.join(self.course_ru.home, 'data/unit.csv')
self.modify_file(
csv_file, ',Find facts faster,', ',%s,' % title_ru)
self.modify_file(
os.path.join(self.course_ru.home, 'data/lesson.csv'),
',Find facts faster,', ',%s,' % title_ru)
rows = []
for row in csv.reader(open(csv_file)):
rows.append(row)
assert title_ru == rows[6][3].decode('utf-8')
response = self.get('/courses/%s/preview' % self.course_ru.path)
assert_contains(title_ru, response.body)
# Tests student perspective.
self.walk_the_course(self.course_ru, first_time=True)
self.walk_the_course(self.course_ru, first_time=False)
# Test course author dashboard.
self.walk_the_course(
self.course_ru, first_time=False, is_admin=True, logout=False)
def assert_page_contains(page_name, text_array):
dashboard_url = '/courses/%s/dashboard' % self.course_ru.path
response = self.get('%s?action=%s' % (dashboard_url, page_name))
for text in text_array:
assert_contains(text, response.body)
assert_page_contains('', [
title_ru, self.course_ru.unit_title, self.course_ru.lesson_title])
assert_page_contains(
'assets', [self.course_ru.title])
assert_page_contains(
'settings', [
self.course_ru.title,
vfs.AbstractFileSystem.normpath(self.course_ru.home)])
# Clean up.
actions.logout()
def test_i18n(self):
"""Test course is properly internationalized."""
response = self.get('/courses/%s/preview' % self.course_ru.path)
assert_contains_all_of(
[u'Войти', u'Регистрация', u'Расписание', u'Курс'], response.body)
class CourseUrlRewritingTestBase(actions.TestBase):
"""Prepare course for using rewrite rules and '/courses/pswg' base URL."""
def setUp(self): # pylint: disable-msg=g-bad-name
super(CourseUrlRewritingTestBase, self).setUp()
self.base = '/courses/pswg'
self.namespace = 'gcb-courses-pswg-tests-ns'
sites.setup_courses('course:%s:/:%s' % (self.base, self.namespace))
def tearDown(self): # pylint: disable-msg=g-bad-name
sites.reset_courses()
super(CourseUrlRewritingTestBase, self).tearDown()
def canonicalize(self, href, response=None):
"""Canonicalize URL's using either <base> or self.base."""
# Check if already canonicalized.
if href.startswith(
self.base) or utils.ApplicationHandler.is_absolute(href):
pass
else:
# Look for <base> tag in the response to compute the canonical URL.
if response:
return super(CourseUrlRewritingTestBase, self).canonicalize(
href, response)
# Prepend self.base to compute the canonical URL.
if not href.startswith('/'):
href = '/%s' % href
href = '%s%s' % (self.base, href)
self.audit_url(href)
return href
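# Illustrative sketch (not executed), assuming self.base is
# '/courses/pswg' as set in setUp():
#
#     self.canonicalize('unit?unit=1')
#     # -> '/courses/pswg/unit?unit=1'
#     self.canonicalize('/courses/pswg/course')
#     # -> unchanged, already canonical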
class VirtualFileSystemTestBase(actions.TestBase):
"""Prepares a course running on a virtual local file system."""
def setUp(self): # pylint: disable-msg=g-bad-name
"""Configure the test."""
super(VirtualFileSystemTestBase, self).setUp()
GeneratedCourse.set_data_home(self)
# Override BUNDLE_ROOT.
self.bundle_root = appengine_config.BUNDLE_ROOT
appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home
# Prepare course content.
home_folder = os.path.join(GeneratedCourse.data_home, 'data-v')
clone_canonical_course_data(self.bundle_root, home_folder)
# Configure course.
self.namespace = 'nsv'
sites.setup_courses('course:/:/data-vfs:%s' % self.namespace)
# Modify app_context filesystem to map /data-v to /data-vfs.
def after_create(unused_cls, instance):
# pylint: disable-msg=protected-access
instance._fs = vfs.AbstractFileSystem(
vfs.LocalReadOnlyFileSystem(
os.path.join(GeneratedCourse.data_home, 'data-vfs'),
home_folder))
sites.ApplicationContext.after_create = after_create
def tearDown(self): # pylint: disable-msg=g-bad-name
"""Clean up."""
sites.reset_courses()
appengine_config.BUNDLE_ROOT = self.bundle_root
super(VirtualFileSystemTestBase, self).tearDown()
class DatastoreBackedCourseTest(actions.TestBase):
"""Prepares an empty course running on datastore-backed file system."""
def setUp(self): # pylint: disable-msg=g-bad-name
"""Configure the test."""
super(DatastoreBackedCourseTest, self).setUp()
self.supports_editing = True
self.namespace = 'dsbfs'
sites.setup_courses('course:/::%s' % self.namespace)
all_courses = sites.get_all_courses()
assert len(all_courses) == 1
self.app_context = all_courses[0]
def tearDown(self): # pylint: disable-msg=g-bad-name
"""Clean up."""
sites.reset_courses()
super(DatastoreBackedCourseTest, self).tearDown()
def upload_all_in_dir(self, dir_name, files_added):
"""Uploads all files in a folder to vfs."""
root_dir = os.path.join(appengine_config.BUNDLE_ROOT, dir_name)
for root, unused_dirs, files in os.walk(root_dir):
for afile in files:
filename = os.path.join(root, afile)
self.app_context.fs.put(filename, open(filename, 'rb'))
files_added.append(filename)
def init_course_data(self, upload_files):
"""Uploads required course data files into vfs."""
files_added = []
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(self.namespace)
upload_files(files_added)
# Normalize paths to be identical for Windows and Linux.
files_added_normpath = []
for file_added in files_added:
files_added_normpath.append(
vfs.AbstractFileSystem.normpath(file_added))
assert self.app_context.fs.list(
appengine_config.BUNDLE_ROOT) == sorted(files_added_normpath)
finally:
namespace_manager.set_namespace(old_namespace)
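# The set_namespace() / try / finally pattern used here (and throughout
# these tests) restores the previous namespace even if the body raises,
# so one test's namespace never leaks into subsequent assertions.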
def upload_all_sample_course_files(self, files_added):
"""Uploads all sample course data files into vfs."""
self.upload_all_in_dir('assets', files_added)
self.upload_all_in_dir('views', files_added)
self.upload_all_in_dir('data', files_added)
course_yaml = os.path.join(
appengine_config.BUNDLE_ROOT, 'course.yaml')
self.app_context.fs.put(course_yaml, open(course_yaml, 'rb'))
files_added.append(course_yaml)
class DatastoreBackedCustomCourseTest(DatastoreBackedCourseTest):
"""Prepares a sample course running on datastore-backed file system."""
def test_course_import(self):
"""Test importing of the course."""
# Setup courses.
sites.setup_courses('course:/test::ns_test, course:/:/')
self.namespace = 'ns_test'
self.base = '/test'
config.Registry.test_overrides[
models.CAN_USE_MEMCACHE.name] = True
# Format import payload and URL.
payload_dict = {}
payload_dict['course'] = 'course:/:/'
request = {}
request['payload'] = transforms.dumps(payload_dict)
import_put_url = (
'/test/rest/course/import?%s' % urllib.urlencode(
{'request': transforms.dumps(request)}))
# Check non-logged user has no rights.
response = self.testapp.put(import_put_url, {}, expect_errors=True)
assert_equals(404, response.status_int)
# Login as admin.
email = '[email protected]'
name = 'Test Course Import'
actions.login(email, is_admin=True)
# Check course is empty.
response = self.get('/test/dashboard')
assert_equals(200, response.status_int)
assert_does_not_contain('Filter image results by color', response.body)
# Import sample course.
response = self.put(import_put_url, {})
assert_equals(200, response.status_int)
assert_contains('Imported.', response.body)
# Check course is not empty.
response = self.get('/test/dashboard')
assert_contains('Filter image results by color', response.body)
# Check assessment is copied.
response = self.get('/test/assets/js/assessment-21.js')
assert_equals(200, response.status_int)
assert_contains('Humane Society website', response.body)
# Check activity is copied.
response = self.get('/test/assets/js/activity-37.js')
assert_equals(200, response.status_int)
assert_contains('explore ways to keep yourself updated', response.body)
unit_2_title = 'Unit 2 - Interpreting results'
lesson_2_1_title = '2.1 When search results suggest something new'
lesson_2_2_title = '2.2 Thinking more deeply about your search'
# Check units and lessons are indexed correctly.
response = self.get('/test/preview')
assert_contains(unit_2_title, response.body)
actions.register(self, name)
response = self.get('/test/course')
assert_contains(unit_2_title, response.body)
# Unit page.
response = self.get('/test/unit?unit=9')
assert_contains( # A unit title.
unit_2_title, response.body)
assert_contains( # First child lesson without link.
lesson_2_1_title, response.body)
assert_contains( # Second child lesson with link.
lesson_2_2_title, response.body)
assert_contains_all_of( # Breadcrumbs.
['Unit 2</a></li>', 'Lesson 1</li>'], response.body)
# Unit page.
response = self.get('/test/activity?unit=9&lesson=10')
assert_contains( # A unit title.
unit_2_title, response.body)
assert_contains( # An activity title.
'Lesson 2.1 Activity', response.body)
assert_contains( # First child lesson without link.
lesson_2_1_title, response.body)
assert_contains( # Second child lesson with link.
lesson_2_2_title, response.body)
assert_contains_all_of( # Breadcrumbs.
['Unit 2</a></li>', 'Lesson 2</a></li>'], response.body)
# Clean up.
sites.reset_courses()
config.Registry.test_overrides = {}
def test_get_put_file(self):
"""Test that one can put/get file via REST interface."""
self.init_course_data(self.upload_all_sample_course_files)
email = '[email protected]'
actions.login(email, True)
response = self.get('dashboard?action=settings')
# Check course.yaml edit form.
compute_form = response.forms['edit_course_yaml']
response = self.submit(compute_form)
assert_equals(response.status_int, 302)
assert_contains(
'dashboard?action=edit_settings&key=%2Fcourse.yaml',
response.location)
response = self.get(response.location)
assert_contains('rest/files/item?key=%2Fcourse.yaml', response.body)
# Get text file.
response = self.get('rest/files/item?key=%2Fcourse.yaml')
assert_equals(response.status_int, 200)
json_dict = transforms.loads(
transforms.loads(response.body)['payload'])
assert '/course.yaml' == json_dict['key']
assert 'text/utf-8' == json_dict['encoding']
assert (open(os.path.join(
appengine_config.BUNDLE_ROOT, 'course.yaml')).read(
) == json_dict['content'])
def test_empty_course(self):
"""Test course with no assets and the simlest possible course.yaml."""
email = '[email protected]'
actions.login(email, True)
# Check minimal preview page comes up.
response = self.get('preview')
assert_contains('UNTITLED COURSE', response.body)
assert_contains('Registration', response.body)
# Check inheritable files are accessible.
response = self.get('/assets/css/main.css')
assert (open(os.path.join(
appengine_config.BUNDLE_ROOT, 'assets/css/main.css')).read(
) == response.body)
# Check non-inheritable files are not inherited.
response = self.testapp.get(
'/assets/js/activity-1.3.js', expect_errors=True)
assert_equals(response.status_int, 404)
# Login as admin.
email = '[email protected]'
actions.login(email, True)
response = self.get('dashboard')
# Add unit.
compute_form = response.forms['add_unit']
response = self.submit(compute_form)
response = self.get('/rest/course/unit?key=1')
assert_equals(response.status_int, 200)
# Add lessons.
response = self.get('dashboard')
compute_form = response.forms['add_lesson']
response = self.submit(compute_form)
response = self.get('/rest/course/lesson?key=2')
assert_equals(response.status_int, 200)
# Add assessment.
response = self.get('dashboard')
compute_form = response.forms['add_assessment']
response = self.submit(compute_form)
response = self.get('/rest/course/assessment?key=3')
assert_equals(response.status_int, 200)
# Add link.
response = self.get('dashboard')
compute_form = response.forms['add_link']
response = self.submit(compute_form)
response = self.get('/rest/course/link?key=4')
assert_equals(response.status_int, 200)
def import_sample_course(self):
"""Imports a sample course."""
# Setup courses.
sites.setup_courses('course:/test::ns_test, course:/:/')
# Import sample course.
dst_app_context = sites.get_all_courses()[0]
src_app_context = sites.get_all_courses()[1]
dst_course = courses.Course(None, app_context=dst_app_context)
errors = []
src_course_out, dst_course_out = dst_course.import_from(
src_app_context, errors)
if errors:
raise Exception(errors)
assert len(
src_course_out.get_units()) == len(dst_course_out.get_units())
dst_course_out.save()
# Clean up.
sites.reset_courses()
def test_imported_course_performance(self):
"""Tests various pages of the imported course."""
self.import_sample_course()
# Install a clone on '/' so that all the tests treat it as the normal
# sample course.
sites.setup_courses('course:/::ns_test')
self.namespace = 'ns_test'
# Enable memcache.
config.Registry.test_overrides[
models.CAN_USE_MEMCACHE.name] = True
# Override course.yaml settings by patching app_context.
get_environ_old = sites.ApplicationContext.get_environ
def get_environ_new(self):
environ = get_environ_old(self)
environ['course']['now_available'] = True
return environ
sites.ApplicationContext.get_environ = get_environ_new
def custom_inc(unused_increment=1, context=None):
"""A custom inc() function for cache miss counter."""
self.keys.append(context)
self.count += 1
def assert_cached(url, assert_text, cache_miss_allowed=0):
"""Checks that specific URL supports caching."""
memcache.flush_all()
self.keys = []
self.count = 0
# Expect cache misses first time we load page.
cache_miss_before = self.count
response = self.get(url)
assert_contains(assert_text, response.body)
assert cache_miss_before != self.count
            # Expect no cache misses the second time we load the page.
self.keys = []
cache_miss_before = self.count
response = self.get(url)
assert_contains(assert_text, response.body)
cache_miss_actual = self.count - cache_miss_before
if cache_miss_actual != cache_miss_allowed:
raise Exception(
'Expected %s cache misses, got %s. Keys are:\n%s' % (
cache_miss_allowed, cache_miss_actual,
'\n'.join(self.keys)))
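        # Route cache-miss counting through custom_inc so assert_cached can
        # inspect self.keys and self.count.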
old_inc = models.CACHE_MISS.inc
models.CACHE_MISS.inc = custom_inc
# Walk the site.
email = '[email protected]'
name = 'Test Units Lessons'
assert_cached('preview', 'Putting it all together')
actions.login(email, True)
assert_cached('preview', 'Putting it all together')
actions.register(self, name)
assert_cached(
'unit?unit=9', 'When search results suggest something new')
assert_cached(
'unit?unit=9&lesson=12', 'Understand options for different media')
# Clean up.
models.CACHE_MISS.inc = old_inc
sites.ApplicationContext.get_environ = get_environ_old
config.Registry.test_overrides = {}
sites.reset_courses()
def test_imported_course(self):
"""Tests various pages of the imported course."""
        # TODO(psimakov): Ideally, this test class should run all aspect tests
        # and they all should pass. However, the IDs in the cloned course
        # do not match the IDs of the source sample course, and we fetch pages
        # and assert page content using IDs. For now, we check a minimal
        # set of pages manually. Later, we should make it run all known tests.
self.import_sample_course()
# Install a clone on the '/' so all the tests will treat it as normal
# sample course.
sites.setup_courses('course:/::ns_test')
self.namespace = 'ns_test'
email = '[email protected]'
name = 'Test Units Lessons'
actions.login(email, True)
response = self.get('preview')
assert_contains('Putting it all together', response.body)
actions.register(self, name)
actions.check_profile(self, name)
actions.view_announcements(self)
# Check unit page without lesson specified.
response = self.get('unit?unit=9')
assert_contains('Interpreting results', response.body)
assert_contains(
'When search results suggest something new', response.body)
        # Check unit page with a lesson specified.
response = self.get('unit?unit=9&lesson=12')
assert_contains('Interpreting results', response.body)
assert_contains(
'Understand options for different media', response.body)
        # Check assessment page.
response = self.get('assessment?name=21')
assert_contains(
'<script src="assets/js/assessment-21.js"></script>', response.body)
# Check activity page.
response = self.get('activity?unit=9&lesson=13')
assert_contains(
'<script src="assets/js/activity-13.js"></script>',
response.body)
# Clean up.
sites.reset_courses()
class DatastoreBackedSampleCourseTest(DatastoreBackedCourseTest):
"""Run all existing tests using datastore-backed file system."""
def setUp(self): # pylint: disable-msg=g-bad-name
super(DatastoreBackedSampleCourseTest, self).setUp()
self.init_course_data(self.upload_all_sample_course_files)
class FakeEnvironment(object):
"""Temporary fake tools.etl.remote.Evironment.
Bypasses making a remote_api connection because webtest can't handle it and
we don't want to bring up a local server for our functional tests. When this
fake is used, the in-process datastore stub will handle RPCs.
TODO(johncox): find a way to make webtest successfully emulate the
remote_api endpoint and get rid of this fake.
"""
def __init__(self, application_id, server, path=None):
        self._application_id = application_id
self._path = path
self._server = server
def establish(self):
pass
class EtlMainTestCase(DatastoreBackedCourseTest):
"""Tests tools/etl/etl.py's main()."""
# Allow access to protected members under test.
# pylint: disable-msg=protected-access
def setUp(self):
"""Configures EtlMainTestCase."""
super(EtlMainTestCase, self).setUp()
self.test_environ = copy.deepcopy(os.environ)
# In etl.main, use test auth scheme to avoid interactive login.
self.test_environ['SERVER_SOFTWARE'] = remote.TEST_SERVER_SOFTWARE
self.test_tempdir = os.path.join(TEST_DATA_BASE, 'EtlMainTestCase')
self.archive_path = os.path.join(self.test_tempdir, 'archive.zip')
self.new_course_title = 'New Course Title'
self.sdk_path = os.environ.get('GOOGLE_APP_ENGINE_HOME')
# Find App Engine SDK folder by navigating up four folders from well
# known google.appengine.api.memcache.
self.sdk_path = os.path.abspath(memcache.__file__).rsplit(os.sep, 5)[0]
self.url_prefix = '/test'
self.raw = 'course:%s::ns_test' % self.url_prefix
self.swap(os, 'environ', self.test_environ)
self.common_args = [
etl._TYPES[0], self.url_prefix, 'myapp', 'localhost:8080',
self.archive_path, '--sdk_path', self.sdk_path]
self.download_args = etl._PARSER.parse_args(
['download'] + self.common_args)
self.upload_args = etl._PARSER.parse_args(['upload'] + self.common_args)
# Set up courses: version 1.3, version 1.2.
sites.setup_courses(self.raw + ', course:/:/')
self.reset_filesystem()
def tearDown(self):
self.reset_filesystem(remove_only=True)
sites.reset_courses()
super(EtlMainTestCase, self).tearDown()
def create_archive(self):
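        """Uploads and imports the sample course, downloads it to archive_path via etl.main, then resets courses."""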
self.upload_all_sample_course_files([])
self.import_sample_course()
args = etl._PARSER.parse_args(['download'] + self.common_args)
etl.main(args, environment_class=FakeEnvironment)
sites.reset_courses()
def create_empty_course(self, raw):
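        """Sets up a course from the given spec and deletes all of its files under BUNDLE_ROOT."""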
sites.setup_courses(raw)
context = etl._get_requested_context(
sites.get_all_courses(), self.url_prefix)
for entity in context.fs.impl.list(appengine_config.BUNDLE_ROOT):
context.fs.impl.delete(entity)
def import_sample_course(self):
"""Imports a sample course."""
# Import sample course.
dst_app_context = sites.get_all_courses()[0]
src_app_context = sites.get_all_courses()[1]
# Patch in a course.yaml.
yaml = copy.deepcopy(courses.DEFAULT_COURSE_YAML_DICT)
yaml['course']['title'] = self.new_course_title
dst_app_context.fs.impl.put(
os.path.join(appengine_config.BUNDLE_ROOT, etl._COURSE_YAML_PATH),
etl._ReadWrapper(str(yaml)), is_draft=False)
dst_course = courses.Course(None, app_context=dst_app_context)
errors = []
src_course_out, dst_course_out = dst_course.import_from(
src_app_context, errors)
if errors:
raise Exception(errors)
assert len(
src_course_out.get_units()) == len(dst_course_out.get_units())
dst_course_out.save()
def reset_filesystem(self, remove_only=False):
if os.path.exists(self.test_tempdir):
shutil.rmtree(self.test_tempdir)
if not remove_only:
os.makedirs(self.test_tempdir)
def test_download_creates_valid_archive(self):
"""Tests download of course data and archive creation."""
self.upload_all_sample_course_files([])
self.import_sample_course()
etl.main(self.download_args, environment_class=FakeEnvironment)
# Don't use Archive and Manifest here because we want to test the raw
# structure of the emitted zipfile.
zip_archive = zipfile.ZipFile(self.archive_path)
manifest = transforms.loads(
zip_archive.open(etl._MANIFEST_FILENAME).read())
self.assertGreaterEqual(
courses.COURSE_MODEL_VERSION_1_3, manifest['version'])
self.assertEqual(
'course:%s::ns_test' % self.url_prefix, manifest['raw'])
for entity in manifest['entities']:
self.assertTrue(entity.has_key('is_draft'))
self.assertTrue(zip_archive.open(entity['path']))
def test_download_errors_if_archive_path_exists_on_disk(self):
self.upload_all_sample_course_files([])
self.import_sample_course()
etl.main(self.download_args, environment_class=FakeEnvironment)
self.assertRaises(
SystemExit, etl.main, self.download_args,
environment_class=FakeEnvironment)
def test_download_errors_if_course_url_prefix_does_not_exist(self):
sites.reset_courses()
self.assertRaises(
SystemExit, etl.main, self.download_args,
environment_class=FakeEnvironment)
def test_download_errors_if_course_version_is_pre_1_3(self):
args = etl._PARSER.parse_args(
['download', 'course', '/'] + self.common_args[2:])
self.upload_all_sample_course_files([])
self.import_sample_course()
self.assertRaises(
SystemExit, etl.main, args, environment_class=FakeEnvironment)
def test_upload_fails_if_archive_cannot_be_opened(self):
sites.setup_courses(self.raw)
self.assertRaises(
SystemExit, etl.main, self.upload_args,
environment_class=FakeEnvironment)
def test_upload_fails_if_archive_course_json_malformed(self):
self.create_archive()
self.create_empty_course(self.raw)
zip_archive = zipfile.ZipFile(self.archive_path, 'a')
zip_archive.writestr(etl._COURSE_JSON_PATH, 'garbage')
zip_archive.close()
self.assertRaises(
SystemExit, etl.main, self.upload_args,
environment_class=FakeEnvironment)
def test_upload_fails_if_archive_course_yaml_malformed(self):
self.create_archive()
self.create_empty_course(self.raw)
zip_archive = zipfile.ZipFile(self.archive_path, 'a')
zip_archive.writestr(etl._COURSE_YAML_PATH, '{')
zip_archive.close()
self.assertRaises(
SystemExit, etl.main, self.upload_args,
environment_class=FakeEnvironment)
def test_upload_fails_if_course_with_units_found(self):
self.upload_all_sample_course_files([])
self.import_sample_course()
self.assertRaises(
SystemExit, etl.main, self.upload_args,
environment_class=FakeEnvironment)
def test_upload_fails_if_no_course_with_url_prefix_found(self):
self.create_archive()
self.assertRaises(
SystemExit, etl.main, self.upload_args,
environment_class=FakeEnvironment)
def test_upload_succeeds(self):
"""Tests upload of archive contents."""
self.create_archive()
self.create_empty_course(self.raw)
context = etl._get_requested_context(
sites.get_all_courses(), self.upload_args.course_url_prefix)
self.assertNotEqual(self.new_course_title, context.get_title())
etl.main(self.upload_args, environment_class=FakeEnvironment)
archive = etl._Archive(self.archive_path)
archive.open('r')
context = etl._get_requested_context(
sites.get_all_courses(), self.upload_args.course_url_prefix)
filesystem_contents = context.fs.impl.list(appengine_config.BUNDLE_ROOT)
self.assertEqual(
len(archive.manifest.entities), len(filesystem_contents))
self.assertEqual(self.new_course_title, context.get_title())
units = etl._get_course_from(context).get_units()
spot_check_single_unit = [u for u in units if u.unit_id == 9][0]
self.assertEqual('Interpreting results', spot_check_single_unit.title)
for unit in units:
self.assertTrue(unit.title)
for entity in archive.manifest.entities:
full_path = os.path.join(appengine_config.BUNDLE_ROOT, entity.path)
stream = context.fs.impl.get(full_path)
self.assertEqual(entity.is_draft, stream.metadata.is_draft)
# TODO(johncox): re-enable these tests once we figure out how to make webtest
# play nice with remote_api.
class EtlRemoteEnvironmentTestCase(actions.TestBase):
"""Tests tools/etl/remote.py."""
# Method name determined by superclass. pylint: disable-msg=g-bad-name
def setUp(self):
super(EtlRemoteEnvironmentTestCase, self).setUp()
self.test_environ = copy.deepcopy(os.environ)
# Allow access to protected members under test.
# pylint: disable-msg=protected-access
def disabled_test_can_establish_environment_in_dev_mode(self):
# Stub the call that requires user input so the test runs unattended.
self.swap(__builtin__, 'raw_input', lambda _: 'username')
self.assertEqual(os.environ['SERVER_SOFTWARE'], remote.SERVER_SOFTWARE)
# establish() performs RPC. If it doesn't throw, we're good.
remote.Environment('mycourse', 'localhost:8080').establish()
def disabled_test_can_establish_environment_in_test_mode(self):
self.test_environ['SERVER_SOFTWARE'] = remote.TEST_SERVER_SOFTWARE
self.swap(os, 'environ', self.test_environ)
# establish() performs RPC. If it doesn't throw, we're good.
remote.Environment('mycourse', 'localhost:8080').establish()
class CourseUrlRewritingTest(CourseUrlRewritingTestBase):
"""Run all existing tests using '/courses/pswg' base URL rewrite rules."""
class VirtualFileSystemTest(VirtualFileSystemTestBase):
"""Run all existing tests using virtual local file system."""
class MemcacheTestBase(actions.TestBase):
"""Executes all tests with memcache enabled."""
def setUp(self): # pylint: disable-msg=g-bad-name
super(MemcacheTestBase, self).setUp()
config.Registry.test_overrides = {models.CAN_USE_MEMCACHE.name: True}
def tearDown(self): # pylint: disable-msg=g-bad-name
config.Registry.test_overrides = {}
        super(MemcacheTestBase, self).tearDown()
class MemcacheTest(MemcacheTestBase):
"""Executes all tests with memcache enabled."""
ALL_COURSE_TESTS = (
StudentAspectTest, AssessmentTest, CourseAuthorAspectTest,
StaticHandlerTest, AdminAspectTest)
MemcacheTest.__bases__ += (InfrastructureTest,) + ALL_COURSE_TESTS
CourseUrlRewritingTest.__bases__ += ALL_COURSE_TESTS
VirtualFileSystemTest.__bases__ += ALL_COURSE_TESTS
DatastoreBackedSampleCourseTest.__bases__ += ALL_COURSE_TESTS
| []
| []
| [
"GOOGLE_APP_ENGINE_HOME",
"gcb_admin_user_emails",
"APPLICATION_ID",
"USER_EMAIL",
"SERVER_SOFTWARE",
"AUTH_DOMAIN",
"USER_ID"
]
| [] | ["GOOGLE_APP_ENGINE_HOME", "gcb_admin_user_emails", "APPLICATION_ID", "USER_EMAIL", "SERVER_SOFTWARE", "AUTH_DOMAIN", "USER_ID"] | python | 7 | 0 | |
it/server/src/test/java/com/walmartlabs/concord/it/server/RequirementsIT.java | package com.walmartlabs.concord.it.server;
/*-
* *****
* Concord
* -----
* Copyright (C) 2017 - 2019 Walmart Inc.
* -----
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =====
*/
import com.walmartlabs.concord.client.*;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
import static com.walmartlabs.concord.it.common.ITUtils.archive;
import static com.walmartlabs.concord.it.common.ServerClient.assertLog;
import static com.walmartlabs.concord.it.common.ServerClient.waitForCompletion;
import static org.junit.Assert.*;
import static org.junit.Assume.assumeNotNull;
// requires an agent with custom "capabilities" configured
public class RequirementsIT extends AbstractServerIT {
@BeforeClass
public static void setUp() {
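        // assumeNotNull skips every test in this class when no custom-capability agents are available.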
assumeNotNull(System.getenv("IT_CUSTOM_AGENTS"));
}
@Test(timeout = DEFAULT_TEST_TIMEOUT)
public void testForkWithRequirements() throws Exception {
String orgName = "org_" + randomString();
OrganizationsApi orgApi = new OrganizationsApi(getApiClient());
orgApi.createOrUpdate(new OrganizationEntry().setName(orgName));
String projectName = "project_" + randomString();
ProjectsApi projectsApi = new ProjectsApi(getApiClient());
projectsApi.createOrUpdate(orgName, new ProjectEntry()
.setName(projectName)
.setVisibility(ProjectEntry.VisibilityEnum.PUBLIC)
.setRawPayloadMode(ProjectEntry.RawPayloadModeEnum.EVERYONE));
byte[] payload = archive(ProcessRbacIT.class.getResource("concordTaskForkWithRequirements").toURI());
Map<String, Object> input = new HashMap<>();
input.put("archive", payload);
input.put("org", orgName);
input.put("project", projectName);
StartProcessResponse parentSpr = start(input);
ProcessApi processApi = new ProcessApi(getApiClient());
ProcessEntry pe = waitForCompletion(processApi, parentSpr.getInstanceId());
assertNotNull(pe.getRequirements());
assertFalse(pe.getRequirements().isEmpty());
ProcessEntry processEntry = processApi.get(parentSpr.getInstanceId());
assertEquals(1, processEntry.getChildrenIds().size());
ProcessEntry child = processApi.get(processEntry.getChildrenIds().get(0));
assertNotNull(child);
assertEquals(ProcessEntry.StatusEnum.FINISHED, child.getStatus());
// ---
byte[] ab = getLog(child.getLogFileName());
assertLog(".*Hello from a subprocess.*", ab);
// ---
assertNotNull(child.getRequirements());
assertEquals(pe.getRequirements(), child.getRequirements());
}
}
| [
"\"IT_CUSTOM_AGENTS\""
]
| []
| [
"IT_CUSTOM_AGENTS"
]
| [] | ["IT_CUSTOM_AGENTS"] | java | 1 | 0 | |
config.go | // Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package beego
import (
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"github.com/astaxie/beego/config"
"github.com/astaxie/beego/context"
"github.com/astaxie/beego/logs"
"github.com/astaxie/beego/session"
"github.com/astaxie/beego/utils"
)
// Config is the main struct for BConfig
type Config struct {
AppName string //Application name
RunMode string //Running Mode: dev | prod
RouterCaseSensitive bool
ServerName string
RecoverPanic bool
RecoverFunc func(*context.Context)
CopyRequestBody bool
EnableGzip bool
MaxMemory int64
EnableErrorsShow bool
EnableErrorsRender bool
Listen Listen
WebConfig WebConfig
Log LogConfig
}
// Listen holds for http and https related config
type Listen struct {
Graceful bool // Graceful means use graceful module to start the server
ServerTimeOut int64
ListenTCP4 bool
EnableHTTP bool
HTTPAddr string
HTTPPort int
AutoTLS bool
Domains []string
TLSCacheDir string
EnableHTTPS bool
EnableMutualHTTPS bool
HTTPSAddr string
HTTPSPort int
HTTPSCertFile string
HTTPSKeyFile string
TrustCaFile string
EnableAdmin bool
AdminAddr string
AdminPort int
EnableFcgi bool
EnableStdIo bool // EnableStdIo works with EnableFcgi Use FCGI via standard I/O
}
// WebConfig holds web related config
type WebConfig struct {
AutoRender bool
EnableDocs bool
FlashName string
FlashSeparator string
DirectoryIndex bool
StaticDir map[string]string
StaticExtensionsToGzip []string
TemplateLeft string
TemplateRight string
ViewsPath string
EnableXSRF bool
XSRFKey string
XSRFExpire int
Session SessionConfig
}
// SessionConfig holds session related config
type SessionConfig struct {
SessionOn bool
SessionProvider string
SessionName string
SessionGCMaxLifetime int64
SessionProviderConfig string
SessionCookieLifeTime int
SessionAutoSetCookie bool
SessionDomain string
SessionDisableHTTPOnly bool // used to allow for cross domain cookies/javascript cookies.
SessionEnableSidInHTTPHeader bool // enable store/get the sessionId into/from http headers
SessionNameInHTTPHeader string
SessionEnableSidInURLQuery bool // enable get the sessionId from Url Query params
}
// LogConfig holds Log related config
type LogConfig struct {
AccessLogs bool
EnableStaticLogs bool //log static files requests default: false
AccessLogsFormat string //access log format: JSON_FORMAT, APACHE_FORMAT or empty string
FileLineNum bool
Outputs map[string]string // Store Adaptor : config
}
var (
// BConfig is the default config for Application
BConfig *Config
// AppConfig is the instance of Config, store the config information from file
AppConfig *beegoAppConfig
// AppPath is the absolute path to the app
AppPath string
// GlobalSessions is the instance for the session manager
GlobalSessions *session.Manager
// appConfigPath is the path to the config files
appConfigPath string
// appConfigProvider is the provider for the config, default is ini
appConfigProvider = "ini"
)
func init() {
BConfig = newBConfig()
var err error
if AppPath, err = filepath.Abs(filepath.Dir(os.Args[0])); err != nil {
panic(err)
}
workPath, err := os.Getwd()
if err != nil {
panic(err)
}
var filename = "app.conf"
if os.Getenv("BEEGO_RUNMODE") != "" {
filename = os.Getenv("BEEGO_RUNMODE") + ".app.conf"
}
appConfigPath = filepath.Join(workPath, "conf", filename)
if !utils.FileExists(appConfigPath) {
appConfigPath = filepath.Join(AppPath, "conf", filename)
if !utils.FileExists(appConfigPath) {
AppConfig = &beegoAppConfig{innerConfig: config.NewFakeConfig()}
return
}
}
if err = parseConfig(appConfigPath); err != nil {
panic(err)
}
}
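// recoverPanic is the default RecoverFunc: it logs the panic and stack trace,
// optionally renders the error page in dev mode, and writes a 500 status.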
func recoverPanic(ctx *context.Context) {
if err := recover(); err != nil {
if err == ErrAbort {
return
}
if !BConfig.RecoverPanic {
panic(err)
}
if BConfig.EnableErrorsShow {
if _, ok := ErrorMaps[fmt.Sprint(err)]; ok {
exception(fmt.Sprint(err), ctx)
return
}
}
var stack string
logs.Critical("the request url is ", ctx.Input.URL())
logs.Critical("Handler crashed with error", err)
for i := 1; ; i++ {
_, file, line, ok := runtime.Caller(i)
if !ok {
break
}
logs.Critical(fmt.Sprintf("%s:%d", file, line))
stack = stack + fmt.Sprintln(fmt.Sprintf("%s:%d", file, line))
}
if BConfig.RunMode == DEV && BConfig.EnableErrorsRender {
showErr(err, ctx, stack)
}
if ctx.Output.Status != 0 {
ctx.ResponseWriter.WriteHeader(ctx.Output.Status)
} else {
ctx.ResponseWriter.WriteHeader(500)
}
}
}
func newBConfig() *Config {
return &Config{
AppName: "beego",
RunMode: PROD,
RouterCaseSensitive: true,
ServerName: "beegoServer:" + VERSION,
RecoverPanic: true,
RecoverFunc: recoverPanic,
CopyRequestBody: false,
EnableGzip: false,
MaxMemory: 1 << 26, //64MB
EnableErrorsShow: true,
EnableErrorsRender: true,
Listen: Listen{
Graceful: false,
ServerTimeOut: 0,
ListenTCP4: false,
EnableHTTP: true,
AutoTLS: false,
Domains: []string{},
TLSCacheDir: ".",
HTTPAddr: "",
HTTPPort: 8080,
EnableHTTPS: false,
HTTPSAddr: "",
HTTPSPort: 10443,
HTTPSCertFile: "",
HTTPSKeyFile: "",
EnableAdmin: false,
AdminAddr: "",
AdminPort: 8088,
EnableFcgi: false,
EnableStdIo: false,
},
WebConfig: WebConfig{
AutoRender: true,
EnableDocs: false,
FlashName: "BEEGO_FLASH",
FlashSeparator: "BEEGOFLASH",
DirectoryIndex: false,
StaticDir: map[string]string{"/static": "static"},
StaticExtensionsToGzip: []string{".css", ".js"},
TemplateLeft: "{{",
TemplateRight: "}}",
ViewsPath: "views",
EnableXSRF: false,
XSRFKey: "beegoxsrf",
XSRFExpire: 0,
Session: SessionConfig{
SessionOn: false,
SessionProvider: "memory",
SessionName: "beegosessionID",
SessionGCMaxLifetime: 3600,
SessionProviderConfig: "",
SessionDisableHTTPOnly: false,
SessionCookieLifeTime: 0, //set cookie default is the browser life
SessionAutoSetCookie: true,
SessionDomain: "",
SessionEnableSidInHTTPHeader: false, // enable store/get the sessionId into/from http headers
SessionNameInHTTPHeader: "Beegosessionid",
SessionEnableSidInURLQuery: false, // enable get the sessionId from Url Query params
},
},
Log: LogConfig{
AccessLogs: false,
EnableStaticLogs: false,
AccessLogsFormat: "APACHE_FORMAT",
FileLineNum: true,
Outputs: map[string]string{"console": ""},
},
}
}
// currently only supports ini; json support will be added next.
func parseConfig(appConfigPath string) (err error) {
AppConfig, err = newAppConfig(appConfigProvider, appConfigPath)
if err != nil {
return err
}
return assignConfig(AppConfig)
}
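// assignConfig copies parsed file values into BConfig and then applies the
// RunMode, StaticDir, StaticExtensionsToGzip and LogOutputs overrides.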
func assignConfig(ac config.Configer) error {
for _, i := range []interface{}{BConfig, &BConfig.Listen, &BConfig.WebConfig, &BConfig.Log, &BConfig.WebConfig.Session} {
assignSingleConfig(i, ac)
}
// set the run mode first
if envRunMode := os.Getenv("BEEGO_RUNMODE"); envRunMode != "" {
BConfig.RunMode = envRunMode
} else if runMode := ac.String("RunMode"); runMode != "" {
BConfig.RunMode = runMode
}
if sd := ac.String("StaticDir"); sd != "" {
BConfig.WebConfig.StaticDir = map[string]string{}
sds := strings.Fields(sd)
for _, v := range sds {
if url2fsmap := strings.SplitN(v, ":", 2); len(url2fsmap) == 2 {
BConfig.WebConfig.StaticDir["/"+strings.Trim(url2fsmap[0], "/")] = url2fsmap[1]
} else {
BConfig.WebConfig.StaticDir["/"+strings.Trim(url2fsmap[0], "/")] = url2fsmap[0]
}
}
}
if sgz := ac.String("StaticExtensionsToGzip"); sgz != "" {
extensions := strings.Split(sgz, ",")
fileExts := []string{}
for _, ext := range extensions {
ext = strings.TrimSpace(ext)
if ext == "" {
continue
}
if !strings.HasPrefix(ext, ".") {
ext = "." + ext
}
fileExts = append(fileExts, ext)
}
if len(fileExts) > 0 {
BConfig.WebConfig.StaticExtensionsToGzip = fileExts
}
}
if lo := ac.String("LogOutputs"); lo != "" {
// if lo is not nil or empty
// means user has set his own LogOutputs
// clear the default setting to BConfig.Log.Outputs
BConfig.Log.Outputs = make(map[string]string)
los := strings.Split(lo, ";")
for _, v := range los {
if logType2Config := strings.SplitN(v, ",", 2); len(logType2Config) == 2 {
BConfig.Log.Outputs[logType2Config[0]] = logType2Config[1]
} else {
continue
}
}
}
//init log
logs.Reset()
for adaptor, config := range BConfig.Log.Outputs {
err := logs.SetLogger(adaptor, config)
if err != nil {
fmt.Fprintln(os.Stderr, fmt.Sprintf("%s with the config %q got err:%s", adaptor, config, err.Error()))
}
}
logs.SetLogFuncCall(BConfig.Log.FileLineNum)
return nil
}
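// assignSingleConfig uses reflection to fill the exported string, int/int64 and
// bool fields of the given struct pointer from config values with matching names.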
func assignSingleConfig(p interface{}, ac config.Configer) {
pt := reflect.TypeOf(p)
if pt.Kind() != reflect.Ptr {
return
}
pt = pt.Elem()
if pt.Kind() != reflect.Struct {
return
}
pv := reflect.ValueOf(p).Elem()
for i := 0; i < pt.NumField(); i++ {
pf := pv.Field(i)
if !pf.CanSet() {
continue
}
name := pt.Field(i).Name
switch pf.Kind() {
case reflect.String:
pf.SetString(ac.DefaultString(name, pf.String()))
case reflect.Int, reflect.Int64:
pf.SetInt(ac.DefaultInt64(name, pf.Int()))
case reflect.Bool:
pf.SetBool(ac.DefaultBool(name, pf.Bool()))
case reflect.Struct:
default:
//do nothing here
}
}
}
// LoadAppConfig allow developer to apply a config file
func LoadAppConfig(adapterName, configPath string) error {
absConfigPath, err := filepath.Abs(configPath)
if err != nil {
return err
}
if !utils.FileExists(absConfigPath) {
return fmt.Errorf("the target config file: %s don't exist", configPath)
}
appConfigPath = absConfigPath
appConfigProvider = adapterName
return parseConfig(appConfigPath)
}
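// beegoAppConfig wraps a config.Configer and scopes lookups by run mode: each
// getter first tries the "<RunMode>::<key>" form and falls back to the plain key.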
type beegoAppConfig struct {
innerConfig config.Configer
}
func newAppConfig(appConfigProvider, appConfigPath string) (*beegoAppConfig, error) {
ac, err := config.NewConfig(appConfigProvider, appConfigPath)
if err != nil {
return nil, err
}
return &beegoAppConfig{ac}, nil
}
func (b *beegoAppConfig) Set(key, val string) error {
if err := b.innerConfig.Set(BConfig.RunMode+"::"+key, val); err != nil {
return err
}
return b.innerConfig.Set(key, val)
}
func (b *beegoAppConfig) String(key string) string {
if v := b.innerConfig.String(BConfig.RunMode + "::" + key); v != "" {
return v
}
return b.innerConfig.String(key)
}
func (b *beegoAppConfig) Strings(key string) []string {
if v := b.innerConfig.Strings(BConfig.RunMode + "::" + key); len(v) > 0 {
return v
}
return b.innerConfig.Strings(key)
}
func (b *beegoAppConfig) Int(key string) (int, error) {
if v, err := b.innerConfig.Int(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Int(key)
}
func (b *beegoAppConfig) Int64(key string) (int64, error) {
if v, err := b.innerConfig.Int64(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Int64(key)
}
func (b *beegoAppConfig) Bool(key string) (bool, error) {
if v, err := b.innerConfig.Bool(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Bool(key)
}
func (b *beegoAppConfig) Float(key string) (float64, error) {
if v, err := b.innerConfig.Float(BConfig.RunMode + "::" + key); err == nil {
return v, nil
}
return b.innerConfig.Float(key)
}
func (b *beegoAppConfig) DefaultString(key string, defaultVal string) string {
if v := b.String(key); v != "" {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultStrings(key string, defaultVal []string) []string {
if v := b.Strings(key); len(v) != 0 {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultInt(key string, defaultVal int) int {
if v, err := b.Int(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultInt64(key string, defaultVal int64) int64 {
if v, err := b.Int64(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultBool(key string, defaultVal bool) bool {
if v, err := b.Bool(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DefaultFloat(key string, defaultVal float64) float64 {
if v, err := b.Float(key); err == nil {
return v
}
return defaultVal
}
func (b *beegoAppConfig) DIY(key string) (interface{}, error) {
return b.innerConfig.DIY(key)
}
func (b *beegoAppConfig) GetSection(section string) (map[string]string, error) {
return b.innerConfig.GetSection(section)
}
func (b *beegoAppConfig) SaveConfigFile(filename string) error {
return b.innerConfig.SaveConfigFile(filename)
}
| [
"\"BEEGO_RUNMODE\"",
"\"BEEGO_RUNMODE\"",
"\"BEEGO_RUNMODE\""
]
| []
| [
"BEEGO_RUNMODE"
]
| [] | ["BEEGO_RUNMODE"] | go | 1 | 0 |