id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses 1)
---|---|---
3438827
|
import sys
import numpy as np
from functools import partial
from PyQt5.QtWidgets import QApplication, QWidget, QSystemTrayIcon, QPushButton, QDesktopWidget, QLabel, QAction, QMainWindow
from PyQt5.QtGui import QIcon, QImage, QPainter
from PyQt5.QtCore import QPointF, Qt
from board import Board, BOARDSIZE
from brain import Brain
from difficulty import Difficulty
SQUARESIZE = 100
class Four(QMainWindow):
def __init__(self, board):
super().__init__()
self.board = board
self.gameover = False
self.difficulty = Difficulty.MEDIUM
self.players = [
{"name": "Player", "ai": False, "piece": 1},
{"name": "AI", "ai": True, "brain": Brain(board, self.difficulty.value), "piece": 2},
]
self.currentPlayer = -1
self.initUI()
self.startNextTurn()
def initUI(self):
self.setGeometry(0, 0, 1280, 760)
self.center()
self.setWindowTitle('FOUR')
self.setWindowIcon(QIcon('assets/icon.png'))
mainMenu = self.menuBar()
gameMenu = mainMenu.addMenu('Game')
newGameButton = QAction(QIcon('assets/reload.png'), 'Restart', self)
newGameButton.setShortcut('Ctrl+R')
newGameButton.setStatusTip('Restart')
newGameButton.triggered.connect(partial(self.restart, self.difficulty))
gameMenu.addAction(newGameButton)
exitButton = QAction(QIcon('assets/exit.png'), 'Exit', self)
exitButton.setShortcut('Ctrl+Q')
exitButton.setStatusTip('Exit game')
exitButton.triggered.connect(self.close)
gameMenu.addAction(exitButton)
self.difficultylabel = QLabel(self)
self.difficultylabel.setStyleSheet("font-family: Roboto, sans-serif; font-size: 30px;")
self.difficultylabel.setText("Opponent: " + self.difficulty.name.upper())
self.difficultylabel.resize(450, 40)
self.difficultylabel.move(780, 110)
self.logbox = QLabel(self)
self.logbox.setStyleSheet("font-family: Consolas, sans-serif; font-size: 15px; border: 1px solid grey; border-radius: 2px;")
self.logbox.resize(450, 540)
self.logbox.move(780, 160)
self.logbox.setAlignment(Qt.AlignTop)
self.statsbox = QLabel(self)
self.statsbox.setStyleSheet("font-family: Consolas, sans-serif; border-top: 1px solid grey;")
self.statsbox.resize(1280, 40)
self.statsbox.move(0, 720)
self.update_stats()
self.marked_cells = []
self.drawRowButtons()
self.drawDifficultyButtons()
self.log("Game started on difficulty %s\t[Depth: %d]" % (self.difficulty.name, self.difficulty.value))
self.show()
# Draw buttons to add a piece to a column
def drawRowButtons(self):
for i in range(0, BOARDSIZE[1]):
btn = QPushButton('V', self)
btn.setToolTip('Drop piece')
btn.resize(SQUARESIZE-20, 40)
btn.move(10+(SQUARESIZE*i)+10, 40)
btn.clicked.connect(partial(self.drop, i))
# Draw buttons to start new game with chosen difficulty
def drawDifficultyButtons(self):
for i, d in enumerate(Difficulty):
btn = QPushButton(d.name, self)
btn.setToolTip("Start " + d.name)
btn.resize(SQUARESIZE-20, 40)
btn.move(780 + ((SQUARESIZE-8)*i), 40)
btn.clicked.connect(partial(self.restart, d))
# Center window
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
# Start a new game
def restart(self, newdiff=None):
if not self.gameover:
self.log("Game aborted.")
if newdiff is not None:
self.difficulty = newdiff
self.update_stats()
self.difficultylabel.setText("Opponent: " + self.difficulty.name.upper())
self.board.reset()
self.gameover = False
self.players[1]["brain"] = Brain(self.board, self.difficulty.value)
self.currentPlayer = -1
self.repaint()
self.log("New game started on difficulty %s\t[Depth: %d]" % (self.difficulty.name, self.difficulty.value))
self.startNextTurn()
# Log given message to the logbox (right side of window)
def log(self, msg):
if msg:
self.logbox.setText(msg + "\n" + self.logbox.text())
# Drop piece of current player in given col
def drop(self, col):
if self.gameover:
return
current_player = self.players[self.currentPlayer]
pieceDropped = self.board.dropPiece(col, current_player["piece"])
if pieceDropped:
# Print drop in logbox
logline = current_player["name"] + ": drop @ " + str(col)
if current_player["ai"]:
logline += "\t\t[Pruned " + str(current_player["brain"].prune_num) + " branches]"
self.log(logline)
self.marked_cells = [self.board.last_dropped_at(col)]
self.repaint()
self.onTurnEnded(col)
def onTurnEnded(self, col):
# check for game over
winning_config = self.board.testForWin(self.players[self.currentPlayer]["piece"], col)
self.gameover = winning_config is not None
if self.gameover:
self.mark_cells(winning_config)
self.log("\nWINNER: " + self.players[self.currentPlayer]["name"] + "\n")
if self.players[self.currentPlayer]["ai"]:
self.add_ai_stat(1,0,0)
else:
self.add_ai_stat(0,0,1)
elif self.board.completed(): # tie game
self.gameover = True
self.marked_cells = []
for r in range(BOARDSIZE[0]):
for c in range(BOARDSIZE[1]):
self.marked_cells.append((r, c))
self.log("\nIT'S A TIE!\n")
self.add_ai_stat(0,1,0)
else:
self.startNextTurn()
# Select next player as current player and execute AI move when needed
def startNextTurn(self):
self.currentPlayer = (self.currentPlayer + 1) % len(self.players)
if self.players[self.currentPlayer]["ai"]:
ai_brain = self.players[self.currentPlayer]["brain"]
next_move = ai_brain.calculateMove()
self.drop(next_move)
# Mark winning cells when game has ended
def mark_cells(self, cells):
self.marked_cells = []
for i in range(0, 4):
self.marked_cells.append((cells[0]+cells[2]*i, cells[1]+cells[3]*i))
self.log("Winning configuration found: %s -> %s" % (str(self.marked_cells[0]), str(self.marked_cells[-1])))
self.repaint()
# Redraw the grid
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
for row in range(0, BOARDSIZE[0]):
for col in range(0, BOARDSIZE[1]):
if self.board.getPieceAt(row, col) == 1:
if (row, col) in self.marked_cells:
qp.drawImage(QPointF(10+col*SQUARESIZE,100+row*SQUARESIZE), QImage("assets/square_red_special.png"))
else:
qp.drawImage(QPointF(10+col*SQUARESIZE,100+row*SQUARESIZE), QImage("assets/square_red.png"))
elif self.board.getPieceAt(row, col) == 2:
if (row, col) in self.marked_cells:
qp.drawImage(QPointF(10+col*SQUARESIZE,100+row*SQUARESIZE), QImage("assets/square_yellow_special.png"))
else:
qp.drawImage(QPointF(10+col*SQUARESIZE,100+row*SQUARESIZE), QImage("assets/square_yellow.png"))
else:
qp.drawImage(QPointF(10+col*SQUARESIZE,100+row*SQUARESIZE), QImage("assets/square.png"))
qp.end()
# Stats
def add_ai_stat(self, win, tie, loss):
self.stats[self.difficulty.name][0] += win
self.stats[self.difficulty.name][1] += tie
self.stats[self.difficulty.name][2] += loss
self.write_stats()
self.update_stats()
def update_stats(self):
self.read_stats()
diffstats = self.stats[self.difficulty.name]
self.statsbox.setText("\tDifficulty: %s\tAI wins: %d\tTies: %d\t\tAI losses: %d" % (self.difficulty.name, diffstats[0], diffstats[1], diffstats[2]))
def write_stats(self):
file = open("stats.sam","w")
for s in self.stats:
file.write(s+"$"+str(self.stats[s][0])+"$"+str(self.stats[s][1])+"$"+str(self.stats[s][2])+"\n")
file.close()
def read_stats(self):
# Start from zeroed stats for every difficulty so a missing stats file
# (e.g. on the first run) does not crash, then load any saved values.
self.stats = {d.name: [0, 0, 0] for d in Difficulty}
try:
with open('stats.sam') as fp:
for line in fp:
stats = line.split("$")
self.stats[stats[0]] = [int(stats[1]), int(stats[2]), int(stats[3])]
except FileNotFoundError:
pass
if __name__ == '__main__':
app = QApplication(sys.argv)
board = Board()
game = Four(board)
sys.exit(app.exec_())
|
StarcoderdataPython
|
129964
|
__all__ = ['BaseController']
import json
from pyramid.renderers import render
from pyramid.view import view_config
from horus.views import BaseController
@view_config(http_cache=(0, {'must-revalidate': True}),
renderer='templates/embed.txt', route_name='embed')
def embed(request, standalone=True):
if standalone:
request.response.content_type = 'application/javascript'
request.response.charset = 'UTF-8'
return {
pkg: json.dumps(request.webassets_env[pkg].urls())
for pkg in ['inject', 'jquery', 'raf']
}
@view_config(renderer='templates/home.pt', route_name='index')
def home(request):
return {
'embed': render('templates/embed.txt', embed(request, False), request)
}
def includeme(config):
config.add_view(
'horus.views.AuthController',
attr='login',
renderer='h:templates/auth.pt',
route_name='login'
)
config.add_view(
'horus.views.AuthController',
attr='logout',
route_name='logout'
)
config.add_view(
'horus.views.ForgotPasswordController',
attr='forgot_password',
renderer='h:templates/auth.pt',
route_name='forgot_password'
)
config.add_view(
'horus.views.ForgotPasswordController',
attr='reset_password',
renderer='h:templates/auth.pt',
route_name='reset_password'
)
config.add_view(
'horus.views.RegisterController',
attr='register',
renderer='h:templates/auth.pt',
route_name='register'
)
config.add_view(
'horus.views.RegisterController',
attr='activate',
renderer='h:templates/auth.pt',
route_name='activate'
)
config.add_view(
'horus.views.ProfileController',
attr='profile',
renderer='h:templates/auth.pt',
route_name='profile'
)
config.scan(__name__)
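# Illustrative wiring only: in the application's main() function, Pyramid's
# config.include() would invoke includeme() above and register these views.
# The dotted module path shown here is an assumption (the real path is not in this file):
#
#     config.include('h.views')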
|
StarcoderdataPython
|
4952470
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Based on https://github.com/V-E-O/PoC/tree/8c389899e6c4e16b2ddab9ba6d77c2696577366f/CVE-2017-13156
import os
import sys
import struct
import hashlib
import zipfile
import argparse
from zlib import adler32
verbosity = 0
def verbose_log(level, message):
if level <= verbosity:
print(message)
def update_checksum(data):
# Update SHA1 (20 bytes)
m = hashlib.sha1()
m.update(data[32:])
data[12:32] = m.digest()
# Update Adler32 (4 bytes)
v = adler32(memoryview(data[12:])) & 0xffffffff
data[8:12] = struct.pack('<L', v)
def main():
global verbosity
# Setup command arguments
parser = argparse.ArgumentParser(description='Inject custom code or custom data into an APK.')
parser.add_argument('-d', '--dex', action='store_true', help='use this flag to correct the input DEX\'s checksums.')
parser.add_argument('-v', '--verbosity', action='count', help='increase output verbosity (e.g., -vv is more than -v)')
parser.add_argument('input_data', help='this can be a DEX file or custom data, like a TXT file.')
parser.add_argument('input_apk', help='the APK you want to inject the custom_data into.')
parser.add_argument('output_apk', help='the output APK filename.')
args = parser.parse_args()
# Load arguments
input_data_file = args.input_data
input_apk_file = args.input_apk
output_apk_file = args.output_apk
if args.verbosity is not None:
verbosity = args.verbosity
# Check if input APK is a ZIP file
if not zipfile.is_zipfile(input_apk_file):
print("\"{}\" is not an APK/ZIP file.".format(input_apk_file))
exit(1)
# Load input data file
with open(input_data_file, 'rb') as f:
verbose_log(1, 'Reading data from {}...'.format(input_data_file))
input_data = bytearray(f.read())
input_data_size = len(input_data)
# Load target APK file
with open(input_apk_file, 'rb') as f:
apk_data = bytearray(f.read())
# Find Central Directory end address
cd_end_addr = apk_data.rfind(b'\x50\x4b\x05\x06')
verbose_log(1, 'Central Directory end address: {}'.format(cd_end_addr))
# Find Central Directory start address
cd_start_addr = struct.unpack('<L', apk_data[cd_end_addr+16:cd_end_addr+20])[0]
verbose_log(1, 'Central Directory start address: {}'.format(cd_start_addr))
# Offset address
new_data = struct.pack('<L', cd_start_addr + input_data_size)
verbose_log(2, 'Data modified from "{}" to "{}"'.format(bytes(apk_data[cd_end_addr+16:cd_end_addr+20]), new_data))
apk_data[cd_end_addr+16:cd_end_addr+20] = new_data
# Offset all remaining addresses
pos = cd_start_addr
while pos < cd_end_addr:
offset = struct.unpack('<L', apk_data[pos+42:pos+46])[0]
new_data = struct.pack('<L', offset + input_data_size)
verbose_log(2, 'Data modified from "{}" to "{}"'.format(bytes(apk_data[pos+42:pos+46]), new_data))
apk_data[pos+42:pos+46] = new_data
pos = apk_data.find(b'\x50\x4b\x01\x02', pos+46, cd_end_addr)
if pos == -1:
break
# Merge data
out_data = input_data + apk_data
# Fix checksum
if args.dex:
verbose_log(1, 'Fixing DEX checksum...')
out_data[32:36] = struct.pack('<L', len(out_data))
update_checksum(out_data)
# Export APK
with open(output_apk_file, 'wb') as f:
verbose_log(1, 'Saving injected APK to {}...'.format(output_apk_file))
f.write(out_data)
print('Successfully generated {}.'.format(output_apk_file))
if __name__ == '__main__':
main()
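# Hypothetical invocation (the script filename and all file names below are placeholders,
# not taken from the original source):
#
#     python3 inject.py -d -v classes.dex target.apk injected.apk
#
# With -d the injected file is treated as a DEX: after the APK bytes are appended, the DEX
# size field is patched and update_checksum() above recomputes the SHA-1 signature and
# Adler32 checksum in its header.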
|
StarcoderdataPython
|
5011782
|
<reponame>yaccri/python-domintell
"""
:author: <NAME> <<EMAIL>>
"""
import domintell
class Ping(domintell.Command):
"""
send: &PING message
"""
def __init__(self):
domintell.Command.__init__(self)
def command(self):
return "&PING"
|
StarcoderdataPython
|
11316956
|
<filename>neurotic/tangles/sandbox.py
from io import StringIO
import sys
def sandbox(code: str, block_globals: bool=False,
block_locals: bool=False) -> tuple:
"""Runs the code-string and captures any errors
Args:
code: executable string
block_globals: if True don't use global namespace
block_locals: if True don't use local namespace
Returns:
output, stderr, and any exception code
"""
old_stdout = sys.stdout
old_stderr = sys.stderr
redirected_output = sys.stdout = StringIO()
redirected_error = sys.stderr = StringIO()
namespace_globals = {} if block_globals else globals()
namespace_locals = {} if block_locals else locals()
output, error, exception = None, None, None
try:
exec(code, namespace_globals, namespace_locals)
except:
import traceback
exception = traceback.format_exc()
output = redirected_output.getvalue()
error = redirected_error.getvalue()
# reset outputs to the original values
sys.stdout = old_stdout
sys.stderr = old_stderr
return output, error, exception
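# Minimal usage sketch; guarded so that importing this module stays side-effect free.
# The code string passed in is purely illustrative.
if __name__ == "__main__":
    out, err, exc = sandbox("print('hello')\nundefined_name")
    print(repr(out))        # 'hello\n' - captured stdout
    print(exc is not None)  # True - the NameError traceback was captured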
|
StarcoderdataPython
|
8042113
|
#!/usr/bin/env python
import octavo
|
StarcoderdataPython
|
1628259
|
# -*- coding: UTF-8 -*-
# pep8: disable-msg=E501
# pylint: disable=C0301
import os
import logging
import getpass
import tempfile
__version__ = '0.0.5'
__author__ = '<NAME>'
__author_username__ = 'marco.lovato'
__author_email__ = '<EMAIL>'
__description__ = 'A command-line tool to create projects \
from templates, to start your python work.'
log_filename = os.path.join(tempfile.gettempdir(),
'machete-' + getpass.getuser() + '.log')
log = logging
log.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
filename=log_filename,
filemode='a')
def __path(filename):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
# Jenkins
if os.getenv("BUILD_NUMBER"):
file_ = open(__path('build.info'), 'w')
file_.write(os.getenv("BUILD_NUMBER"))
file_.close()
# Travis
if os.getenv("TRAVIS_BUILD_NUMBER"):
file_ = open(__path('build.info'), 'w')
file_.write(os.getenv("TRAVIS_BUILD_NUMBER"))
file_.close()
__build__ = '0'
if os.path.exists(__path('build.info')):
__build__ = open(__path('build.info')).read().strip()
__version__ = __version__ + '.' + __build__
|
StarcoderdataPython
|
11392019
|
<gh_stars>0
import logging
import math
import numpy as np
def dodi2wt(Do, Di):
"""Calculate pipe wall thickness from outer diameter and inner diameter.
"""
return (Do - Di) / 2
def dowt2di(Do, WT):
"""Calculate pipe inner diameter from outer diameter and wall thickness.
"""
return Do - 2 * WT
def diwt2do(Di, WT):
"""Calculate pipe outer diameter from inner diameter and wall thickness.
"""
return Di + 2 * WT
def dodiwt(*, Do=None, Di=None, WT=None):
"""Calculate pipe wall thickness / outer diameter / inner diameter.
"""
if Do is not None and Di is not None and WT is not None:
assert Do==Di+2*WT, f"pipe_D_WT: inconsistent pipe dimensions Do={Do} Di={Di} WT={WT}."
elif WT is None:
WT = (Do - Di) / 2
elif Di is None:
Di = Do - 2 * WT
elif Do is None:
Do = Di + 2 * WT
else:
return False
return Do, Di, WT
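# Illustrative calls (values chosen arbitrarily); any one argument may be omitted and is
# derived from the other two:
#
#     dodiwt(Do=0.6656, Di=0.6172)   # -> approximately (0.6656, 0.6172, 0.0242)
#     dodiwt(Do=0.6656, WT=0.0242)   # -> approximately (0.6656, 0.6172, 0.0242)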
def dodi2CSA(Do, Di):
r"""Calculate pipe cross sectional area, given the pipe outer diameter and inner diameter.
:param Do: pipe outer diameter :math:`(D_o)`
:type Do: float
:param Di: pipe inner diameter :math:`(D_i)`
:type Di: float
:returns: pipe cross sectional area
:rtype: float
.. math::
A = \frac{\pi}{4} \left( D_o^2 - D_i^2 \right)
.. doctest::
>>> dodi2CSA(0.6656, 0.6172)
0.19522098526377624
"""
CSA = math.pi / 4 * (Do*Do - Di*Di)
return CSA
def pipe_unit_mass(ρ, CSA):
"""Calculate pipe unit mass (mass/length).
"""
# if CSA is None:
# CSA = dodi2CSA(Do=Do, Di=Di, WT=WT)
return CSA * ρ
def pipe_unit_wgt(umass, g=9.806650):
"""Calculate pipe unit weight (weight/length).
"""
# if umass is None:
# umass = calc_pipe_umass(pipe_ρ, Do=Do, Di=Di, WT=WT)
return umass * g
def pipe_unit_subwgt(Dbuoy, seawater_ρ, uwgt, g=9.806650):
"""Calculate pipe unit submerged weight (weight/length).
"""
# if uwgt is None:
# uwgt = calc_pipe_uwgt(g, Do=Do, Di=Di, WT=WT, umass=umass, pipe_ρ=pipe_ρ)
usubwgt = uwgt - np.pi/4*Dbuoy**2 * seawater_ρ * g
return usubwgt
def pipe_equiv_layers(layers, *, Di_ref=None, Do_ref=None, umass=0,
returnDict=False):
"""calculate equivalent properties for stacked pipe layers.
:param layers: list of layer properties, each element is
a tuple consisting of (layer_thickness, layer_mass_density)
The first layer is the layer at Do_ref|Di_ref. Subsequent layers
are ordered outwards (increasing D) when Di_ref is reference diameter.
Layers are ordered inwards when Do_ref is reference diameter (decreasing D).
:type layers: list, tuple
:returns: tuple with equivalent properties (density, umass, Do, Di, WT)
:rtype: tuple
.. doctest::
>>> layers = [(0.0003, 1450.), (0.0038, 960.), (0.045, 2250.)]
>>> pipe_equiv_layers(layers, Di_ref=0.660, umass=337.0)
(5232.900238245189, 0.0491)
"""
#if (Di is not None) and (Do is not None):
if len([None for x in [Di_ref, Do_ref] if x is None]) != 1:
raise ValueError(f"pipe_layers: arguments not correctly specified: Di_ref={Di_ref}, Do_ref={Do_ref}")
#alayers = np.array(layers, dtype=[('WT', 'f4'), ('density', 'f4')])
alayers = layers
#print(alayers)
if Do_ref:
# Di_ref = Do_ref - np.sum(alayers["WT"]) * 2
# alayers = alayers[::-1]
Di_ref = Do_ref - 2 * sum(x for x, y in layers)
alayers = layers[::-1]
WT_total = 0.0
equiv_umass = umass
layer_Di = Di_ref
for layer in alayers:
#print(layer)
layer_Do = layer_Di + 2*layer[0]
WT_total += layer[0]
_csa = np.pi/4 * (np.power(layer_Do,2) - np.power(layer_Di,2))
equiv_umass += _csa * layer[1]
layer_Di = layer_Do
if Do_ref is None:
Do_ref = Di_ref + 2 * WT_total
_csa = np.pi/4 * (np.power(Do_ref,2) - np.power(Di_ref,2))
equiv_ρ = equiv_umass / _csa
#return (equiv_density, equiv_umass, Do_ref, Di_ref, WT_total)
if returnDict:
return {
"equiv_ρ": equiv_ρ,
"umass": equiv_umass,
"Do": Do_ref,
"Di": Di_ref,
"WT": WT_total
}
else:
return (equiv_ρ, equiv_umass, Do_ref, Di_ref, WT_total)
# def calc_pipeline_usubwgt(Do, WT, pipe_ρ, coating_layers):
# Do, Di, WT = calc_pipe_Do_Di_WT(Do=Do, WT=WT)
# CSA = calc_pipe_CSA(Do, Di)
# umass = calc_pipe_umass(CSA, pipe_ρ)
# joint_mass = umass * length
# uwgt = pdover2t.pipe.calc_pipe_uwgt(umass, g)
# usubwgt = pdover2t.pipe.calc_pipe_usubwgt(Do, seawater_ρ, g, uwgt=uwgt)
# joint_subwgt = usubwgt * length
# layersObj = pdover2t.pipe.calc_pipe_layers(coating_layers, Di_ref=Do, umass=umass)
# pl_umass = layersObj[1]
# pl_Do = layersObj[2]
# pl_uwgt = pdover2t.pipe.calc_pipe_uwgt(pl_umass, g)
# pl_usubwgt = pdover2t.pipe.calc_pipe_usubwgt(pl_Do, seawater_ρ, g, uwgt=pl_uwgt)
# return pl_usubwgt
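# Minimal demo of the equivalent-layer calculation, guarded so it only runs when this
# module is executed directly; the coating layers mirror the docstring example above.
if __name__ == "__main__":
    coating = [(0.0003, 1450.), (0.0038, 960.), (0.045, 2250.)]
    eq_rho, eq_umass, Do, Di, WT = pipe_equiv_layers(coating, Di_ref=0.660, umass=337.0)
    print("equivalent density:", eq_rho, "unit mass:", eq_umass, "Do:", Do, "Di:", Di, "WT:", WT)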
|
StarcoderdataPython
|
8127699
|
from puma.attribute import ThreadAction
class SharingAttributeBetweenScopesNotAllowedError(TypeError):
def __init__(self, attribute_name: str, scope_type: str, action_type: str) -> None:
super().__init__(f"Attribute '{attribute_name}' may not be passed between {scope_type} as its {action_type} is '{ThreadAction.NOT_ALLOWED.name}'")
class SharingAttributeBetweenThreadsNotAllowedError(SharingAttributeBetweenScopesNotAllowedError):
def __init__(self, attribute_name: str) -> None:
super().__init__(attribute_name, "Threads", "ThreadAction")
class SharingAttributeBetweenProcessesNotAllowedError(SharingAttributeBetweenScopesNotAllowedError):
def __init__(self, attribute_name: str) -> None:
super().__init__(attribute_name, "Processes", "ProcessAction")
|
StarcoderdataPython
|
4865296
|
import prep1 as py
import pandas as pd
from sklearn import preprocessing
from sklearn.feature_extraction.text import CountVectorizer
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import pickle
df=pd.read_csv("youtubedata.csv")
df.dropna(axis=0, how="any", thresh=None, subset=None, inplace=True)
label_encoder = preprocessing.LabelEncoder()
df_category = label_encoder.fit_transform(df["category"].values)
print("P")
f=open("corpus.txt","r")
cor=f.read().splitlines()
f.close()
cor=cor[0]
corpus = py.corpus_build(df["title"].values)
corpus1 = py.corpus_desc(df["description"].values)
X,y =py.vector(corpus,corpus1,cor,df)
print("Q")
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
classifier = RandomForestClassifier(n_estimators = 1000, criterion = 'entropy')
classifier.fit(X_train, y_train)
print("O")
filename = 'finalized_model2.sav'
pickle.dump(classifier, open(filename, 'wb'))
y_pred = classifier.predict(X_test)
print(classifier.score(X_test, y_test))
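# Illustrative follow-up (assumes new titles/descriptions go through the same prep1
# feature pipeline used above):
#
#     loaded = pickle.load(open('finalized_model2.sav', 'rb'))
#     loaded.predict(X_test[:5])   # returns label-encoded category ids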
|
StarcoderdataPython
|
1628581
|
import logging
import unittest
import requests
from configcatclient import DataGovernance
try:
from unittest import mock
except ImportError:
import mock
try:
from unittest.mock import Mock, ANY
except ImportError:
from mock import Mock, ANY
from configcatclient.configfetcher import ConfigFetcher
logging.basicConfig(level=logging.WARN)
test_json = {"test": "json"}
class MockHeader:
def __init__(self, etag):
self.etag = etag
def get(self, name):
if name == 'Etag':
return self.etag
return None
class MockResponse:
def __init__(self, json_data, status_code, etag=None):
self.json_data = json_data
self.status_code = status_code
self.headers = MockHeader(etag)
def json(self):
return self.json_data
def raise_for_status(self):
if 200 <= self.status_code < 300 or self.status_code == 304:
return
raise Exception(self.status_code)
# An organization with Global data_governance config.json representation
def mocked_requests_get_global(*args, **kwargs):
if args[0] == 'https://cdn-global.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-global.configcat.com",
"r": 0
},
"f": test_json
}, 200)
elif args[0] == 'https://cdn-eu.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-global.configcat.com",
"r": 0
},
"f": test_json
}, 200)
return MockResponse(None, 404)
# An organization with EuOnly data_governance config.json representation
def mocked_requests_get_eu_only(*args, **kwargs):
if args[0] == 'https://cdn-global.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-eu.configcat.com",
"r": 1
},
"f": {}
}, 200)
elif args[0] == 'https://cdn-eu.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-eu.configcat.com",
"r": 0
},
"f": test_json
}, 200)
return MockResponse(None, 404)
# An organization with Global data_governance config.json representation with custom baseurl
def mocked_requests_get_custom(*args, **kwargs):
if args[0] == 'https://custom.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-global.configcat.com",
"r": 0
},
"f": test_json
}, 200)
return MockResponse(None, 404)
# Redirect loop in config.json
def mocked_requests_get_redirect_loop(*args, **kwargs):
if args[0] == 'https://cdn-global.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-eu.configcat.com",
"r": 1
},
"f": test_json
}, 200)
elif args[0] == 'https://cdn-eu.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://cdn-global.configcat.com",
"r": 1
},
"f": test_json
}, 200)
return MockResponse(None, 404)
# An organization with forced=2 redirection config.json representation
def mocked_requests_get_forced_2(*args, **kwargs):
if args[0] == 'https://custom.configcat.com/configuration-files//config_v5.json' \
or args[0] == 'https://cdn-global.configcat.com/configuration-files//config_v5.json' \
or args[0] == 'https://cdn-eu.configcat.com/configuration-files//config_v5.json'\
or args[0] == 'https://forced.configcat.com/configuration-files//config_v5.json':
return MockResponse({
"p": {
"u": "https://forced.configcat.com",
"r": 2
},
"f": test_json
}, 200)
return MockResponse(None, 404)
call_to_global = mock.call('https://cdn-global.configcat.com/configuration-files//config_v5.json',
auth=ANY, headers=ANY, proxies=ANY, timeout=ANY)
call_to_eu = mock.call('https://cdn-eu.configcat.com/configuration-files//config_v5.json',
auth=ANY, headers=ANY, proxies=ANY, timeout=ANY)
call_to_custom = mock.call('https://custom.configcat.com/configuration-files//config_v5.json',
auth=ANY, headers=ANY, proxies=ANY, timeout=ANY)
call_to_forced = mock.call('https://forced.configcat.com/configuration-files//config_v5.json',
auth=ANY, headers=ANY, proxies=ANY, timeout=ANY)
class DataGovernanceTests(unittest.TestCase):
@mock.patch('requests.get', side_effect=mocked_requests_get_global)
def test_sdk_global_organization_global(self, mock_get):
# In this case
# the first invocation should call https://cdn-global.configcat.com
# and the second should call https://cdn-global.configcat.com
# without force redirects
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 1)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_global, mock_get.call_args_list[1])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_global)
def test_sdk_eu_organization_global(self, mock_get):
# In this case
# the first invocation should call https://cdn-eu.configcat.com
# and the second should call https://cdn-global.configcat.com
# without force redirects
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.EuOnly)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 1)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
self.assertNotIn(call_to_global, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
self.assertEqual(call_to_global, mock_get.call_args_list[1])
@mock.patch('requests.get', side_effect=mocked_requests_get_eu_only)
def test_sdk_global_organization_eu_only(self, mock_get):
# In this case
# the first invocation should call https://cdn-global.configcat.com
# with an immediate redirect to https://cdn-eu.configcat.com
# and the second should call https://cdn-eu.configcat.com
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_eu, mock_get.call_args_list[1])
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 3)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_eu, mock_get.call_args_list[1])
self.assertEqual(call_to_eu, mock_get.call_args_list[2])
@mock.patch('requests.get', side_effect=mocked_requests_get_eu_only)
def test_sdk_eu_organization_eu_only(self, mock_get):
# In this case
# the first invocation should call https://cdn-eu.configcat.com
# and the second should call https://cdn-eu.configcat.com
# without redirects
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.EuOnly)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 1)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
self.assertEqual(call_to_eu, mock_get.call_args_list[1])
@mock.patch('requests.get', side_effect=mocked_requests_get_custom)
def test_sdk_global_custom_base_url(self, mock_get):
# In this case
# the first invocation should call https://custom.configcat.com
# and the second should call https://custom.configcat.com
# without force redirects
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global,
base_url='https://custom.configcat.com')
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 1)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertEqual(call_to_custom, mock_get.call_args_list[1])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_custom)
def test_sdk_eu_custom_base_url(self, mock_get):
# In this case
# the first invocation should call https://custom.configcat.com
# and the second should call https://custom.configcat.com
# without force redirects
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.EuOnly,
base_url='https://custom.configcat.com')
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 1)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertEqual(call_to_custom, mock_get.call_args_list[1])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_forced_2)
def test_sdk_global_forced(self, mock_get):
# In this case
# the first invocation should call https://cdn-global.configcat.com
# with an immediate redirect to https://forced.configcat.com
# and the second should call https://forced.configcat.com
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 3)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertEqual(call_to_forced, mock_get.call_args_list[2])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_forced_2)
def test_sdk_eu_forced(self, mock_get):
# In this case
# the first invocation should call https://cdn-eu.configcat.com
# with an immediate redirect to https://forced.configcat.com
# and the second should call https://forced.configcat.com
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.EuOnly)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertNotIn(call_to_global, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 3)
self.assertEqual(call_to_eu, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertEqual(call_to_forced, mock_get.call_args_list[2])
self.assertNotIn(call_to_global, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_forced_2)
def test_sdk_base_url_forced(self, mock_get):
# In this case
# the first invocation should call https://custom.configcat.com
# with an immediate redirect to https://forced.configcat.com
# and the second should call https://forced.configcat.com
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global,
base_url='https://custom.configcat.com')
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 2)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 3)
self.assertEqual(call_to_custom, mock_get.call_args_list[0])
self.assertEqual(call_to_forced, mock_get.call_args_list[1])
self.assertEqual(call_to_forced, mock_get.call_args_list[2])
self.assertNotIn(call_to_eu, mock_get.call_args_list)
self.assertNotIn(call_to_global, mock_get.call_args_list)
@mock.patch('requests.get', side_effect=mocked_requests_get_redirect_loop)
def test_sdk_redirect_loop(self, mock_get):
# In this case
# the first invocation should call https://cdn-global.configcat.com
# with an immediate redirect to https://cdn-eu.configcat.com
# with an immediate redirect to https://cdn-global.configcat.com
# the second invocation should call https://cdn-eu.configcat.com
# with an immediate redirect to https://cdn-global.configcat.com
# with an immediate redirect to https://cdn-eu.configcat.com
fetcher = ConfigFetcher(sdk_key='', mode='m', data_governance=DataGovernance.Global)
# First fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 3)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_eu, mock_get.call_args_list[1])
self.assertEqual(call_to_global, mock_get.call_args_list[2])
# Second fetch
fetch_response = fetcher.get_configuration_json()
self.assertTrue(fetch_response.is_fetched())
self.assertEqual(test_json, fetch_response.json().get('f'))
self.assertEqual(len(mock_get.call_args_list), 6)
self.assertEqual(call_to_global, mock_get.call_args_list[0])
self.assertEqual(call_to_eu, mock_get.call_args_list[1])
self.assertEqual(call_to_global, mock_get.call_args_list[2])
self.assertEqual(call_to_eu, mock_get.call_args_list[3])
self.assertEqual(call_to_global, mock_get.call_args_list[4])
self.assertEqual(call_to_eu, mock_get.call_args_list[5])
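# Assumed entry point so the suite can also be run directly with `python <this file>`;
# the original snippet does not include one.
if __name__ == '__main__':
    unittest.main()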
|
StarcoderdataPython
|
12817337
|
<reponame>zan73/telstra-smart-modem
# Class with helper methods to represent the devices connected to or seen by the modem.
# This class is returned from Modem.getDevices() and can't be used by itself.
import ipaddress
import re
import bs4
import json
# Compiled regular expressions:
re_mac = re.compile(r"(?:[0-9a-f]{2}[:]){5}[0-9a-f]{2}")
re_ipv4 = re.compile(r"(?:[0-9]{1,3}[.]){3}[0-9]{1,3}")
re_ipv6_full = re.compile(r"(?:[0-9a-f]{1,4}:){7}[0-9a-f]{1,4}")
# Parse the html table from Modem.getDevices()
def parseDevices(soup):
headers = ['online', 'hostname', 'ip', 'mac', 'connection', 'eth-port']
def extractOnlineStatus(td: bs4.element.Tag) -> bool:
status = td.div['class'][1]
if status == "green":
return True
else:
return False
def extractEthPort(possiblePort: str):
if possiblePort:
return int(possiblePort)
def extractIPV4(ips: str):
if ips:
ipv4 = re_ipv4.findall(ips)
if ipv4:
return ipaddress.IPv4Address(ipv4[0])
def extractIPV6(ips: str):
if ips:
ipv6s = re_ipv6_full.findall(ips)
return [ipaddress.IPv6Address(ipv6) for ipv6 in ipv6s]
else:
return []
html_table = soup.tbody.find_all('tr')
devices = []
for row in html_table:
cols = row.find_all('td')
device = {}
for index, item in enumerate(cols):
header = headers[index]
if index == 0:
device[header] = extractOnlineStatus(item)
elif index == 2:
device['ipv4'] = extractIPV4(item.string)
device['ipv6'] = extractIPV6(item.string)
elif index == 5:
device[header] = extractEthPort(item.string)
else:
device[header] = item.string
devices.append(device)
return devices
# Validate a supplied MAC address and convert it to the correct format.
def validateMAC(mac: str) -> str:
valid_mac = mac.lower()
valid_mac = valid_mac.replace("-", ":", 5)
if not re_mac.fullmatch(valid_mac):
raise ValueError(f"Invalid MAC address: '{mac}'")
return valid_mac
class Devices:
def __init__(self, soup, LH1000 = False):
if not LH1000:
self.devices = parseDevices(soup)
else:
self.devices = json.loads(soup.replace('station_info=',''))['stations']
for dev in self.devices:
dev['mac'] = dev.pop('station_mac').lower()
dev['online'] = dev['online'] == '1'
dev['hostname'] = dev.pop('station_name')
dev['connection'] = dev.pop('connect_type')
# Get a specific device's details from its MAC address.
def getDevice(self, mac: str) -> dict:
valid_mac = validateMAC(mac)
for device in self.devices:
if device['mac'] == valid_mac:
return device
# Check if a device is currently connected to the modem by MAC address.
def isOnline(self, mac: str) -> bool:
device = self.getDevice(mac)
if device:
return device['online']
else:
return False
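# Illustrative usage; `modem` stands for an instance of this package's Modem class
# (see the note at the top of this file) and the MAC address is a placeholder:
#
#     devices = modem.getDevices()
#     devices.isOnline("aa-bb-cc-dd-ee-ff")    # True if that client is currently connected
#     devices.getDevice("aa:bb:cc:dd:ee:ff")   # full detail dict, or None if never seen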
|
StarcoderdataPython
|
4958318
|
<reponame>DaveLorenz/FlaskDeepLearningHamSpam<filename>Flask application/main.py
# load packages
import os
import flask
app = flask.Flask(__name__)
from flask import Flask, render_template, request
#load model preprocessing
import numpy as np
import pandas as pd
import pickle
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import keras.models
from keras.models import model_from_json
# Load tokenizer for preprocessing
with open('tokenizer.pickle', 'rb') as handle:
tokenizer = pickle.load(handle)
# Load pre-trained model into memory
json_file = open('model.json','r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# Load weights into new model
loaded_model.load_weights("model.h5")
def prepData(text):
# Convert to array
textDataArray = [text]
# Convert into list with word ids
Features = tokenizer.texts_to_sequences(textDataArray)
Features = pad_sequences(Features, 20, padding='post')
return Features
loaded_model.compile(optimizer="Adam",loss='binary_crossentropy',metrics=['accuracy'])
# define a predict function as an endpoint
@app.route('/', methods=['GET','POST'])
def predict():
#whenever the predict method is called, we're going
#to input the user entered text into the model
#and return a prediction
if request.method=='POST':
textData = request.form.get('text_entered')
Features = prepData(textData)
prediction = int((np.asscalar(loaded_model.predict(Features)))*100)
return render_template('prediction.html', prediction=prediction)
else:
return render_template("search_page.html")
if __name__ == "__main__":
app.run(host='127.0.0.1', port=8080)
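# Illustrative request against the locally running app; the form field name matches the
# handler above and the message text is a placeholder:
#
#     curl -X POST -d "text_entered=Win a free prize now" http://127.0.0.1:8080/
#
# The rendered prediction.html receives the model output scaled to an integer percentage.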
|
StarcoderdataPython
|
11322389
|
<gh_stars>1-10
import os
import functools
import collections
from typing import Optional, List
from PIL import Image
import torchvision
from ..transforms import stack
from ..train import SequenceArray, SamplerRandom, SamplerSequential
from ..transforms import criteria_feature_name, TransformCompose, TransformResize, TransformNormalizeIntensity
import xml.etree.ElementTree as ET
from ..basic_typing import Datasets
from ..transforms import Transform
from typing_extensions import Literal
from . import utils
import numpy as np
import torch
def _load_image_and_mask(batch, transform, normalize_0_1=True):
images = []
masks = []
for image_path, mask_path in zip(batch['images'], batch['masks']):
image = utils.pic_to_tensor(Image.open(image_path).convert('RGB'))
if normalize_0_1:
image = image.float() / 255.0
images.append(image)
mask = utils.pic_to_tensor(Image.open(mask_path))
masks.append(mask)
batch = {
'images': stack(images),
'masks': stack(masks),
}
if transform is not None:
batch = transform(batch)
return batch
def _parse_voc_xml(node):
"""
Extracted from torchvision
"""
voc_dict = {}
children = list(node)
if children:
def_dic = collections.defaultdict(list)
for dc in map(_parse_voc_xml, children):
for ind, v in dc.items():
def_dic[ind].append(v)
if node.tag == 'annotation':
def_dic['object'] = [def_dic['object']]
voc_dict = {
node.tag:
{ind: v[0] if len(v) == 1 else v
for ind, v in def_dic.items()}
}
if node.text:
text = node.text.strip()
if not children:
voc_dict[node.tag] = text
return voc_dict
OBJECT_CLASS_MAPPING = {
'aeroplane': 0,
'bicycle': 1,
'bird': 2,
'boat': 3,
'bottle': 4,
'bus': 5,
'car': 6,
'cat': 7,
'chair': 8,
'cow': 9,
'diningtable': 10,
'dog': 11,
'horse': 12,
'motorbike': 13,
'person': 14,
'pottedplant': 15,
'sheep': 16,
'sofa': 17,
'train': 18,
'tvmonitor': 19
}
def _load_image_and_bb(batch, transform, normalize_0_1=True):
images = []
annotations = []
sizes_cyx = []
object_class_by_image = []
object_bb_yx_by_image = []
label_difficulty_by_image = []
image_paths = []
for image_path, annotation_path in zip(batch['images'], batch['annotations']):
image_paths.append(image_path)
image = utils.pic_to_tensor(Image.open(image_path).convert('RGB'))
if normalize_0_1:
image = image.float() / 255.0
images.append(image)
annotation = _parse_voc_xml(ET.parse(annotation_path).getroot())['annotation']
annotations.append(annotation)
s = annotation['size']
sizes_cyx.append((
int(s['depth']),
int(s['height']),
int(s['width'])))
o_classes = []
o_bb = []
o_difficult = []
for o in annotation['object']:
o_classes.append(OBJECT_CLASS_MAPPING[o['name']])
box = o['bndbox']
o_difficult.append(int(o['difficult']))
o_bb.append([
float(box['ymin']),
float(box['xmin']),
float(box['ymax']),
float(box['xmax'])])
object_class_by_image.append(torch.from_numpy(np.asarray(o_classes, dtype=np.int64)))
label_difficulty_by_image.append(torch.tensor(o_difficult, dtype=torch.long))
# typically handled on CPU, so keep it as numpy
object_bb_yx_by_image.append(np.asarray(o_bb, dtype=np.float32))
image_scale = np.ones([len(images)], dtype=np.float32)
batch = {
'image_path': image_paths,
'sample_uid': batch['sample_uid'],
'images': images,
'image_scale': image_scale,
'annotations': annotations,
'sizes_cyx': sizes_cyx,
'object_class_by_image': object_class_by_image,
'label_difficulty_by_image': label_difficulty_by_image,
'object_bb_yx_by_image': object_bb_yx_by_image
}
if transform is not None:
batch = transform(batch)
return batch
def default_voc_transforms():
criteria_images = functools.partial(criteria_feature_name, feature_names=['images'])
return TransformCompose([
TransformResize(size=[250, 250]),
#TransformRandomCropPad(feature_names=['images', 'masks'], padding=None, shape=[3, 224, 224]),
TransformNormalizeIntensity(criteria_fn=criteria_images, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
def create_voc_segmentation_dataset(
batch_size: int = 40,
root: Optional[str] = None,
transform_train: Optional[List[Transform]] = default_voc_transforms(),
transform_valid: Optional[List[Transform]] = None,
nb_workers: int = 2,
year: Literal['2007', '2012'] = '2012') -> Datasets:
"""
Create the VOC segmentation dataset
Args:
batch_size: the number of samples per batch
root: the root of the dataset
transform_train: the transform to apply on each batch of data of the training data
transform_valid: the transform to apply on each batch of data of the validation data
nb_workers: the number of worker process to pre-process the batches
year: the version of the dataset
Returns:
a datasets with dataset `voc2012` and splits `train`, `valid`.
"""
if root is None:
# first, check if we have some environment variables configured
root = os.environ.get('TRW_DATA_ROOT')
if root is None:
# else default a standard folder
root = './data'
path = os.path.join(root, f'VOC{year}')
download = False
try:
# test if we have access to the data. If this fails, it means we need to download it!
torchvision.datasets.VOCSegmentation(root=path, image_set='val', transform=None, download=False, year=year)
except:
download = True
# train split
# here batch_size = 1 since the images do not all have the same size, so we need to process them
# independently. The `transform` should normalize the size (resize, central_crop) so that
# the images can be batched subsequently
train_dataset = torchvision.datasets.VOCSegmentation(
root=path, image_set='train', transform=None, download=download, year=year)
train_sequence = SequenceArray({
'images': train_dataset.images,
'masks': train_dataset.masks,
}, SamplerRandom(batch_size=1))
train_sequence = train_sequence.map(functools.partial(_load_image_and_mask, transform=transform_train), nb_workers=nb_workers, max_jobs_at_once=2 * nb_workers)
if batch_size != 1:
train_sequence = train_sequence.batch(batch_size)
# valid split
valid_dataset = torchvision.datasets.VOCSegmentation(
root=path, image_set='val', transform=None, download=download, year=year)
valid_sequence = SequenceArray({
'images': valid_dataset.images,
'masks': valid_dataset.masks,
}, SamplerSequential(batch_size=1))
valid_sequence = valid_sequence.map(functools.partial(_load_image_and_mask, transform=transform_valid), nb_workers=nb_workers, max_jobs_at_once=2 * nb_workers)
if batch_size != 1:
valid_sequence = valid_sequence.batch(batch_size)
return {
f'voc{year}_seg': collections.OrderedDict([
('train', train_sequence.collate()),
('valid', valid_sequence.collate()),
])
}
def create_voc_detection_dataset(
root: Optional[str] = None,
transform_train: Optional[List[Transform]] = None,
transform_valid: Optional[List[Transform]] = None,
nb_workers: int = 2,
batch_size: int = 1,
data_subsampling_fraction_train: float = 1.0,
data_subsampling_fraction_valid: float = 1.0,
train_split: str = 'train',
valid_split: str = 'val',
year: Literal['2007', '2012'] = '2007') -> Datasets:
"""
PASCAL VOC detection challenge
Notes:
- Batch size is always `1` since we need to sample from the image various anchors,
locations depending on the task (so each sample should be post-processed by a custom
transform)
"""
if root is None:
# first, check if we have some environment variables configured
root = os.environ.get('TRW_DATA_ROOT')
if root is None:
# else default a standard folder
root = './data'
#path = os.path.join(root, f'VOC{year}') # TODO
path = root
download = False
try:
# test if we have access to the data. If this fails, it means we need to download it!
torchvision.datasets.VOCDetection(
root=path,
image_set=train_split,
transform=None,
download=False,
year=year
)
except:
download = True
train_dataset = torchvision.datasets.VOCDetection(
root=path,
image_set=train_split,
transform=None,
download=download,
year=year)
if data_subsampling_fraction_train < 1.0:
# resample the data if required
nb_train = int(len(train_dataset.images) * data_subsampling_fraction_train)
indices = np.random.choice(len(train_dataset.images), nb_train, replace=False)
train_dataset.images = np.asarray(train_dataset.images)[indices].tolist()
train_dataset.annotations = np.asarray(train_dataset.annotations)[indices].tolist()
train_sequence = SequenceArray({
'images': train_dataset.images,
'annotations': train_dataset.annotations
}, SamplerRandom(batch_size=batch_size))
train_sequence = train_sequence.map(functools.partial(_load_image_and_bb, transform=transform_train),
nb_workers=nb_workers, max_jobs_at_once=2 * nb_workers)
# valid split
valid_dataset = torchvision.datasets.VOCDetection(
root=path,
image_set=valid_split,
transform=None,
download=download,
year=year)
if data_subsampling_fraction_valid < 1.0:
# resample the data if required
nb_valid = int(len(valid_dataset.images) * data_subsampling_fraction_valid)
indices = np.random.choice(len(valid_dataset.images), nb_valid, replace=False)
valid_dataset.images = np.asarray(valid_dataset.images)[indices].tolist()
valid_dataset.annotations = np.asarray(valid_dataset.annotations)[indices].tolist()
valid_sequence = SequenceArray({
'images': valid_dataset.images,
'annotations': valid_dataset.annotations,
}, SamplerSequential(batch_size=batch_size))
valid_sequence = valid_sequence.map(functools.partial(_load_image_and_bb, transform=transform_valid),
nb_workers=nb_workers, max_jobs_at_once=2 * nb_workers)
return {
f'voc{year}_detect': collections.OrderedDict([
('train', train_sequence),
('valid', valid_sequence),
])
}
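# Illustrative usage only (assumes this module is imported as part of its package, since it
# relies on the relative imports above):
#
#     datasets = create_voc_detection_dataset(batch_size=1, nb_workers=0)
#     for batch in datasets['voc2007_detect']['train']:
#         batch['images'], batch['object_class_by_image'], batch['object_bb_yx_by_image']
#         break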
|
StarcoderdataPython
|
1859332
|
from auditor.manager import default_manager
from event_manager.event_service import EventService
class AuditorService(EventService):
"""A service that just passes the event on to the auditor backends (notifier, tracker, activity logs)."""
__all__ = EventService.__all__ + ('log', 'notify', 'track')
event_manager = default_manager
def __init__(self):
self.notifier = None
self.tracker = None
self.activitylogs = None
def record_event(self, event):
"""
Record the event async.
"""
from polyaxon.celery_api import celery_app
from polyaxon.settings import EventsCeleryTasks
event = event.serialize(dumps=False, include_actor_name=True, include_instance_info=True)
celery_app.send_task(EventsCeleryTasks.EVENTS_TRACK, kwargs={'event': event})
celery_app.send_task(EventsCeleryTasks.EVENTS_LOG, kwargs={'event': event})
celery_app.send_task(EventsCeleryTasks.EVENTS_NOTIFY, kwargs={'event': event})
def notify(self, event):
self.notifier.record(event_type=event['type'], event_data=event)
def track(self, event):
self.tracker.record(event_type=event['type'], event_data=event)
def log(self, event):
self.activitylogs.record(event_type=event['type'], event_data=event)
def setup(self):
super().setup()
# Load default event types
import auditor.events # noqa
import notifier
import activitylogs
import tracker
self.notifier = notifier
self.tracker = tracker
self.activitylogs = activitylogs
|
StarcoderdataPython
|
1821404
|
<gh_stars>0
#!/usr/local/bin/python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudevents.http import CloudEvent, to_structured
import requests
import json
# Create a cloudevent using https://github.com/cloudevents/sdk-python
# Note we only need source and type because the cloudevents constructor by
# default will set "specversion" to the most recent cloudevent version (e.g. 1.0)
# and "id" to a generated uuid.uuid4 string.
attributes = {
"Content-Type": "application/json",
"source": "from-galaxy-far-far-away",
"type": "cloudevent.greet.you"
}
data = {"name":"john"}
event = CloudEvent(attributes, data)
# Send the event to our local docker container listening on port 8080
headers, data = to_structured(event)
requests.post("http://localhost:8080/", headers=headers, data=data)
|
StarcoderdataPython
|
1993010
|
import sublime
import os
import sys
import json
import csv
import urllib
import pprint
import sys
import re
import time
import datetime
import base64
import zipfile
import shutil
import subprocess
import webbrowser
import xml.dom.minidom
from .salesforce.lib import xmlformatter
from .salesforce import message
from .salesforce import xmltodict
from .salesforce.lib import dateutil
from .salesforce.lib.dateutil import tz
from .salesforce.lib.panel import Printer
from . import context
from xml.sax.saxutils import unescape
def load_templates():
settings = context.get_settings()
target_dir = os.path.join(settings["workspace"], ".templates")
if not os.path.exists(target_dir):
os.makedirs(target_dir)
templates_dir = os.path.join(target_dir, "templates.json")
if not os.path.isfile(templates_dir):
source_dir = os.path.join(
sublime.installed_packages_path(),
"haoide.sublime-package"
)
if os.path.isfile(source_dir):
zfile = zipfile.ZipFile(source_dir, 'r')
for filename in zfile.namelist():
if filename.endswith('/'): continue
if filename.startswith("config/templates/"):
f = os.path.join(
target_dir,
filename.replace("config/templates/", "")
)
if not os.path.exists(os.path.dirname(f)):
os.makedirs(os.path.dirname(f))
with open(f, "wb") as fp:
fp.write(zfile.read(filename))
zfile.close()
else:
source_dir = os.path.join(
sublime.packages_path(), "haoide/config/templates"
)
copy_files_in_folder(source_dir, target_dir)
with open(templates_dir) as fp:
templates = json.loads(fp.read())
return templates
def copy_files_in_folder(source_dir, target_dir):
""" Copy folders and files in source dir to target dir
Parameters:
@source_dir -- Source Directory
@target_dir -- Target Directory
"""
for _file in os.listdir(source_dir):
sourceFile = os.path.join(source_dir, _file)
targetFile = os.path.join(target_dir, _file)
if os.path.isfile(sourceFile):
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if not os.path.exists(targetFile) or (
os.path.exists(targetFile) and (
os.path.getsize(targetFile) != os.path.getsize(sourceFile)
)):
open(targetFile, "wb").write(open(sourceFile, "rb").read())
if os.path.isdir(sourceFile):
copy_files_in_folder(sourceFile, targetFile)
def copy_files(attributes, target_dir):
""" Copy files and their related meta files to target dir
Parameters:
@attributes -- file attributes, example: {
"fileDir": ".../classes/ABC.cls",
"fullName": "ABC"
}
@target_dir -- Target Directory
"""
try:
for attribute in attributes:
# Copy file to target dir
#
# Build target metadata folder, create it if it does not exist
target_meta_folder = os.path.join(
target_dir, "src",
attribute["metadata_folder"],
attribute.get("folder", "")
)
if not os.path.exists(target_meta_folder):
os.makedirs(target_meta_folder)
# Build target file
target_file = os.path.join(
target_meta_folder,
attribute["fullName"]
)
# Copy file to target file
fileDir = attribute["fileDir"]
with open(fileDir, "rb") as fread:
content = fread.read()
with open(target_file, "wb") as fwrite:
fwrite.write(content)
# Write meta file to target dir if exist
metaFileDir = fileDir + "-meta.xml"
if os.path.isfile(metaFileDir):
target_meta_file = target_file + "-meta.xml"
with open(metaFileDir, "rb") as fread:
content = fread.read()
with open(target_meta_file, "wb") as fwrite:
fwrite.write(content)
except Exception as ex:
Printer.get("error").write(str(ex))
return False
return True
def get_described_metadata(settings):
cache_file = os.path.join(
settings["workspace"],
".config",
"metadata.json"
)
described_metadata = None
if os.path.isfile(cache_file):
with open(cache_file) as fp:
described_metadata = json.loads(fp.read())
return described_metadata
def get_instance(settings):
""" Get instance by instance_url
Return:
* instance -- instance of active project, for example,
if instance_url is https://ap1.salesforce.com,
instance will be `ap1`,
if instance_url is https://company-name.cs18.my.salesforce.com
instance will be `company-name.cs18.my`
"""
session = get_session_info(settings)
instance_url = session["instance_url"]
    base_url = re.compile(r"//[\s\S]+?\.").search(instance_url).group()
instance = base_url[2:-1]
return instance
def get_session_info(settings):
""" Get Session Info
Arguments:
* settings -- plugin settings
Return:
* session -- Session Info
"""
session = None
session_directory = os.path.join(settings["workspace"], ".config", "session.json")
if os.path.isfile(session_directory):
with open(session_directory) as fp:
session = json.loads(fp.read())
return session
def get_package_info(settings):
package = None
package_directory = os.path.join(settings["workspace"], ".config", "package.json")
if os.path.isfile(package_directory):
with open(package_directory) as fp:
package = json.loads(fp.read())
return package
def view_coverage(name, file_name, body):
settings = context.get_settings()
cache_file = os.path.join(settings["workspace"], ".config", "coverage.json")
coverages = {}
if os.path.isfile(cache_file):
coverages = json.loads(open(cache_file).read())
coverage = coverages.get(name.lower(), {})
if not coverage:
return Printer.get("error").write("No code coverage cache, " +\
"please execute `Run Sync Test` on related test class before view code coverage")
numLocationsNotCovered = coverage["numLocationsNotCovered"]
numLocations = coverage["numLocations"]
numLocationsCovered = numLocations - numLocationsNotCovered
linesNotCovered = [l["line"] for l in coverage["locationsNotCovered"]]
if numLocations == 0:
return Printer.get("error").write("There is no code coverage")
# Append coverage statistic info
coverage_statistic = "%s Coverage: %.2f%%(%s/%s)" % (
name, numLocationsCovered / numLocations * 100,
numLocationsCovered, numLocations
)
# If has coverage, just add coverage info to new view
view = sublime.active_window().new_file()
view.run_command("new_view", {
"name": coverage_statistic,
"input": body
})
# Calculate line coverage
split_lines = view.lines(sublime.Region(0, view.size()))
uncovered_region = []
covered_region = []
for region in split_lines:
        # Work out the 1-based line number of this region
line = view.rowcol(region.begin() + 1)[0] + 1
if line in linesNotCovered:
uncovered_region.append(region)
else:
covered_region.append(region)
# Append body with uncovered line
view.add_regions("numLocationsNotCovered", uncovered_region, "invalid", "dot",
sublime.DRAW_SOLID_UNDERLINE | sublime.DRAW_EMPTY_AS_OVERWRITE)
view.add_regions("numLocationsCovered", covered_region, "comment", "cross",
sublime.DRAW_SOLID_UNDERLINE | sublime.DRAW_EMPTY_AS_OVERWRITE)
def get_local_timezone_offset():
""" Return the timezone offset of local time with GMT standard
Return:
* offset_hours -- date time offset hours with GMT
"""
localtz = dateutil.tz.tzlocal()
localoffset = localtz.utcoffset(datetime.datetime.now(localtz))
offset_hours = localoffset.total_seconds() / 3600
return offset_hours
# https://docs.python.org/3.2/library/datetime.html#strftime-and-strptime-behavior
# http://stackoverflow.com/questions/12015170/how-do-i-automatically-get-the-timezone-offset-for-my-local-time-zone
def local_datetime(server_datetime_str):
""" Convert the Datetime got from server to local GMT Datetime
Return:
* local_datetime -- local datetime with GMT offset
"""
offset = get_local_timezone_offset()
local_datetime = datetime.datetime.strptime(server_datetime_str[:19], '%Y-%m-%dT%H:%M:%S')
local_datetime += datetime.timedelta(hours=offset)
return local_datetime
def server_datetime(local_datetime):
""" Convert the Datetime got from local to GMT Standard
Return:
* server_datetime -- standard GMT server datetime
"""
offset = get_local_timezone_offset()
server_datetime = local_datetime + datetime.timedelta(hours=-offset)
return server_datetime
def populate_all_components():
""" Get all components from local cache
"""
# Get username
settings = context.get_settings()
username = settings["username"]
    # If component metadata exists in the local cache, just return it
    component_metadata = sublime.load_settings("component_metadata.sublime-settings")
    if not component_metadata.has(username):
        Printer.get('error').write("No cache found, please create a new project first.")
        return {}
return_component_attributes = {}
for component_type in component_metadata.get(username).keys():
component_attributes = component_metadata.get(username)[component_type]
for key in component_attributes.keys():
component_id = component_attributes[key]["id"]
component_type = component_attributes[key]["type"]
component_name = component_attributes[key]["name"]
return_component_attributes[component_type+"."+component_name] = component_id
return return_component_attributes
def populate_components(_type):
"""
    Get a dict (Class Name => Class Id) of components whose NamespacePrefix is null in the whole org
@return: {
classname: classid
...
}
"""
# Get username
settings = context.get_settings()
username = settings["username"]
    # If component metadata exists in the local cache, just return it
    component_settings = sublime.load_settings("component_metadata.sublime-settings")
    if not component_settings.has(username):
        message = "Please execute the `Cache > Reload Sobject Cache` command before executing this command"
        Printer.get("error").write(message)
return {}
return component_settings.get(username).get(_type)
def populate_lighting_applications():
settings = context.get_settings()
workspace = settings["workspace"]
username = settings["username"]
aura_path = os.path.join(workspace, "src", "aura")
component_settings = sublime.load_settings("component_metadata.sublime-settings")
if not component_settings.has(username):
return {}
aura_attributes = {}
aura_cache = component_settings.get(username).get("AuraDefinitionBundle")
for name in aura_cache:
aura_name, element_name = aura_cache[name]["fullName"].split("/")
if element_name.endswith(".app"):
aura_attributes[aura_name] = aura_cache[name]
return aura_attributes
def populate_sobjects_describe():
"""
Get the sobjects list in org.
"""
# Get username
settings = context.get_settings()
username = settings["username"]
    # If the sobject cache exists in sobjects_completion.sublime-settings, just return it
    sobjects_completions = sublime.load_settings("sobjects_completion.sublime-settings")
    if not sobjects_completions.has(username):
        message = "Please execute the `Cache > Reload Sobject Cache` command before executing this command"
        Printer.get('error').write(message)
return
sobjects_describe = {}
sd = sobjects_completions.get(username)["sobjects"]
for key in sd:
sobject_describe = sd[key]
sobjects_describe[sobject_describe["name"]] = sobject_describe
return sobjects_describe
def populate_all_test_classes():
# Get username
settings = context.get_settings()
username = settings["username"]
component_metadata = sublime.load_settings("component_metadata.sublime-settings")
if not component_metadata.has(username):
        Printer.get('error').write("No cache, please create a new project first.")
return
classes = component_metadata.get(username)["ApexClass"]
test_class_ids = []
for class_name, class_attr in classes.items():
if not class_attr["is_test"]: continue
test_class_ids.append(class_attr["id"])
return test_class_ids
def set_component_attribute(attributes, lastModifiedDate):
""" Set the LastModifiedDate for specified component
Params:
* attributes -- component attributes
* lastModifiedDate -- LastModifiedDate of component
"""
    # If no component metadata exists in the local cache, there is nothing to update
settings = context.get_settings()
username = settings["username"]
s = sublime.load_settings("component_metadata.sublime-settings")
if not s.has(username):
return
_type = attributes["type"]
fullName = attributes["name"] + attributes["extension"]
components_dict = s.get(username, {})
    # Prevent KeyError if this component type is not cached yet
    if _type not in components_dict:
        components_dict[_type] = {}
    # Build components dict
    attr = components_dict[_type].get(fullName.lower(), {})
    attr["lastModifiedDate"] = lastModifiedDate
    components_dict[_type][fullName.lower()] = attr
# Save settings and show success message
s.set(username, components_dict)
sublime.save_settings("component_metadata.sublime-settings")
def get_sobject_caches(setting_name):
""" Return the specified local cache of default project
Return:
* caches -- sobject local cache in default project
"""
config_settings = context.get_settings()
projects = config_settings["projects"]
settings = sublime.load_settings(setting_name)
caches = []
for p in projects:
if settings.has(projects[p]["username"]):
caches.append([p, projects[p]["username"]])
return caches
def clear_cache(username, setting_name):
""" Clear the specified local cache of default project
Arguments:
* username -- the login username of default project
"""
settings = sublime.load_settings(setting_name)
    settings.erase(username)
sublime.save_settings(setting_name)
sublime.status_message(username + " cache is cleared")
def get_sobject_metadata(username):
""" Return the sobject cache of default project
Arguments:
* username -- username of current default project
Returns:
* sobject metadata -- the sobject metadata of default project
"""
sobjects_settings = sublime.load_settings("sobjects_completion.sublime-settings")
sobjects_metadata = {}
if sobjects_settings.has(username):
sobjects_metadata = sobjects_settings.get(username, {})
return sobjects_metadata
def get_symbol_tables(username):
""" Return the sobject cache of default project
Arguments:
* username -- username of current default project
Returns:
* sobject metadata -- the sobject metadata of default project
"""
symbol_tables = {}
symbol_tables_settings = sublime.load_settings("symbol_table.sublime-settings")
if symbol_tables_settings.has(username):
symbol_tables = symbol_tables_settings.get(username, {})
return symbol_tables
def get_sobject_completion_list(
sobject_describe,
prefix="",
display_fields=True,
display_parent_relationships=True,
display_child_relationships=True):
""" Return the formatted completion list of sobject
Arguments:
* sobject_describe -- describe result of sobject
    * prefix -- optional; sometimes a parent relationshipName may refer to multiple sobjects,
        so we need to add the prefix to distinguish the completions
    * display_child_relationships -- optional; indicates whether to display sobject child relationship names
"""
# Fields Describe
completion_list = []
if display_fields:
fields = sobject_describe["fields"]
for field_name_desc in sorted(fields):
field_name = fields[field_name_desc]
completion = ("%s%s" % (prefix, field_name_desc), field_name)
completion_list.append(completion)
# Parent Relationship Describe
if display_parent_relationships:
for key in sorted(sobject_describe["parentRelationships"]):
parent_sobject = sobject_describe["parentRelationships"][key]
completion_list.append((prefix + key + "\t" + parent_sobject + "(c2p)", key))
# Child Relationship Describe
if display_child_relationships:
for key in sorted(sobject_describe["childRelationships"]):
child_sobject = sobject_describe["childRelationships"][key]
completion_list.append((prefix + key + "\t" + child_sobject + "(p2c)", key))
return completion_list
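# Illustrative example (hypothetical cache entries; the exact label format comes
# from the cached describe): given
#   sobject_describe = {"fields": {"Name\tstring": "Name"},
#                       "parentRelationships": {"Owner": "User"},
#                       "childRelationships": {"Contacts": "Contact"}}
# get_sobject_completion_list(sobject_describe) would return
#   [("Name\tstring", "Name"), ("Owner\tUser(c2p)", "Owner"), ("Contacts\tContact(p2c)", "Contacts")]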
def get_component_completion(username, component_type, tag_has_ending=False):
""" Return the formatted completion list of component
Return:
* completion_list -- all apex component completion list
"""
completion_list = []
component_settings = sublime.load_settings(context.COMPONENT_METADATA_SETTINGS)
if not component_settings.has(username): return completion_list
component_attrs = component_settings.get(username)
if component_type in component_attrs:
components = component_attrs[component_type]
for name in components:
if "name" not in components[name]: continue
component_name = components[name]["name"]
if component_type == "ApexComponent":
display = "c:%s\t%s" % (component_name, component_type)
value = "c:%s%s" % (
component_name, "" if tag_has_ending else "$1>"
)
completion_list.append((display, value))
else:
completion_list.append((component_name+"\t"+component_type, component_name))
return completion_list
def get_component_attributes(settings, component_name):
component_dir = os.path.join(settings["workspace"], "src",
"components", component_name+".component")
completion_list = []
if os.path.isfile(component_dir):
name, _type, description = "", "", ""
with open(component_dir) as fp:
try:
content = fp.read()
except UnicodeDecodeError as ex:
return completion_list
pattern = "<apex:attribute[\\S\\s]*?>"
for match in re.findall(pattern, content, re.IGNORECASE):
pattern = '\\w+\\s*=\\s*"[\\s\\S]*?"'
for m in re.findall(pattern, match, re.IGNORECASE):
attr, value = m.split('=')
attr, value = attr.strip(), value.strip()
value = value[1:-1]
if attr.lower() == "name":
name = value
if attr.lower() == "type":
_type = value
if attr.lower() == "description":
description = value
if name and _type:
display = "%s\t%s(%s)" % (name, description, _type.capitalize())
value = '%s="$1"$0' % name
completion_list.append((display, value))
return completion_list
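# Illustrative example (hypothetical markup, not from a real project): a component
# containing
#   <apex:attribute name="title" type="String" description="Page title"/>
# yields the completion tuple
#   ("title\tPage title(String)", 'title="$1"$0')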
def convert_15_to_18(the15Id):
""" Convert Salesforce 15 Id to 18 Id
Arguments:
* the15Id - to be converted 15 Id
Return:
* 18 Id - converted 18 Id
"""
if not the15Id or len(the15Id) != 15: return the15Id
cmap = {
"00000": "A", "00001": "B", "00010": "C", "00011": "D", "00100": "E",
"00101": "F", "00110": "G", "00111": "H", "01000": "I", "01001": "J",
"01010": "K", "01011": "L", "01100": "M", "01101": "N", "01110": "O",
"01111": "P", "10000": "Q", "10001": "R", "10010": "S", "10011": "T",
"10100": "U", "10101": "V", "10110": "W", "10111": "X", "11000": "Y",
"11001": "Z", "11010": "0", "11011": "1", "11100": "2", "11101": "3",
"11110": "4", "11111": "5"
}
chars = [cmap["".join(["1" if c.isupper() else "0" for c in char[::-1]])] \
for char in list_chunks(the15Id, 5)]
return the15Id + "".join(chars)
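# Illustrative example (made-up Id): every 5-character chunk of a purely numeric
# 15-character Id maps to "00000", so the computed suffix is "AAA":
#   convert_15_to_18("001000000000001")     # -> "001000000000001AAA"
#   convert_15_to_18("001000000000001AAA")  # already 18 characters, returned unchanged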
def list_chunks(l, n):
""" Yield successive n-sized chunks from l.
Arguments:
* l - to be chunked list
* n - split size
"""
for i in range(0, len(l), n):
yield l[i:i+n]
def dict_chunks(data, SIZE=10000):
from itertools import islice
it = iter(data)
for i in range(0, len(data), SIZE):
yield {k:data[k] for k in islice(it, SIZE)}
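# Illustrative usage of the chunk helpers:
#   list(list_chunks("0012800000q7zXa", 5))              # -> ['00128', '00000', 'q7zXa']
#   list(dict_chunks({"a": 1, "b": 2, "c": 3}, SIZE=2))
#       # -> [{'a': 1, 'b': 2}, {'c': 3}] (grouping follows dict iteration order)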
def open_with_browser(show_url, use_default_chrome=True):
""" Utility for open file in browser
Arguments:
* use_default_browser -- optional; if true, use chrome configed in settings to open it
"""
settings = context.get_settings()
browser_path = settings["default_chrome_path"]
if not use_default_chrome or not os.path.exists(browser_path):
webbrowser.open_new_tab(show_url)
else:
webbrowser.register('chrome', None, webbrowser.BackgroundBrowser(browser_path))
webbrowser.get('chrome').open_new_tab(show_url)
def remove_comments(view, regions):
# Get all comment regions
comment_regions = view.find_by_selector('comment')
matched_regions = []
for region in regions:
# check whether region is comment statement
invalid_region = False
for comment_region in comment_regions:
if comment_region.contains(region):
invalid_region = True
break
if "\n" in view.substr(region):
invalid_region = True
break
        # Skip DML statements, for example
        # `insert prd` | `update prd` | `delete prd`,
        # because the DML keyword is not a variable type
pattern = '(insert|update|upsert|delete|undelete)+\\s+'
if re.match(pattern, view.substr(region), re.IGNORECASE):
continue
# If region is comment statement, just skip
if not invalid_region:
matched_regions.append(region)
return matched_regions
def get_variable_type(view, pt, pattern):
"""Return the matched soql region
Arguments:
* view -- current active view
* pt - the cursor point
* pattern - the regular expression for finding matched region
"""
# Get the matched variable type
matched_regions = view.find_all(pattern, sublime.IGNORECASE)
uncomment_regions = remove_comments(view, matched_regions)
# Three scenarios:
# 1. If no matched regions
# 2. Only one matched region
# 3. More than one matched region
if not uncomment_regions:
return ""
elif len(uncomment_regions) == 1:
matched_region = uncomment_regions[0]
else:
row_region = {} # Row => Region
for mr in uncomment_regions:
row, col = view.rowcol(mr.begin())
row_region[row] = mr
# Get the cursor row
cursor_row = view.rowcol(pt)[0]
# Three steps:
# 1. Add the cursor row and matched rows together
# 2. Sort all rows by ASC
# 3. Get the previous row of cursor row
rows = list(row_region.keys())
rows.append(cursor_row)
rows = sorted(rows)
cursor_index = rows.index(cursor_row)
matched_region = row_region[rows[cursor_index - 1]]
# Get the content of matched region
matched_str = view.substr(matched_region).strip()
# If list, map, set
if "<" in matched_str and ">" in matched_str:
variable_type = matched_str.split("<")[0].strip()
# String[] strs;
elif "[]" in matched_str:
variable_type = 'list'
# String str;
else:
variable_type = matched_str.split(" ")[0]
return variable_type
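# Illustrative outcomes (assuming the caller's pattern matched these Apex
# declarations above the cursor):
#   "List<Account> accounts;"  -> "List"     (generic container)
#   "String[] names;"          -> "list"     (array notation)
#   "Account acc;"             -> "Account"  (plain declaration)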
def get_soql_match_region(view, pt):
"""Return the mgatched soql region
Arguments:
* view -- current Active View
Return:
* matched_region -- Found SOQL
* sobject_name -- Found Sobject Name in SOQL
* is_between_start_and_from -- the cursor point is between start and the last from
"""
pattern = "SELECT\\s+[^;]+FROM\\s+[1-9_a-zA-Z]+"
matched_regions = view.find_all(pattern, sublime.IGNORECASE)
matched_region = None
is_between_start_and_from = False
sobject_name = None
for m in matched_regions:
if m.contains(pt):
matched_region = m
break
if not matched_region:
return (matched_region, is_between_start_and_from, sobject_name)
match_str = view.substr(matched_region)
match_begin = matched_region.begin()
select_pos = match_str.lower().find("select")
from_pos = match_str.lower().rfind("from")
if pt >= (select_pos + match_begin) and pt <= (from_pos + match_begin):
is_between_start_and_from = True
sobject_name = match_str[from_pos+5:]
sobject_name = sobject_name.strip()
return matched_region, is_between_start_and_from, sobject_name
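# Illustrative example (hypothetical buffer content): with the cursor placed
# between SELECT and FROM in
#   SELECT Id, Name FROM Account
# the function returns (<matched region>, True, "Account"); with the cursor
# outside any SOQL statement it returns (None, False, None).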
def parse_symbol_table(symbol_table):
"""Parse the symbol_table to completion (Copied From MavensMate)
Arguments:
* symbol_table -- ApexClass Symbol Table
"""
completions = {}
    if not symbol_table:
        return completions
for c in symbol_table.get('constructors', []):
params = []
modifiers = " ".join(c.get("modifiers", []))
if 'parameters' in c and type(c['parameters']) is list and len(c['parameters']) > 0:
for p in c['parameters']:
params.append(p["type"].capitalize() + " " + p["name"])
paramStrings = []
for i, p in enumerate(params):
paramStrings.append("${"+str(i+1)+":"+params[i]+"}")
paramString = ", ".join(paramStrings)
completions[modifiers+" "+c["name"]+"("+", ".join(params)+")"] =\
"%s(%s)" % (c["name"], paramString)
else:
completions[modifiers+" "+c["name"]+"()"] = c["name"]+"()${1:}"
for c in symbol_table.get('properties', []):
modifiers = " ".join(c.get("modifiers", []))
property_type = c["type"].capitalize() if "type" in c and c["type"] else ""
completions[modifiers+" "+c["name"]+"\t"+property_type] = c["name"]
for c in symbol_table.get('methods', []):
params = []
modifiers = " ".join(c.get("modifiers", []))
if 'parameters' in c and type(c['parameters']) is list and len(c['parameters']) > 0:
for p in c['parameters']:
params.append(p["type"]+" "+p["name"])
if len(params) == 1:
completions[modifiers+" "+c["name"]+"("+", ".join(params)+") \t"+c['returnType']] =\
"%s(${1:%s})" % (c["name"], ", ".join(params))
elif len(params) > 1:
paramStrings = []
for i, p in enumerate(params):
paramStrings.append("${"+str(i+1)+":"+params[i]+"}")
paramString = ", ".join(paramStrings)
completions[modifiers+" "+c["name"]+"("+", ".join(params)+") \t"+c['returnType']] =\
c["name"]+"("+paramString+")"
else:
completions[modifiers+" "+c["name"]+"("+", ".join(params)+") \t"+c['returnType']] =\
c["name"]+"()${1:}"
for c in symbol_table.get("innerClasses", []):
tableDeclaration = c.get("tableDeclaration")
modifiers = " ".join(tableDeclaration.get("modifiers", []))
modifiers = modifiers + " " if modifiers else ""
# Add inner class completion without parameters
completions["%s%s\tInner Class" % (modifiers, c["name"])] = "%s$1" % c["name"]
# Add inner class constructor completion
if 'constructors' in c and len(c['constructors']) > 0:
for con in c['constructors']:
modifiers = " ".join(con.get("modifiers", []))
params = []
if 'parameters' in con and type(con['parameters']) is list and len(con['parameters']) > 0:
for p in con['parameters']:
params.append(p["type"].capitalize()+" "+p["name"])
paramStrings = []
for i, p in enumerate(params):
paramStrings.append("${"+str(i+1)+":"+params[i]+"}")
paramString = ", ".join(paramStrings)
completions[modifiers+" "+con["name"]+"("+", ".join(params)+")"] =\
c["name"]+"("+paramString+")"
else:
completions[modifiers+" "+con["name"]+"()"] =\
c["name"]+"()${1:}"
return completions
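# Illustrative example (simplified symbol table, not a full Tooling API response):
#   {"constructors": [{"name": "Foo", "modifiers": ["public"], "parameters": []}],
#    "properties": [{"name": "total", "modifiers": ["public"], "type": "Integer"}],
#    "methods": [], "innerClasses": []}
# produces completions such as
#   {"public Foo()": "Foo()${1:}", "public total\tInteger": "total"}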
def add_operation_history(operation, history_content):
"""Keep the history in the local history
Arguments:
* operation -- the operation source
* history_content -- the content needed to keep
"""
settings = context.get_settings()
if not settings["keep_operation_history"]: return
splits = operation.split("/")
if len(splits) == 1:
folder, operation = "", splits[0]
elif len(splits) == 2:
folder, operation = splits
outputdir = settings["workspace"] + "/.history/" + folder
if not os.path.exists(outputdir):
os.makedirs(outputdir)
time_stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
history = "%s\n```java\n%s\n```\n\n" % (time_stamp, history_content)
    with open(outputdir + "/%s.md" % operation, "ab") as fp:
        fp.write(history.encode("utf-8"))
def add_config_history(operation, content, settings, ext="json"):
"""Keep the history in the local history
Arguments:
* operation -- the operation source
* history_content -- the content needed to keep
"""
outputdir = os.path.join(settings["workspace"], ".config")
if not os.path.exists(outputdir):
os.makedirs(outputdir)
with open(outputdir + "/%s.%s" % (operation, ext), "w") as fp:
fp.write(json.dumps(content, indent=4))
    # After writing the file locally, refresh the sidebar
    sublime.set_timeout(lambda: sublime.active_window().run_command('refresh_folder_list'), 200)
    sublime.set_timeout(lambda: sublime.active_window().run_command('refresh_folder_list'), 1300)
def export_report_api(rootdir):
reports = []
for parent,dirnames,filenames in os.walk(rootdir):
for filename in filenames:
if not filename.endswith(".report"): continue
report_dir = parent + "/" + filename
report_folder = os.path.split(parent)[1]
report_name = filename.split(".")[0]
report_api = getUniqueElementValueFromXmlString(open(report_dir, "rb").read(), "name")
# report_dict[report_api] = report_name
reports.append({"name": report_name, "api": report_api, "folder": report_folder})
list2csv(rootdir + "/test.csv", reports)
def check_action_enabled():
"""If project in current date is not created, new component is not enabled
Returns:
* * -- whether project in current date is exist
"""
# Check project workspace is available
settings = context.get_settings()
if not os.path.exists(settings["workspace"]): return False
# Check whether describe_metadata request is finished
described_metadata = get_described_metadata(settings)
return described_metadata is not None
def get_view_by_name(view_name):
"""Get view by view name
Arguments:
* view_name -- name of view in sublime
Returns:
* view -- sublime open tab
"""
view = None
for win in sublime.windows():
for v in win.views():
if v.name() == view_name:
view = v
return view
def get_view_by_file_name(file_name):
"""
Get the view in the active window by the view_name
Arguments:
* view_id: view name
Returns:
* return: view
"""
view = None
for v in sublime.active_window().views():
if not v.file_name(): continue
if file_name in v.file_name():
view = v
return view
def get_view_by_id(view_id):
"""
Get the view in the active window by the view_id
* view_id: id of view
* return: view
"""
view = None
for v in sublime.active_window().views():
if not v.id(): continue
if v.id() == view_id:
view = v
return view
def get_child_types(parent_type):
""" Get child types by parent type
Parameter:
* parent_type -- Parent Metadata Object
Return:
* child_types -- Child Metadata Objects of parent
"""
settings = context.get_settings()
child_types = settings[parent_type].get("childXmlNames", [])
if isinstance(child_types, str):
child_types = [child_types]
return child_types
def parse_package_types(_types):
""" Build structure
From: {
"CustomObject": ["A__c", "B__c"],
"CustomField": ["A__c.A__c", "A__c.A1__c", "B__c.B__c"],
"ValidationRule": ["A__c.VR1", "B__c.BR2"]
"ApexClass": ["AClass", "BClass", "CClass"]
}
To: {
"CustomObject": {
"A__c": {
"CustomField": ["A.A__c", "A.B__c"],
"ValidationRule": ["A.VR1"]
},
"B__c": {
"CustomField": ["B__c.B__c"],
"ValidationRule": ["B__c.BR2"]
}
},
"ApexClass": ["A", "B", "C"]
}
"""
settings = context.get_settings()
package_types = {}
for _type, elements in _types.items():
attr = settings[_type]
_child_types = attr.get("childXmlNames", [])
# If _type is child type, for example,
# CustomField, ListView
if _type != attr["xmlName"]:
continue
# If no child XML
if not _child_types:
# If no elements, don't keep it
if not elements:
continue
# inFolder is false
if attr["inFolder"] == "false":
package_types[_type] = elements
else:
# Build structure as {folder: [elements]}
folder_elements = {}
for folder in [e for e in elements if "/" not in e]:
folder_elements[folder] = [
e for e in elements if e.startswith(folder) \
and "/" in e
]
package_types[_type] = folder_elements
continue
if isinstance(_child_types, str):
_child_types = [_child_types]
child_cache = {}
for _child_type in _child_types:
if _child_type not in _types:
continue
parent_to_children = {}
for parent in elements:
children = []
for _child_element in _types[_child_type]:
if _child_element.startswith(parent):
children.append(_child_element)
if children:
parent_to_children[parent] = children
if parent_to_children:
child_cache[_child_type] = parent_to_children
package_types[_type] = child_cache
# view = sublime.active_window().new_file()
# view.run_command("new_view", {
# "name": "test",
# "input": json.dumps(package_types)
# })
return package_types
def build_package_types(package_xml_content):
result = xmltodict.parse(package_xml_content)
elements = []
metadata_types = result["Package"]["types"]
# If there is only one types in package
if isinstance(metadata_types, dict):
metadata_types = [metadata_types]
types = {}
for t in metadata_types:
name = t["name"]
members = t["members"]
if isinstance(members, str):
types[name] = [members]
elif isinstance(members, list):
types[name] = members
return types
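# Illustrative example (hypothetical package.xml content): parsing
#   <Package xmlns="http://soap.sforce.com/2006/04/metadata">
#       <types><members>*</members><name>ApexClass</name></types>
#       <version>32.0</version>
#   </Package>
# returns {"ApexClass": ["*"]}; a types entry with several members keeps the
# member list as-is, e.g. {"CustomObject": ["Account", "Case"]}.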
def build_folder_types(dirs):
""" Build folders_dict for folder refreshing
{
"ApexClass": ["*"],
"ApexTrigger": ["*"],
"CustomObject": ["*"]
}
"""
settings = context.get_settings()
dname = settings["default_project_name"]
types = {}
for _dir in dirs:
base, folder = os.path.split(_dir)
if folder not in settings: continue
if dname not in _dir: continue
xml_name = settings[folder]["xmlName"]
types[xml_name] = ["*"]
return types
def build_package_dict(files, ignore_folder=True):
""" Build Package Dict as follow structure by files
{
'ApexClass': [{
'dir': <file path>,
'folder': 'classes',
'name': 'AccountController',
'metadata_name': 'AccountController',
'extension': '.cls'
}],
'ApexComponent': [{
'dir': <file path>,
'folder': 'components',
'name': 'SiteFooter',
'metadata_name': 'SiteFooter',
'extension': '.component'
}]
}
"""
settings = context.get_settings()
package_dict = {}
for f in files:
# Ignore folder
if ignore_folder and not os.path.isfile(f):
continue
# Ignore "-meta.xml"
if f.endswith("-meta.xml"):
continue
        # Parse the file attributes (metadata folder, folder, name, extension)
attributes = get_file_attributes(f)
metadata_folder = attributes["metadata_folder"]
mo = settings[metadata_folder]
metadata_object = mo["xmlName"]
file_dict = {
"name": attributes["name"],
"metadata_name": attributes["name"],
"dir": f,
"folder": attributes["folder"] if "folder" in attributes else "",
"metadata_folder": attributes["metadata_folder"],
"extension": attributes["extension"]
}
if mo["inFolder"] == "true":
file_dict["metadata_name"] = "%s/%s" % (
attributes["folder"], attributes["name"]
)
if metadata_folder == "aura":
file_dict["metadata_name"] = "%s" % attributes["folder"]
# Build dict
if metadata_object in package_dict:
package_dict[metadata_object].append(file_dict)
else:
package_dict[metadata_object] = [file_dict]
return package_dict
def build_package_xml(settings, package_dict):
""" Build Package XML as follow structure
<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
<types>
<met:members>*</met:members>
<met:members>Account</met:members>
<name>CustomObject</name>
</types>
<version>32.0</version>
</Package>
"""
# Build types for package.xml
types = []
for meta_type, values in package_dict.items():
if values and "metadata_name" in values[0]:
members = ["<members>%s</members>" % v["metadata_name"] for v in values]
else:
members = ["<members>%s</members>" % v for v in values]
types.append("""
<types>
%s
<name>%s</name>
</types>
""" % (" ".join(members), meta_type))
# Build package.xml
package_xml_content = """<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
%s
<version>%s.0</version>
</Package>
""" % (" ".join(types), settings["api_version"])
return package_xml_content
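# Illustrative example (assuming settings["api_version"] is 32): a package dict of
#   {"ApexClass": ["AccountController"]}
# produces, once passed through format_xml, a package.xml equivalent to
#   <?xml version="1.0" encoding="UTF-8"?>
#   <Package xmlns="http://soap.sforce.com/2006/04/metadata">
#       <types>
#           <members>AccountController</members>
#           <name>ApexClass</name>
#       </types>
#       <version>32.0</version>
#   </Package>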
def build_destructive_package_by_files(files, ignore_folder=True):
settings = context.get_settings()
workspace = settings["workspace"]
if not os.path.exists(workspace):
os.makedirs(workspace)
    # Construct package dict
package_dict = build_package_dict(files, ignore_folder)
# Build destructiveChanges.xml
destructive_xml_content = build_package_xml(settings, package_dict)
destructive_xml_path = workspace+"/destructiveChanges.xml"
with open(destructive_xml_path, "wb") as fp:
fp.write(destructive_xml_content.encode("utf-8"))
# Build package.xml
package_xml_content = build_package_xml(settings, {})
package_xml_path = workspace+"/package.xml"
with open(package_xml_path, "wb") as fp:
fp.write(package_xml_content.encode("utf-8"))
# Create temp zipFile
zipfile_path = workspace + "/test.zip"
zf = zipfile.ZipFile(zipfile_path, "w", zipfile.ZIP_DEFLATED)
# Compress destructive_xml and package_xml into temp zipFile
# After that, close the input stream
zf.write(package_xml_path, "package.xml")
zf.write(destructive_xml_path, "destructiveChanges.xml")
zf.close()
# Remove temp files
os.remove(package_xml_path)
os.remove(destructive_xml_path)
# base64 encode zip package
base64_package = base64_encode(zipfile_path)
# Remove temporary `test.zip`
os.remove(zipfile_path)
return base64_package
def build_destructive_package_by_package_xml(types):
""" Build destructive package,
Arguments:
* types -- see below json:
{
"ApexClass": ["AClass", "BClass"],
"ApexTrigger": ["ATrigger", "BTrigger"],
...
}
Return:
* base64_encode -- base64 encode zip file,
which contains destructiveChanges.xml and package.xml
"""
settings = context.get_settings()
workspace = settings["workspace"]
# Build destructiveChanges.xml
destructive_xml_content = build_package_xml(settings, types)
destructive_xml_path = workspace+"/destructiveChanges.xml"
with open(destructive_xml_path, "wb") as fp:
fp.write(destructive_xml_content.encode("utf-8"))
# Build package.xml
package_xml_content = build_package_xml(settings, {})
package_xml_path = workspace+"/package.xml"
with open(package_xml_path, "wb") as fp:
fp.write(package_xml_content.encode("utf-8"))
# Create temp zipFile
zipfile_path = workspace + "/test.zip"
zf = zipfile.ZipFile(zipfile_path, "w", zipfile.ZIP_DEFLATED)
# Compress destructive_xml and package_xml into temp zipFile
# After that, close the input stream
zf.write(package_xml_path, "package.xml")
zf.write(destructive_xml_path, "destructiveChanges.xml")
zf.close()
# Remove temp files
os.remove(package_xml_path)
os.remove(destructive_xml_path)
# base64 encode zip package
base64_package = base64_encode(zipfile_path)
# Remove temporary `test.zip`
os.remove(zipfile_path)
return base64_package
def build_deploy_package(files):
# Initiate zipfile
settings = context.get_settings()
if not os.path.exists(settings["workspace"]):
os.makedirs(settings["workspace"])
zipfile_path = settings["workspace"] + "/test.zip"
zf = zipfile.ZipFile(zipfile_path, "w", zipfile.ZIP_DEFLATED)
# Get package dict
package_dict = build_package_dict(files)
# Add files to zip
for meta_type in package_dict:
for f in package_dict[meta_type]:
# Define write_to
write_to = (
f["metadata_folder"],
("/" + f["folder"]) if f["folder"] else "",
f["name"],
f["extension"]
)
            # If it is a Lightning component, add all related files in the bundle to the zip too
if f["metadata_folder"] == "aura":
base = os.path.split(f["dir"])[0]
for parent, dirnames, filenames in os.walk(base):
for filename in filenames:
aura_file = os.path.join(parent, filename)
zf.write(aura_file, "aura/%s/%s" % (
f["folder"], filename
))
else:
zf.write(f["dir"], "%s%s/%s.%s" % write_to)
# If -meta.xml is exist, add it to folder
met_xml = f["dir"] + "-meta.xml"
if os.path.isfile(met_xml):
zf.write(met_xml, "%s%s/%s.%s-meta.xml" % write_to)
# Prepare package XML content
package_xml_content = build_package_xml(settings, package_dict)
package_xml_content = format_xml(package_xml_content)
if settings["debug_mode"]:
print ("{seprate}\n[Package.xml for Deployment]: \n{seprate}\n{content}\n{seprate}".format(
seprate="~" * 100,
content=package_xml_content.decode("UTF-8")
))
# Write package content to .package path
try:
time_stamp = time.strftime("%Y%m%d%H%M", time.localtime(time.time()))
xml_dir = os.path.join(settings["workspace"], ".deploy")
if not os.path.exists(xml_dir):
os.mkdir(xml_dir)
# http://stackoverflow.com/questions/1627198/python-mkdir-giving-me-wrong-permissions
if not os.access(xml_dir, os.W_OK):
os.chmod(xml_dir, 0o755)
xml_dir = os.path.join(xml_dir, "package-%s.xml" % time_stamp)
with open(xml_dir, "wb") as fp:
fp.write(package_xml_content)
zf.write(xml_dir, "package.xml")
except Exception as ex:
if settings["debug_mode"]:
            print('Error encountered while saving package.xml: %s' % str(ex))
# Close zip input stream
zf.close()
# base64 encode zip package
base64_package = base64_encode(zipfile_path)
# Remove temporary `test.zip`
if not settings["debug_mode"]:
os.remove(zipfile_path)
return base64_package
def compress_resource_folder(resource_folder):
""" Prepare base64 encoded zip for uploading static resource
Arguments:
* resource_folder - static resource folder in project
"""
static_resource_path, resource_name = os.path.split(resource_folder)
# Create StaticResource File
static_resource_file = os.path.join(static_resource_path, resource_name+".resource")
zf = zipfile.ZipFile(static_resource_file, "w", zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(resource_folder):
basename = dirpath[len(resource_folder)+1:]
for filename in filenames:
zf.write(os.path.join(dirpath, filename), basename+"/"+filename)
zf.close()
# Build package
base64_package = build_deploy_package([static_resource_file])
return base64_package
def build_aura_package(files_or_dirs):
# Build package
settings = context.get_settings()
workspace = settings["workspace"]
if not os.path.exists(workspace): os.makedirs(workspace)
zipfile_path = workspace+"/aura.zip"
zf = zipfile.ZipFile(zipfile_path, "w", zipfile.ZIP_DEFLATED)
aura_names = []
for _file_or_dir in files_or_dirs:
if os.path.isfile(_file_or_dir):
base, aura_element = os.path.split(_file_or_dir)
base, aura_name = os.path.split(base)
base, meta_type = os.path.split(base)
aura_names.append(aura_name)
zf.write(_file_or_dir, "%s/%s/%s" % (meta_type, aura_name, aura_element))
else:
base, aura_name = os.path.split(_file_or_dir)
base, meta_type = os.path.split(base)
aura_names.append(aura_name)
for dirpath, dirnames, filenames in os.walk(_file_or_dir):
base, aura_name = os.path.split(dirpath)
if not filenames:
zf.write(dirpath, meta_type+"/"+aura_name)
else:
for filename in filenames:
zf.write(os.path.join(dirpath, filename), "%s/%s/%s" % (meta_type, aura_name, filename))
# Write package.xml to zip
package_xml_content = """<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
<types>
%s
<name>AuraDefinitionBundle</name>
</types>
<version>%s.0</version>
</Package>
""" % ("\n".join(["<members>%s</members>" % a for a in aura_names]), settings["api_version"])
package_xml_path = settings["workspace"]+"/package.xml"
    with open(package_xml_path, "wb") as fp:
        fp.write(package_xml_content.encode("utf-8"))
zf.write(package_xml_path, "package.xml")
os.remove(package_xml_path)
# Close zip input stream
zf.close()
# base64 encode zip package
base64_package = base64_encode(zipfile_path)
    # Remove temporary `aura.zip`
os.remove(zipfile_path)
return base64_package
def base64_encode(zipfile_path):
    with open(zipfile_path, "rb") as f:
        content = f.read()
    base64String = base64.b64encode(content)
    return base64String.decode('UTF-8')
def compress_package(package_dir):
zipfile_path = package_dir+"/archive.zip"
zf = zipfile.ZipFile(zipfile_path, "w", zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(package_dir):
basename = dirpath[len(package_dir)+1:]
for filename in filenames:
zf.write(os.path.join(dirpath, filename), basename+"/"+filename)
zf.close()
base64_package = base64_encode(zipfile_path)
os.remove(zipfile_path)
return base64_package
def extract_encoded_zipfile(encoded_zip_file, extract_to, ignore_package_xml=False):
""" Decode the base64 encoded file and
extract the zip file to workspace and
rename the "unpackaged" to "src"
"""
if not os.path.exists(extract_to):
os.makedirs(extract_to)
zipfile_path = os.path.join(extract_to, "package.zip")
    with open(zipfile_path, "wb") as fout:
        fout.write(base64.b64decode(encoded_zip_file))
extract_file(zipfile_path, extract_to, ignore_package_xml)
    # Remove the temporary zip file
os.remove(zipfile_path)
# In windows, folder is not shown in the sidebar,
# we need to refresh the sublime workspace to show it
sublime.active_window().run_command("refresh_folder_list")
def extract_zipfile(zipfile_path, extract_to):
""" Extract Zip File to current folder
"""
try:
zfile = zipfile.ZipFile(zipfile_path, 'r')
except zipfile.BadZipFile as ex:
raise BaseException(str(ex))
if not os.path.exists(extract_to):
os.makedirs(extract_to)
for filename in zfile.namelist():
if filename.endswith('/'): continue
f = os.path.join(extract_to, "", filename)
if not os.path.exists(os.path.dirname(f)):
os.makedirs(os.path.dirname(f))
with open(f, "wb") as fp:
fp.write(zfile.read(filename))
zfile.close()
def extract_file(zipfile_path, extract_to, ignore_package_xml=False):
zfile = zipfile.ZipFile(zipfile_path, 'r')
for filename in zfile.namelist():
if filename.endswith('/'):
continue
if ignore_package_xml and filename == "unpackaged/package.xml":
continue
if filename.startswith("unpackaged"):
f = os.path.join(extract_to, filename.replace("unpackaged", "src"))
else:
f = os.path.join(extract_to, "packages", filename)
if not os.path.exists(os.path.dirname(f)):
os.makedirs(os.path.dirname(f))
with open(f, "wb") as fp:
fp.write(zfile.read(filename))
zfile.close()
def extract_zip(base64String, extract_to):
"""
1. Decode base64String to zip
2. Extract zip to files
"""
# Decode base64String to zip
if not os.path.exists(extract_to): os.makedirs(extract_to)
zipfile_path = extract_to + "/package.zip"
with open(zipfile_path, "wb") as fout:
fout.write(base64.b64decode(base64String))
# Extract file to target path
extract_file(zipfile_path, extract_to)
# Remove package.zip
os.remove(zipfile_path)
return zipfile_path
def parse_package(package_content):
"""Parse package types to specified format
Arguments:
* package_path -- package content to parse
Convert
```
<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
<types>
<members>*</members>
<name>ApexClass</name>
</types>
<version>32.0</version>
</Package>
```
To
```
<types>
<met:members>*</met:members>
<name>ApexClass</name>
</types>
```
"""
result = xmltodict.parse(package_content)
elements = []
types = result["Package"]["types"]
# If there is only one types in package
if isinstance(types, dict): types = [types]
for t in types:
members = []
if "members" in t and isinstance(t["members"], list):
for member in t["members"]:
members.append("<met:members>%s</met:members>" % member)
else:
members.append("<met:members>%s</met:members>" % t["members"])
elements.append("<types>%s%s</types>" % (
"".join(members),
"<name>%s</name>" % t["name"]
))
return "".join(elements) + "<met:version>%s</met:version>" % result["Package"]["version"]
def reload_file_attributes(file_properties, settings=None, append=False):
""" Keep the file attribute to local cache
Paramter:
* file_properties -- file attributes returned from server
* settings -- whole plugin settings
* append -- default is False, if append is false, it means local cache
of default project are reloaded by file properties, otherwise,
file properties will be appended to local cache
"""
# Get settings
if not settings:
settings = context.get_settings()
metadata_body_or_markup = {
"ApexClass": "Body",
"ApexTrigger": "Body",
"StaticResource": "Body",
"ApexPage": "Markup",
"ApexComponent": "Markup"
}
# If the package only contains `package.xml`
if isinstance(file_properties, dict):
file_properties = [file_properties]
component_settings = sublime.load_settings(context.COMPONENT_METADATA_SETTINGS)
csettings = component_settings.get(settings["username"], {})
all_components_attr = csettings if append else {}
for filep in file_properties:
metdata_object = filep["type"]
# Ignore package.xml
if metdata_object == "Package":
continue
components_attr = {}
if metdata_object in all_components_attr:
components_attr = all_components_attr[metdata_object]
base_name = filep['fileName'][filep['fileName'].rfind("/")+1:]
last_point = base_name.rfind(".")
name = base_name[:last_point]
extension = ".%s" % base_name[last_point+1:]
attrs = {
"namespacePrefix": filep.get("namespacePrefix", None),
"name": name,
"fileName": filep['fileName'],
"fullName": filep["fullName"],
"extension": extension,
"type": metdata_object,
"lastModifiedDate": filep["lastModifiedDate"],
"id": filep["id"]
}
if metdata_object in metadata_body_or_markup:
attrs["body"] = metadata_body_or_markup[metdata_object]
attrs["url"] = "/services/data/v%s.0/sobjects/%s/%s" % (
settings["api_version"], metdata_object, filep["id"]
)
# Check whether component is Test Class or not
if metdata_object == "ApexClass":
cl = name.lower()
attrs["is_test"] = cl.startswith("test") or cl.endswith("test")
components_attr[base_name.lower()] = attrs
all_components_attr[metdata_object] = components_attr
for metadata_object, v in all_components_attr.items():
csettings[metadata_object] = v
component_settings.set(settings["username"], csettings)
sublime.save_settings(context.COMPONENT_METADATA_SETTINGS)
# Reload component metadata cache in globals()
sublime.set_timeout(lambda:load_metadata_cache(True, settings["username"]), 5)
def format_debug_logs(settings, records):
if len(records) == 0: return "No available logs."
# Used to list debug logs as below format
debug_log_headers = [
"Id", "StartTime", "DurationMilliseconds", "Status", "LogLength", "Operation"
]
debug_log_headers_properties = {
"Id": {
"width": 20,
"label": "Log Id"
},
"StartTime": {
"width": 22,
"label": "Start Time"
},
"Request": {
"width": 13,
"label": "Request Type"
},
"Application": {
"width": 12,
"label": "Application"
},
"Status": {
"width": 10,
"label": "Status"
},
"LogLength": {
"width": 8,
"label": "Size(b)"
},
"DurationMilliseconds": {
"width": 13,
"label": "Duration(ms)"
},
"Operation": {
"width": 50,
"label": "Operation"
}
}
# Headers
headers = ""
for header in debug_log_headers:
headers += "%-*s" % (debug_log_headers_properties[header]["width"],
debug_log_headers_properties[header]["label"])
# Content
content = ""
records = sorted(records, key=lambda k : k['StartTime'])
for record in records:
for header in debug_log_headers:
if header == "StartTime":
content += "%-*s" % (debug_log_headers_properties[header]["width"],
local_datetime(record[header]))
continue
content += "%-*s" % (debug_log_headers_properties[header]["width"], record[header])
content += "\n"
return "\n" + headers + "\n" + (len(headers) * "-") + "\n" + content[:len(content)-1]
def format_error_message(result):
"""Format message as below format
message: The requested resource does not exist
url: url
errorCode: NOT_FOUND
status_code: 404
* result -- dict error when request status code > 399
    * return -- formatted error message
"""
# Add time stamp
result["Time Stamp"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
error_message = ""
for key, value in result.items():
if isinstance(value, list):
if value: value = value[0]
else: continue
elif not value: continue
error_message += "% 30s\t" % "{0}: ".format(key)
        value = urllib.parse.unquote(unescape(none_value(value),
            {"&apos;": "'", "&quot;": '"'}))
error_message += "%-30s\t" % value + "\n"
return error_message[:len(error_message)-1]
def format_waiting_message(result, header=""):
error_message = header + "\n" + "-" * 100 + "\n"
for key in result:
if isinstance(result[key], list): continue
error_message += "% 30s\t" % "{0}: ".format(key)
error_message += "%-30s\t" % none_value(result[key]) + "\n"
if "messages" in result:
messages = result["messages"]
error_message += message.SEPRATE.format("Deploy Messages")
for key in messages[0].keys():
error_message += "%-30s" % key.capitalize()
error_message += "\n"
for msg in messages:
for key in msg:
error_message += "%-30s" % none_value(msg[key])
error_message += "\n"
return error_message
def format_xml(xml_string, indent="4"):
"""Return formatted XML string
Arguments:
* xml_string -- required parameter, not formatted XML string
* indent -- optional parameter, format indent
Returns:
* content -- formatted XML string
"""
try:
formatter = xmlformatter.Formatter(indent=indent)
content = formatter.format_string(xml_string)
except xml.parsers.expat.ExpatError as e:
content = xml_string.encode("utf-8")
return content
def none_value(value):
""" If value is None, return "", if not, return string format of value
Returns:
* value -- converted value
"""
if not value: return ""
return "%s" % value
def is_python3x():
"""
If python version is 3.x, return True
"""
return sys.version > '3'
"""
Below three functions are used to parse completions out of box.
"""
def parse_namespace(publicDeclarations):
"""
from . import util
import json
namespace_json = util.parse_namespace(publicDeclarations["publicDeclarations"])
json.dump(namespace_json, open("c:/namespace.json",'w'))
"""
namespaces_dict = {}
for namespace in publicDeclarations:
namespaces_dict[namespace] = list(publicDeclarations[namespace].keys())
return namespaces_dict
def parse_method(methods, is_method=True):
if not methods: return {}
methods_dict = {}
for method in methods:
if not method["name"]: continue
if not is_method:
returnType = ''
else:
returnType = method["returnType"]
if not method["parameters"]:
methods_dict["%s()\t%s" % (method["name"], returnType)] = method["name"] + "()$0"
else:
display_parameters = []
for parameter in method["parameters"]:
display_parameters.append(parameter["type"] + " " + parameter["name"])
return_parameters = []
for i in range(len(display_parameters)):
return_parameters.append("${%s:%s}" % (i + 1, display_parameters[i]))
methods_dict["%s(%s)\t%s" % (method["name"], ', '.join(display_parameters), returnType)] =\
"%s(%s)$0" % (method["name"], ', '.join(return_parameters))
return methods_dict
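# Illustrative example (hypothetical method describe): an entry such as
#   {"name": "substring", "returnType": "String",
#    "parameters": [{"type": "Integer", "name": "startIndex"}]}
# is turned into the completion
#   {"substring(Integer startIndex)\tString": "substring(${1:Integer startIndex})$0"}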
def parse_properties(properties):
if not properties: return {}
properties_dict = {}
for property in properties:
properties_dict[property["name"]] = property["name"] + "$0"
return properties_dict
def parse_all(apex):
"""
Usage:
from . import util
import json
apex_json = util.parse_all(apex)
from .salesforce.lib import apex
return_apex = {}
for lib in apex.apex_completions:
if "customize" in apex.apex_completions[lib]:
apex_json[lib] = apex.apex_completions[lib]
json.dump(apex_json, open("/Users/mouse/apex.json",'w'))
"""
apex_completions = {}
for namespace in apex.keys():
for class_name in apex[namespace]:
class_detail = apex[namespace][class_name]
constructors_dict = parse_method(class_detail["constructors"], False)
methods_dict = parse_method(class_detail["methods"])
properties_dict = parse_properties(class_detail["properties"])
# all_dict = dict(list(methods_dict.items()) + list(properties_dict.items()))
# Below class are duplicate in different namespace
# Communities, TimeZone, UnsupportedOperationException, Test, QueryException, Action
if class_name.lower() in apex_completions:
apex_completions[class_name.lower()] = [apex_completions[class_name.lower()]]
apex_completions[class_name.lower()].append({
"constructors" : constructors_dict,
"methods" : methods_dict,
"properties" : properties_dict,
"namespace" : namespace,
"name": class_name
})
else:
apex_completions[class_name.lower()] = {}
apex_completions[class_name.lower()]["constructors"] = constructors_dict
apex_completions[class_name.lower()]["methods"] = methods_dict
apex_completions[class_name.lower()]["properties"] = properties_dict
apex_completions[class_name.lower()]["namespace"] = namespace
apex_completions[class_name.lower()]["name"] = class_name
return apex_completions
def parse_code_coverage(result):
records = {}
for record in result["records"]:
name = record["ApexClassOrTrigger"]["Name"]
records[name] = {
"NumLinesCovered" : record["NumLinesCovered"],
"NumLinesUncovered": record["NumLinesUncovered"]
}
code_coverage_desc = message.SEPRATE.format("TriggerOrClass Code Coverage:")
columns = ""
header_width = {
"Name": 50, "Percent": 10, "Lines": 10
}
for column in ["Name", "Percent", "Lines"]:
columns += "%-*s" % (header_width[column], column)
code_coverage = ""
for name in sorted(records):
row = ""
row += "%-*s" % (header_width["Name"], name)
coverage = records[name]
if not coverage["NumLinesCovered"] or not coverage["NumLinesUncovered"]:
continue
covered_lines = coverage["NumLinesCovered"]
total_lines = covered_lines + coverage["NumLinesUncovered"]
coverage = covered_lines / total_lines * 100 if total_lines != 0 else 0
row += "%-*s" % (header_width["Percent"], "%.2f%%" % coverage)
row += "%-*s" % (header_width["Lines"], "%s/%s" % (covered_lines, total_lines))
code_coverage += row + "\n"
return message.SEPRATE.format(code_coverage_desc + columns + "\n"*2 + code_coverage)
def parse_sync_test_coverage(result):
successes = result["successes"]
failures = result["failures"]
codeCoverage = result["codeCoverage"]
allrows = []
if result["failures"]:
allrows.append("Failed Test Methods:")
for failure in sorted(result["failures"], key=lambda k : k["name"]):
allrows.append("~" * 80)
failure_row = []
failure_row.append("% 30s %-30s " % ("ClassName: ", failure["name"]))
failure_row.append("% 30s %-30s " % ("MethodName: ", failure["methodName"]))
failure_row.append("% 30s %-30s " % ("SeeAllData: ", failure["seeAllData"]))
failure_row.append("% 30s %-30s " % ("Pass/Fail: ", "Fail"))
failure_row.append("% 30s %-30s " % ("StackTrace: ", failure["stackTrace"]))
failure_row.append("% 30s %-30s " % ("Message: ", failure["message"]))
failure_row.append("% 30s %-30s " % ("Time: ", failure["time"]))
allrows.append("\n".join(failure_row))
if result["successes"]:
allrows.append("~" * 80)
allrows.append("Successful Test Methods:")
for success in sorted(result["successes"], key=lambda k : k["name"]):
allrows.append("~" * 80)
success_row = []
success_row.append("% 30s %-30s " % ("ClassName: ", success["name"]))
success_row.append("% 30s %-30s " % ("MethodName: ", success["methodName"]))
success_row.append("% 30s %-30s " % ("SeeAllData: ", success["seeAllData"]))
success_row.append("% 30s %-30s " % ("Pass/Fail: ", "Pass"))
success_row.append("% 30s %-30s " % ("Time: ", success["time"]))
allrows.append("\n".join(success_row))
allrows.append("~" * 80)
allrows.append("Follow the instruction as below, you can quickly view code coverage,")
allrows.append(" * Put focus on code name, hold down 'alt' and Dblclick the 'Left Mouse'")
header_width = {
"Type": 15, "Name": 50, "Percent": 10, "Lines": 10
}
columns = []
for column in ["Type", "Name", "Percent", "Lines"]:
columns.append("%-*s" % (header_width[column], column))
coverageRows = []
coverageRows.append("~" * 80)
coverageRows.append("".join(columns))
coverageRows.append("~" * 80)
codeCoverage = sorted(result["codeCoverage"], reverse=True,
key=lambda k : 0 if k["numLocations"] == 0 else (k["numLocations"] - k['numLocationsNotCovered']) / k["numLocations"])
for coverage in codeCoverage:
coverageRow = []
coverageRow.append("%-*s" % (header_width["Type"], coverage["type"]))
coverageRow.append("%-*s" % (header_width["Name"], coverage["name"]))
# Calculate coverage percent
numLocationsNotCovered = coverage["numLocationsNotCovered"]
numLocations = coverage["numLocations"]
numLocationsCovered = numLocations - numLocationsNotCovered
percent = numLocationsCovered / numLocations * 100 if numLocations != 0 else 0
coverageRow.append("%-*s" % (
header_width["Percent"],
"%.2f%%" % percent
))
coverageRow.append("%-*s" % (
header_width["Lines"], "%s/%s" % (
numLocationsCovered,
numLocations
)
))
coverageRows.append("".join(coverageRow))
allrows.append("\n".join(coverageRows))
return "\n".join(allrows)
def parse_test_result(test_result):
"""
format test result as specified format
* result: Run Test Request result
* return: formated string
"""
# Parse Test Result
test_result_desc = ' Test Result\n'
test_result_content = ""
class_name = ""
for record in test_result:
test_result_content += "-" * 80 + "\n"
test_result_content += "% 30s " % "MethodName: "
test_result_content += "%-30s" % none_value(record["MethodName"]) + "\n"
test_result_content += "% 30s " % "TestTimestamp: "
test_result_content += "%-30s" % none_value(record["TestTimestamp"]) + "\n"
test_result_content += "% 30s " % "ApexClass: "
class_name = record["ApexClass"]["Name"]
test_result_content += "%-30s" % class_name + "\n"
test_result_content += "% 30s " % "Pass/Fail: "
test_result_content += "%-30s" % none_value(record["Outcome"]) + "\n"
test_result_content += "% 30s " % "Error Message: "
test_result_content += "%-30s" % none_value(record["Message"]) + "\n"
test_result_content += "% 30s " % "Stack Trace: "
test_result_content += "%-30s" % none_value(record["StackTrace"]) + "\n"
return_result = class_name + test_result_desc + test_result_content[:-1]
# Parse Debug Log Part
info = "You can choose the LogId and view log detail " +\
"in Sublime or Salesforce by context menu"
debug_log_desc = message.SEPRATE.format(info)
debug_log_content = "LogId: "
    if len(test_result) > 0 and test_result[0]["ApexLogId"] is not None:
debug_log_content += test_result[0]["ApexLogId"]
return_result += debug_log_desc + debug_log_content
return return_result
def parse_validation_rule(settings, sobjects):
""" Parse the validation rule in Sobject.object to csv
* settings -- toolingapi.sublime-settings reference
* sobject -- sobject name
* validation_rule_path -- downloaded objects path by Force.com IDE or ANT
"""
# Open target file
outputdir = settings["workspace"] + "/.export"
if not os.path.exists(outputdir):
os.makedirs(outputdir)
# Initiate CSV Writer and Write headers
columns = settings["validation_rule_columns"]
with open(outputdir + "/ValidationRules.csv", "wb") as fp:
fp.write(u'\ufeff'.encode('utf8')) # Write BOM Header
fp.write(",".join(columns).encode("utf-8") + b"\n") # Write Header
# Open workflow source file
validation_rule_path = settings["workspace"] + "/src/objects"
for sobject in sobjects:
try:
with open(validation_rule_path + "/" + sobject + ".object", "rb") as f:
result = xmltodict.parse(f.read())
except IOError:
            # If the sobject file does not exist, just skip it
continue
######################################
# Rules Part
######################################
try:
rules = result["CustomObject"]["validationRules"]
fp = open(outputdir + "/ValidationRules.csv", "ab")
write_metadata_to_csv(fp, columns, rules, sobject)
except KeyError:
            # If the sobject has no validation rules, just skip it
pass
def parse_workflow_metadata(settings, sobjects):
"""Parse Sobject.workflow to csv, including rule, field update and alerts
* settings -- toolingapi.sublime-settings reference
* sobject -- sobject name
* workflow_metadata_path -- downloaded workflow path by Force.com IDE or ANT
"""
# Create workflow dir
outputdir = settings["workspace"] + "/.export"
if not os.path.exists(outputdir):
os.makedirs(outputdir)
workflow_config = {
"rules": {
"file_name": "Workflow Rules",
"setting_name": "workflow_rule_columns"
},
"fieldUpdates": {
"file_name": "Workflow Field Updates",
"setting_name": "workflow_field_update_columns"
},
"alerts": {
"file_name": "Workflow Email Alerts",
"setting_name": "workflow_email_alert_columns"
},
"outboundMessages": {
"file_name": "Workflow Outbound Messages",
"setting_name": "workflow_outbound_message_columns"
},
"tasks": {
"file_name": "Workflow Tasks",
"setting_name": "workflow_task_columns"
}
}
for config in workflow_config:
setting_name = workflow_config[config]["setting_name"]
file_name = workflow_config[config]["file_name"]
columns = settings[setting_name]
rule_outputdir = outputdir + "/%s.csv" % file_name
        # If the file already exists, remove it
if os.path.isfile(rule_outputdir):
os.remove(rule_outputdir)
# Write Header
with open(rule_outputdir, "wb") as fp:
fp.write(u'\ufeff'.encode('utf8')) # Write BOM Header
fp.write(",".join([(c[0].upper() + c[1:]) for c in columns]).encode("utf-8") + b"\n") # Write Header
# Append Body
rule_path = settings["workspace"] + "/src/workflows"
for sobject in sobjects:
try:
with open(rule_path + "/" + sobject + ".workflow", "rb") as f:
result = xmltodict.parse(f.read())
except IOError:
                # If the sobject workflow file does not exist, skip it
continue
try:
rules = result["Workflow"][config]
write_metadata_to_csv(open(rule_outputdir, "ab"), columns, rules, sobject)
except KeyError:
                # If the workflow has no elements of this type, skip it
pass
def write_metadata_to_csv(fp, columns, metadata, sobject):
""" This method is invoked by function in this module
Arguments:
* fp -- output csv file open reference
* columns -- your specified metadata workbook columns in settings file
* metadata -- metadata describe
"""
    # If the sobject has only one rule, the metadata will be a dict,
    # so convert it to a list
    if isinstance(metadata, dict):
        metadata = [metadata]
columns = [col for col in columns if col != "sobject"]
row_values = b""
for rule in metadata:
row_value = [sobject]
for key in columns:
            # Workflow rule criteria can have different shapes,
            # so if the key is not present in this rule, append ""
if key not in rule.keys():
row_value.append("")
continue
cell_value = rule[key]
if isinstance(cell_value, dict):
cell_value = [cell_value]
if isinstance(cell_value, list):
value = ''
if len(cell_value) > 0:
if isinstance(cell_value[0], dict):
for cell_dict in cell_value:
values = []
for cell_dict_key in cell_dict.keys():
if not cell_dict[cell_dict_key]:
values.append("")
else:
if cell_dict_key == "operation":
values.append("<%s>" % cell_dict[cell_dict_key])
else:
values.append(cell_dict[cell_dict_key])
value += " ".join(values) + "\n"
else:
value = " ".join(cell_value) + "\n"
cell_value = value[ : -1]
else:
cell_value = ""
elif not cell_value:
cell_value = ""
else:
cell_value = "%s" % cell_value
# Unescape special code to normal
            cell_value = urllib.parse.unquote(unescape(cell_value,
                {"&apos;": "'", "&quot;": '"'}))
# Append cell_value to list in order to write list to csv
if '"' in cell_value:
cell_value = '"%s"' % cell_value.replace('"', '""')
else:
cell_value = '"%s"' % cell_value
row_value.append(cell_value)
# Write row
row_value_bin = ",".join(row_value)
row_values += row_value_bin.encode("utf-8") + b"\n"
fp.write(row_values) # Write Body
fp.close()
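# A hedged usage sketch for write_metadata_to_csv: the columns and the rule
# below are invented minimal examples, and the output path is a temp file.
def _example_write_metadata_to_csv():
    import os, tempfile
    columns = ["sobject", "fullName", "active"]
    rule = {"fullName": "Example_Rule", "active": "true"}  # hypothetical rule
    path = os.path.join(tempfile.gettempdir(), "example_rules.csv")
    fp = open(path, "ab")
    write_metadata_to_csv(fp, columns, rule, "Account")  # fp is closed inside
    return path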
NOT_INCLUDED_COLUMNS = ["urls", "attributes"]
def list2csv(file_path, records):
"""convert simple dict in list to csv
Arguments:
* records -- [{"1": 1}, {"2": 2}]
"""
# If records size is 0, just return
if len(records) == 0: return "No Elements"
headers = [k.encode('utf-8') for k in records[0] if k not in NOT_INCLUDED_COLUMNS]
with open(file_path, "wb") as fp:
fp.write(b",".join(headers) + b"\n")
for record in records:
values = []
for k in headers:
strk = str(k, encoding="utf-8")
if strk not in record:
values.append(b"")
else:
values.append(('"%s"' % none_value(record[strk])).encode("utf-8"))
fp.write(b",".join(values) + b"\n")
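# A small usage sketch for list2csv; the records mirror the simple
# dict-per-row shape described in the docstring, and the path is a temp file.
def _example_list2csv():
    import os, tempfile
    records = [
        {"Id": "001", "Name": "Acme"},
        {"Id": "002", "Name": "Globex"},
    ]
    path = os.path.join(tempfile.gettempdir(), "example_records.csv")
    list2csv(path, records)
    return path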
def parse_data_template_vertical(output_file_dir, result):
"""Parse the data template to csv by page layout
Arguments:
* output_file_dir -- output dir for parsed result
* result -- page layout describe result
"""
field_lables = []
field_apis = []
fields_required = []
fields_type = []
fields_picklist_labels = []
fields_picklist_values = []
for edit_layout_section in result["editLayoutSections"]:
if isinstance(edit_layout_section["layoutRows"], list):
layout_rows = edit_layout_section["layoutRows"]
elif isinstance(edit_layout_section["layoutRows"], dict):
layout_rows = [edit_layout_section["layoutRows"]]
for layout_row in layout_rows:
if isinstance(layout_row["layoutItems"], list):
layout_items = layout_row["layoutItems"]
elif isinstance(layout_row["layoutItems"], dict):
layout_items = [layout_row["layoutItems"]]
for layout_item in layout_items:
if not layout_item["label"]: continue
for layout_component in layout_item["layoutComponents"]:
# Some layout_component is blank
if "details" not in layout_component: continue
# Get field describe
details = layout_component["details"]
# If field type is AutoNumber, just skip
if details["autoNumber"]: continue
field_lables.append(details["label"])
field_apis.append(details["name"])
fields_required.append("Required" if layout_item["required"] else "")
fields_type.append(details["type"].capitalize())
picklist_labels = []
picklist_values = []
for picklist in details["picklistValues"]:
picklist_labels.append(picklist["label"])
picklist_values.append(picklist["value"])
fields_picklist_labels.append('"%s"' % "\n".join(picklist_labels))
fields_picklist_values.append('"%s"' % "\n".join(picklist_values))
# Write field_lables and field apis
# Create new csv
with open(output_file_dir, "wb") as fp:
fp.write(u'\ufeff'.encode('utf8'))
fp.write(",".join(field_lables).encode("utf-8") + b"\n")
fp.write(",".join(field_apis).encode("utf-8") + b"\n")
fp.write(",".join(fields_type).encode("utf-8") + b"\n")
fp.write(",".join(fields_required).encode("utf-8") + b"\n")
fp.write(",".join(fields_picklist_labels).encode("utf-8") + b"\n")
fp.write(",".join(fields_picklist_values).encode("utf-8") + b"\n")
def parse_data_template_horizontal(output_file_dir, result):
"""Parse the data template to csv by page layout
Arguments:
* output_file_dir -- output dir for parsed result
* result -- page layout describe result
"""
rows = ["Label,Name,Required?,Type,Picklist Label,Picklist Value"]
for edit_layout_section in result["editLayoutSections"]:
if isinstance(edit_layout_section["layoutRows"], list):
layout_rows = edit_layout_section["layoutRows"]
elif isinstance(edit_layout_section["layoutRows"], dict):
layout_rows = [edit_layout_section["layoutRows"]]
for layout_row in layout_rows:
if isinstance(layout_row["layoutItems"], list):
layout_items = layout_row["layoutItems"]
elif isinstance(layout_row["layoutItems"], dict):
layout_items = [layout_row["layoutItems"]]
for layout_item in layout_items:
if not layout_item["label"]: continue
for layout_component in layout_item["layoutComponents"]:
# Some layout_component is blank
if "details" not in layout_component: continue
# Get field describe
details = layout_component["details"]
# If field type is AutoNumber, just skip
if details["autoNumber"]: continue
picklist_labels = []
picklist_values = []
for picklist in details["picklistValues"]:
picklist_labels.append(picklist["label"])
picklist_values.append(picklist["value"])
row = []
row.append(details["label"])
row.append(details["name"])
row.append("Required" if layout_item["required"] else "")
row.append(details["type"].capitalize())
row.append('"%s"' % "\n".join(picklist_labels))
row.append('"%s"' % "\n".join(picklist_values))
rows.append(",".join(row))
# Write field_lables and field apis
# Create new csv
with open(output_file_dir, "wb") as fp:
fp.write(u'\ufeff'.encode('utf8'))
fp.write("\n".join(rows).encode("utf-8"))
def get_soql_fields(soql):
""" Get the field list of soql
for example, soql is :
SELECT Id, Name, Owner.Name, Owner.FirstName FROM Account lIMIT 10
field list is : ['Id', 'Name', 'Owner.Name', 'Owner.FirstName']
"""
match = re.match("SELECT\\s+[\\w\\n,.:_\\s]*\\s+FROM", soql, re.IGNORECASE)
fieldstr = match.group(0)[6:-4].replace(" ", "").replace("\n", "")
return fieldstr.split(",")
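# Quick illustration of get_soql_fields on the query from the docstring above.
def _example_get_soql_fields():
    soql = "SELECT Id, Name, Owner.Name, Owner.FirstName FROM Account LIMIT 10"
    return get_soql_fields(soql)  # ['Id', 'Name', 'Owner.Name', 'Owner.FirstName']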
def query_to_csv(result, soql):
records = result["records"]
if not records:
return b"No matched rows"
# Get CSV headers
if re.compile("select\s+\*\s+from[\s\t]+\w+", re.I).match(soql):
headers = sorted(list(records[0].keys()))
else:
headers = get_soql_fields(soql)
# Append columns part into rows
rows = ",".join(['"%s"' % h for h in headers]).encode("utf-8") + b"\n"
for record in records:
row = []
for header in headers:
row_value = record
for _header in header.split("."):
                # Avoid KeyError when resolving the row value:
                # build a mapping between lower-case and original field names
field_case_mapping = {}
for k in row_value:
field_case_mapping[k.lower()] = k
row_value = row_value[field_case_mapping[_header.lower()]]
if not isinstance(row_value, dict):
break
value = none_value(row_value)
value = value.replace('"', '""')
row.append('"%s"' % value)
rows += ",".join(row).encode("utf-8") + b"\n"
return rows
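# Minimal sketch of query_to_csv on a hand-built result; the record shape
# imitates a REST query response but is entirely made up.
def _example_query_to_csv():
    soql = "SELECT Id, Name FROM Account"
    result = {"records": [{"Id": "001xx0000000001", "Name": "Acme"}]}
    return query_to_csv(result, soql)  # csv bytes: a header line plus one row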
def parse_execute_anonymous_xml(result):
"""Return the formatted anonymous execute result
Arguments:
* result -- execute anonymous result, it's a xml
* return -- formated string
"""
compiled = result["compiled"]
debugLog = result["debugLog"]
view_result = ''
if compiled == "true":
view_result = debugLog
elif compiled == "false":
line = result["line"]
column = result["column"]
compileProblem = result["compileProblem"]
view_result = compileProblem + " at line " + line +\
" column " + column
    view_result = urllib.parse.unquote(unescape(view_result,
        {"&apos;": "'", "&quot;": '"'}))
return view_result
def generate_workbook(result, workspace, workbook_field_describe_columns):
""" generate workbook for sobject according to user customized columns
you can change the workbook_field_describe_columns in default settings
Arguments:
* result -- sobject describe result
* workspace -- your specified workspace in toolingapi.sublime-settings
* workflow_field_update_columns -- your specified workbook columns in toolingapi.sublime-settings
"""
# Get sobject name
sobject = result.get("name")
# Get fields
fields = result.get("fields")
fields_key = workbook_field_describe_columns
    # If the workbook path does not exist, create it
outputdir = workspace + "/.export/workbooks"
if not os.path.exists(outputdir):
os.makedirs(outputdir)
# Create new csv file for this workbook
# fp = open(outputdir + "/" + sobject + ".csv", "wb", newline='')
workbook_dir = outputdir + "/" + sobject + ".csv"
#------------------------------------------------------------
# Headers, all headers are capitalized
#------------------------------------------------------------
headers = [column.capitalize() for column in fields_key]
# Write Header
fp = open(workbook_dir, "wb")
fp.write(u'\ufeff'.encode('utf8')) # Write BOM Header
fp.write(",".join(headers).encode("utf-8") + b"\n") # Write Header
#------------------------------------------------------------
# Fields Part (All rows are sorted by field label)
#------------------------------------------------------------
fields = sorted(fields, key=lambda k : k['label'])
for field in fields:
row_value_literal = b""
row_values = []
# Check field type
field_type = field["type"] if not field["calculatedFormula"] \
else "Formula(%s)" % field["type"]
for key in fields_key:
# Get field value by field API(key)
row_value = field.get(key)
if isinstance(row_value, list):
if key == "picklistValues":
value = ''
if len(row_value) > 0:
for item in row_value:
value += item.get("value") + "\n"
row_value = value
else:
row_value = ""
elif key == "referenceTo":
if len(row_value) > 0:
row_value = row_value[0]
else:
row_value = ""
elif not row_value:
row_value = ""
else:
row_value = field_type if key == "type" else "%s" % row_value
# Unescape special code to normal
            row_value = urllib.parse.unquote(unescape(row_value,
                {"&apos;": "'", "&quot;": '"'}))
# Append row_value to list in order to write list to csv
if '"' in row_value:
row_value = '"%s"' % row_value.replace('"', '""')
else:
row_value = '"%s"' % row_value
row_values.append(row_value)
# Write row
row_value_literal += ",".join(row_values).encode("utf-8") + b"\n"
fp.write(row_value_literal)
# Close fp
fp.close()
# Display Success Message
sublime.set_timeout(lambda:sublime.status_message(sobject + " workbook is generated"), 10)
# Return outputdir
return outputdir
record_keys = ["label", "name", "type", "length"]
record_key_width = {
"label": 40,
"name": 40,
"type": 20,
"length": 7
}
recordtype_key_width = {
"available": 10,
"recordTypeId": 20,
"name": 35,
"defaultRecordTypeMapping": 15
}
childrelationship_key_width = {
"field": 35,
"relationshipName": 35,
"childSObject": 30,
"cascadeDelete": 12
}
seprate = 100 * "-" + "\n"
def parse_sobject_field_result(result):
"""According to sobject describe result, display record type information,
child sobjects information and the field information.
Arguments:
* result -- sobject describe information, it's a dict
* return -- formated string including the three parts
"""
# Get sobject name
sobject = result.get("name")
# View Name or Header
view_result = sobject + " Describe:\n"
#------------------------------------------------
# Fields Part
#------------------------------------------------
# Output totalSize Part
fields = result.get("fields")
view_result += seprate
view_result += "Total Fields: \t" + str(len(fields)) + "\n"
view_result += seprate
    # Output title and separator line
columns = ""
for key in record_keys:
key_width = record_key_width[key]
columns += "%-*s" % (key_width, key.capitalize())
view_result += columns + "\n"
view_result += len(columns) * "-" + "\n"
    # Sort fields list by the label of every field
fields = sorted(fields, key=lambda k : k['label'])
# Output field values
for record in fields:
row = ""
for key in record_keys:
row_value = "Formula(%s)" % record.get(key) if key == "type" \
and record["calculatedFormula"] else record.get(key)
if not row_value:
row_value = ""
key_width = record_key_width[key]
row_value = "%-*s" % (key_width, row_value)
row += row_value
view_result += row + "\n"
view_result += "\n"
#------------------------------------------------
# Record Type Part
#------------------------------------------------
recordtypes = result.get("recordTypeInfos")
view_result += seprate
view_result += "Record Type Info: \t" + str(len(recordtypes)) + "\n"
view_result += seprate
# Get Record Type Info Columns
recordtype_keys = []
if len(recordtypes) > 0:
recordtype_keys = recordtypes[0].keys()
columns = ""
for key in recordtype_keys:
if not key in recordtype_key_width: continue
key_width = recordtype_key_width[key]
if key == "defaultRecordTypeMapping": key = "default"
columns += "%-*s" % (key_width, key.capitalize())
view_result += columns + "\n"
view_result += len(columns) * "-" + "\n"
for recordtype in recordtypes:
row = ""
for key in recordtype_keys:
if key not in recordtype_key_width: continue
# Get field value by field API
# and convert it to str
row_value = recordtype.get(key)
if not row_value:
row_value = ""
key_width = recordtype_key_width[key]
row_value = "%-*s" % (key_width, row_value)
row += row_value
view_result += row + "\n"
view_result += "\n"
#------------------------------------------------
# Child Relationship
#------------------------------------------------
childRelationships = result.get("childRelationships")
view_result += seprate
view_result += "ChildRelationships Info: \t" + str(len(childRelationships)) + "\n"
view_result += seprate
    # Get Child Relationship Columns
childRelationships_keys = childrelationship_key_width.keys()
columns = ""
for key in childRelationships_keys:
columns += "%-*s" % (30, key.capitalize())
view_result += columns + "\n"
view_result += len(columns) * "-" + "\n"
for childRelationship in childRelationships:
row = ""
for key in childRelationships_keys:
# Get field value by field API
# and convert it to str
row_value = childRelationship.get(key)
if not row_value:
row_value = ""
row_value = "%-*s" % (30, row_value)
row += row_value
view_result += row + "\n"
view_result += "\n"
return view_result
def getUniqueElementValueFromXmlString(xmlString, elementName):
"""
Extracts an element value from an XML string.
For example, invoking
getUniqueElementValueFromXmlString('<?xml version="1.0" encoding="UTF-8"?><foo>bar</foo>', 'foo')
should return the value 'bar'.
"""
xmlStringAsDom = xml.dom.minidom.parseString(xmlString)
elementsByName = xmlStringAsDom.getElementsByTagName(elementName)
elementValue = None
if len(elementsByName) > 0:
elementValue = elementsByName[0].toxml().replace('<' +\
elementName + '>','').replace('</' + elementName + '>','')
    return unescape(elementValue, {"&apos;": "'", "&quot;": '"'})
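# Sketch of the helper above using the XML snippet from its docstring.
def _example_get_unique_element_value():
    xml_string = '<?xml version="1.0" encoding="UTF-8"?><foo>bar</foo>'
    return getUniqueElementValueFromXmlString(xml_string, "foo")  # 'bar'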
def get_response_error(response):
# Debug Message
settings = context.get_settings()
if settings["debug_mode"]:
print (response.content)
content = response.content
result = {"success": False}
try:
if response.status_code == 500:
result["Error Message"] = getUniqueElementValueFromXmlString(content, "faultstring")
else:
result["Error Message"] = getUniqueElementValueFromXmlString(content, "message")
except:
result["Error Message"] = response.content
return result
def get_path_attr(path_or_file):
"""Return project name and component folder attribute
Arguments:
* path_or_file -- file name or path
Returns:
* project_name -- project name of default project
    * folder -- folder defined in settings, for example, the ApexClass folder is 'src/classes'
"""
# Get the Folder Name and Project Name
if os.path.isfile(path_or_file):
path_or_file = os.path.split(path_or_file)[0]
path, metadata_folder = os.path.split(path_or_file)
path, src = os.path.split(path)
path, project_name = os.path.split(path)
    # If the project name is configured to carry a time suffix, strip it
settings = context.get_settings()
if settings["keep_project_name_time_suffix"]:
project_name = project_name[:-9]
return project_name, metadata_folder
def get_file_attributes(file_name):
attributes = {}
base, fullName = os.path.split(file_name)
if "." in fullName:
name = fullName[:fullName.rfind(".")]
extension = fullName[fullName.rfind(".")+1:]
else:
name, extension = fullName, ""
attributes["fullName"] = fullName
attributes["name"] = name
attributes["extension"] = extension
base, folder = os.path.split(base)
base, metafolder_or_src = os.path.split(base)
if metafolder_or_src == "src":
attributes["metadata_folder"] = folder
# If we choose folder name of an aura element
# actually, its name is also its folder name
if not os.path.isfile(file_name):
attributes["folder"] = name
else:
attributes["folder"] = folder
attributes["metadata_folder"] = metafolder_or_src
return attributes
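# Hypothetical example of get_file_attributes; the path below is an assumption
# and does not need to exist on disk.
def _example_get_file_attributes():
    file_name = "/pro-exercise-20130625/src/classes/AccountChartController.cls"
    attrs = get_file_attributes(file_name)
    # attrs contains fullName, name, extension, metadata_folder and folder
    return attrs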
def get_metadata_folder(file_name):
""" Get the metadata_folder by file_name
* file_name -- Local component full file name, for example:
if file name is "/pro-exercise-20130625/src/classes/AccountChartController.cls",
the metadata_folder is "classes"
Returns:
* metadata_folder -- the metadata folder
"""
attributes = get_file_attributes(file_name)
return attributes["metadata_folder"]
def load_metadata_cache(reload_cache=False, username=None):
""" Reload component cache in globals()
"""
if reload_cache or "components" not in globals():
component_metadata = sublime.load_settings(context.COMPONENT_METADATA_SETTINGS)
if not username:
username = context.get_setting("username")
globals()["components"] = component_metadata.get(username, {})
return globals()["components"]
def get_component_attribute(file_name, switch=True, reload_cache=False):
"""
get the component name by file_name, and then get the component_url and component_id
by component name and local settings
Arguments:
* file_name -- Local component full file name, for example:
/pro-exercise-20130625/src/classes/AccountChartController.cls
Returns:
* (component_attribute, file name) -- for example, component_attribute = {
"body": "Body",
"extension": ".cls",
"id": "01pO00000009isEIAQ",
"is_test": false,
"type": "ApexClass",
"url": "/services/data/v28.0/sobjects/ApexClass/01pO00000009isEIAQ"
}
"""
# Get toolingapi settings
settings = context.get_settings()
# Check whether current file is subscribed component
attributes = get_file_attributes(file_name)
metadata_folder = attributes["metadata_folder"]
name = attributes["name"]
fullName = attributes["fullName"]
if metadata_folder not in settings["all_metadata_folders"]:
return None, None
# Check whether project of current file is active project
default_project_name = settings["default_project_name"]
if switch and default_project_name.lower() not in file_name.lower():
return None, None
xml_name = settings[metadata_folder]["xmlName"]
username = settings["username"]
components = load_metadata_cache(reload_cache=reload_cache, username=username)
try:
component_attribute = components[xml_name][fullName.lower()]
except:
component_attribute, name = None, None
# Return tuple
return (component_attribute, name)
def check_enabled(file_name, check_cache=True):
"""
Check whether file is ApexTrigger, ApexComponent, ApexPage or ApexClass
Arguments:
* file_name -- file name in context
Returns:
* Bool -- check whether current file is apex code file and has local cache
"""
if not file_name: return False
# Get toolingapi settings
settings = context.get_settings()
# Check whether current file is subscribed component
attributes = get_file_attributes(file_name)
metadata_folder = attributes["metadata_folder"]
if metadata_folder not in settings["all_metadata_folders"]:
sublime.status_message("Not valid SFDC component")
return False
# Check whether project of current file is active project
default_project_name = settings["default_project_name"]
if default_project_name.lower() not in file_name.lower():
        sublime.status_message("This project is not the active project")
return False
# Check whether active component is in active project
if check_cache:
component_attribute, component_name = get_component_attribute(file_name)
if not component_attribute:
sublime.status_message("Not found the attribute of this component")
return False
return True
def display_active_project(view):
""" Display the default project name in the sidebar
"""
settings = context.get_settings()
if not settings: return # Fix plugin loading issue
display_message = "Default Project => %s (v%s.0)" % (
settings["default_project_name"],
settings["api_version"]
)
view.set_status('default_project', display_message)
def add_project_to_workspace(settings):
"""Add new project folder to workspace
Just Sublime Text 3 can support this method
"""
workspace = settings["workspace"]
dpn = settings["default_project_name"]
file_exclude_patterns = settings["file_exclude_patterns"]
folder_exclude_patterns = settings["folder_exclude_patterns"]
switch_to_folder = {
"path": workspace,
"file_exclude_patterns": file_exclude_patterns,
"folder_exclude_patterns": folder_exclude_patterns
}
# Store project data to file in current workspace
if not os.path.exists(workspace): os.makedirs(workspace)
project_file_path = os.path.join(workspace, "%s.sublime-project" % dpn)
with open(project_file_path, "wb") as fp:
fp.write(json.dumps({"folders":[switch_to_folder]}, indent=4).encode("utf-8"))
project_data = sublime.active_window().project_data()
if not project_data: project_data = {}
folders = project_data.get("folders", [])
    # If the workspace already exists in the project data,
    # just update the patterns; if not, add the workspace to it
for folder in folders:
folder_path = folder["path"]
# Parse windows path to AS-UNIX
if "\\" in folder_path:
folder_path = folder_path.replace("\\", "/")
if "\\" in workspace:
workspace = workspace.replace("\\", "/")
if folder_path == workspace:
folder["file_exclude_patterns"] = file_exclude_patterns;
folder["folder_exclude_patterns"] = folder_exclude_patterns
else:
folders.append(switch_to_folder)
else:
folders.append(switch_to_folder)
sublime.active_window().set_project_data({"folders": folders})
def get_completion_list(meta_type, meta_folder):
""" Get the name list by specified metadataObject
Arguments:
metadata_dir -- directory of metadataObject
Return:
names -- elements in the specified metadataObject folder
"""
settings = context.get_settings()
elements = []
completion_list = []
metadata_dir = os.path.join(settings["workspace"], "src", meta_folder)
for parent, dirnames, filenames in os.walk(metadata_dir):
for _file in filenames:
if _file.endswith("-meta.xml"): continue
base, full_name = os.path.split(_file)
name = full_name[:full_name.rfind(".")]
# Some metadata type have folders
if parent != metadata_dir:
folder = os.path.split(parent)[1]
# Document, Email, Dashboard or Report
if meta_type in settings["metadata_objects_in_folder"]:
# Add folder to list
if folder not in elements:
elements.append(folder)
completion_list.append(("%s\t%s Folder" % (folder, meta_type), folder))
# Add files in folder to list
element = "%s/%s" % (folder, name)
elements.append(element)
completion_list.append(("%s\t%s" % (element, meta_type), element))
continue
# AuraDefinitionBundle
if meta_folder == "aura" and folder not in elements:
elements.append(folder)
completion_list.append(("%s\t%s" % (folder, meta_type), folder))
continue
# Others
elif name not in elements:
elements.append(name)
completion_list.append(("%s\t%s" % (name, meta_type), name))
return completion_list
def get_metadata_elements(metadata_dir):
""" Get the name list by specified metadataObject
Arguments:
metadata_dir -- directory of metadataObject
Return:
names -- elements in the specified metadataObject folder
"""
elements = []
for parent, dirnames, filenames in os.walk(metadata_dir):
for _file in filenames:
if _file.endswith("-meta.xml"): continue
base, full_name = os.path.split(_file)
name = full_name[:full_name.rfind(".")]
elements.append(name)
return elements
def export_role_hierarchy(records):
settings = context.get_settings()
top_roles = [] # Role hierarchy
rolemap = {} # Define roleId => role
for r in records:
# Build map
rolemap[r["Id"]] = r
if not r["ParentRoleId"]:
top_roles.append(r)
# Start to write role name to csv
rows = []
for role in sorted(top_roles, key=lambda k : k['Name']):
rows.append(role["Name"])
append_child_roles(rolemap, role["Id"], rows, 1,
settings["include_users_in_role_hierarchy"])
outputdir = settings["workspace"]+ "/.export/Role"
if not os.path.exists(outputdir):
os.makedirs(outputdir)
outputfile = outputdir+"/hierarchy.csv"
with open(outputfile, "wb") as fp:
fp.write("\n".join(rows).encode("utf-8"))
return outputfile
def append_child_roles(rolemap, role_id, rows, level, include_users):
child_roles = []
for role in rolemap.values():
if role["ParentRoleId"] == role_id:
child_roles.append(role)
for role in sorted(child_roles, key=lambda k : k['Name']):
row = level * "," + role["Name"]
# If include_users is true, Include active user list after role name
if include_users:
if role["Users"]:
users = role["Users"]
usernames = []
for record in users["records"]:
full_name = "%s %s(%s)" % (
record["LastName"],
record["FirstName"] if record["FirstName"] else "",
record["Username"]
)
usernames.append(full_name)
row += ',"%s"' % "\n".join(usernames)
else:
row += ', No Active Users'
rows.append(row)
append_child_roles(rolemap, role["Id"], rows, level + 1, include_users)
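# Tiny illustrative role map for append_child_roles; the Ids and names are
# invented and "Users" is left as None, which is fine when include_users is False.
def _example_append_child_roles():
    rolemap = {
        "1": {"Id": "1", "Name": "CEO", "ParentRoleId": None, "Users": None},
        "2": {"Id": "2", "Name": "VP Sales", "ParentRoleId": "1", "Users": None},
    }
    rows = ["CEO"]
    append_child_roles(rolemap, "1", rows, 1, include_users=False)
    return rows  # ['CEO', ',VP Sales']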
def export_profile_settings():
settings = context.get_settings()
# Read all profile names
profile_dir = os.path.join(settings["workspace"], "src", "profiles")
if not os.path.exists(profile_dir):
Printer.get("error").write("Profiles directory can not be found, please execute `retrieve all` command")
return
Printer.get("log").write_start().write("Start to read all file name in profile folder")
profiles = get_metadata_elements(profile_dir)
profile_settings = {}
sobject_names = []
tab_names = []
sobject_fields = {}
permission_names = []
for profile in profiles:
        # Unquote and unescape the profile name, for example,
        # "Custom%3A Sales Profile" becomes "Custom: Sales Profile"
        unquoted_profile = urllib.parse.unquote(unescape(profile, {"&apos;": "'", "&quot;": '"'}))
Printer.get("log").write("Parsing the profile security settings of "+unquoted_profile)
profile_file = os.path.join(profile_dir, profile+".profile")
result = xmltodict.parse(open(profile_file, "rb").read())
result = result["Profile"]
profile_settings[unquoted_profile] = {}
# Some profiles don't have objectPermissions
if "objectPermissions" in result:
sobjects_permission = {}
object_permissions = result["objectPermissions"]
            # Some profiles have only one objectPermissions entry
if isinstance(result["objectPermissions"], dict):
object_permissions = [object_permissions]
for op in object_permissions:
sobjects_permission[op["object"]] = op
if op["object"] not in sobject_names:
sobject_names.append(op["object"])
profile_settings[unquoted_profile]["objectPermissions"] = sobjects_permission
        # recordTypeVisibilities are not parsed yet
if "recordTypeVisibilities" in result:
pass
# Parsing fieldPermission as {}
if "fieldPermissions" in result:
field_permissions = {}
fps = result["fieldPermissions"];
if isinstance(fps, dict): fps = [fps]
for fp in fps:
# Parse the field
sobject, field = fp["field"].split(".")
# Keep object => [fields] dict
if sobject in sobject_fields:
if field not in sobject_fields[sobject]:
sobject_fields[sobject].append(field)
else:
sobject_fields[sobject] = [field]
# Parse fields to field_permissions
field_permissions[fp["field"]] = fp
profile_settings[unquoted_profile]["fieldPermissions"] = field_permissions
# Parsing tabVisibilities as {"tabName1": "visibility", "tabName2": "Visibility"}
if "tabVisibilities" in result:
tab_visibilities = {}
            # Some profiles have only one tabVisibilities entry
tvs = result["tabVisibilities"]
if isinstance(tvs, dict): tvs = [tvs]
for tv in tvs:
tab_visibilities[tv["tab"]] = tv["visibility"]
if tv["tab"] not in tab_names:
tab_names.append(tv["tab"])
profile_settings[unquoted_profile]["tabVisibilities"] = tab_visibilities
        # Parsing userPermissions as {"ApiEnabled": true, "AllowUniversalSearch": false}
if "userPermissions" in result:
user_permissions = {}
            # Some profiles have only one userPermissions entry
ups = result["userPermissions"]
if isinstance(ups, dict): ups = [ups]
for up in ups:
user_permissions[up["name"]] = up["enabled"]
if up["name"] not in permission_names:
permission_names.append(up["name"])
profile_settings[unquoted_profile]["userPermissions"] = user_permissions
# Get the unescaped profiles
profiles = sorted(list(profile_settings.keys()))
#########################################
# 1. Export objectPermissions
#########################################
# Define object CRUD
cruds = [
"allowRead", "allowCreate", "allowEdit",
"allowDelete", "modifyAllRecords",
"viewAllRecords"
]
cruds_translation = {
"allowRead": "Read",
"allowCreate": "Create",
"allowEdit": "Edit",
"allowDelete": "Delete",
"modifyAllRecords": "ModifyAll",
"viewAllRecords": "ViewAll"
}
# Define the column that contains profile
profile_headers = ["Object"]
for profile in profiles:
profile_headers.append(profile)
for i in range(len(cruds) - 1):
profile_headers.append("")
# Define the column
crud_headers = [""]
for profile in profiles:
for crud in cruds:
crud_headers.append(cruds_translation[crud])
sobject_names = sorted(sobject_names)
all_rows = [",".join(profile_headers), ",".join(crud_headers)]
for sobject in sobject_names:
rows = [sobject]
for profile in profiles:
            # Some standard sObjects are not configurable
if "objectPermissions" in profile_settings[profile]:
if sobject in profile_settings[profile]["objectPermissions"]:
object_permission = profile_settings[profile]["objectPermissions"][sobject]
for crud in cruds:
rows.append("√" if object_permission[crud] == "true" else "")
else:
for crud in cruds:
rows.append("")
else:
for crud in cruds:
rows.append("")
all_rows.append(",".join(rows))
outputdir = settings["workspace"]+ "/.export/profile"
if not os.path.exists(outputdir):
os.makedirs(outputdir)
Printer.get("log").write("Writing profile object security to "+outputdir)
with open(outputdir+"/ObjectPermissions.csv", "wb") as fp:
fp.write("\n".join(all_rows).encode("utf-8"))
#########################################
# 2. Export tabVisibilities
#########################################
# Populate Header
headers = ["Tab Name"]
headers.extend(profiles)
# Populate Rows
tab_names = sorted(tab_names)
all_rows = [",".join(headers)]
for tab_name in tab_names:
rows = [tab_name]
for profile in profiles:
if "tabVisibilities" in profile_settings[profile]:
if tab_name in profile_settings[profile]["tabVisibilities"]:
rows.append(profile_settings[profile]["tabVisibilities"][tab_name])
else:
rows.append("TabHidden")
else:
rows.append("No Tab Permission")
all_rows.append(",".join(rows))
Printer.get("log").write("Writing profile tab visibility to "+outputdir)
with open(outputdir+"/TabVisibilities.csv", "wb") as fp:
fp.write("\n".join(all_rows).encode("utf-8"))
#########################################
    # 3. Export userPermissions
#########################################
# Populate Header
headers = ["Permission"]
headers.extend(profiles)
# Populate Rows
all_rows = [",".join(headers)]
permission_names = sorted(permission_names)
for permission_name in permission_names:
rows = [permission_name]
for profile in profiles:
if permission_name in profile_settings[profile]["userPermissions"]:
if profile_settings[profile]["userPermissions"][permission_name] == "true":
rows.append("√")
else:
rows.append("")
else:
rows.append("")
all_rows.append(",".join(rows))
Printer.get("log").write("Writing profile user permission to "+outputdir)
with open(outputdir+"/UserPermissions.csv", "wb") as fp:
fp.write("\n".join(all_rows).encode("utf-8"))
#########################################
# 4. Export Field Level Security
#########################################
    # Define field-level read and edit permissions
rus = [
"readable", "editable"
]
# Define the column that contains profile
profile_headers = ["Object", "Field"]
for profile in profiles:
profile_headers.append(profile)
for i in range(len(rus) - 1):
profile_headers.append("")
# Define the column
ru_headers = ["", ""]
for profile in profiles:
for ru in rus:
ru_headers.append(ru.capitalize())
all_rows = [",".join(profile_headers), ",".join(ru_headers)]
for sobject in sorted(sobject_fields.keys()):
for field in sobject_fields[sobject]:
rows = [sobject, field]
object_field = "%s.%s" % (sobject, field)
for profile in profiles:
if object_field in profile_settings[profile]["fieldPermissions"]:
field_permission = profile_settings[profile]["fieldPermissions"][object_field]
for ru in rus:
rows.append("√" if field_permission[ru] == "true" else "")
else:
for ru in rus:
rows.append("")
# Every field is separated line
all_rows.append(",".join(rows))
outputdir = settings["workspace"]+ "/.export/profile"
if not os.path.exists(outputdir):
os.makedirs(outputdir)
Printer.get("log").write("Writing profile object security to "+outputdir)
with open(outputdir+"/FieldLevelSecurity.csv", "wb") as fp:
fp.write("\n".join(all_rows).encode("utf-8"))
def build_metadata(csvfile, options):
""" Convert JSON to custom labels metadata """
rjson = convert_csv_to_json(csvfile, options.get("xmlNodes"))
custom_labels_json = {
options.get("root"): {
"@xmlns": "http://soap.sforce.com/2006/04/metadata",
options.get("leaf"): rjson
}
}
return xmltodict.unparse(custom_labels_json)
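# Hedged sketch of calling build_metadata for custom labels; the option keys
# mirror what the function reads, but the node names and the csv path are
# assumptions, and the csv must already exist on disk.
def _example_build_metadata():
    options = {
        "root": "CustomLabels",
        "leaf": "labels",
        "xmlNodes": ["fullName", "value", "language", "protected", "shortDescription"],
    }
    return build_metadata("labels.csv", options)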
def convert_csv_to_json(csvfile, xmlNodes):
""" Convert CSV to JSON format"""
fp = open(csvfile, "rt", encoding="utf8"); # Open CSV file
next(fp) # Ignore header
csv_reader = csv.DictReader(fp, xmlNodes)
tempjson = os.path.join(os.path.split(csvfile)[0], "temp.json")
with open(tempjson, 'w') as fp:
fp.write(json.dumps([r for r in csv_reader]))
rjson = json.loads(open(tempjson).read())
os.remove(tempjson)
return rjson
# ---- StarcoderdataPython sample 3307909 ----
from django.apps import AppConfig
class RouteplannerConfig(AppConfig):
name = 'routeplanner'
# ---- StarcoderdataPython sample 1952411 ----
import unittest
from bfg9000.builtins.version import bfg9000_required_version, bfg9000_version
from bfg9000.versioning import bfg_version, VersionError
class TestRequiredVersion(unittest.TestCase):
def test_bfg_version(self):
bfg9000_required_version('>=0.1.0')
self.assertRaises(VersionError, bfg9000_required_version, '<=0.1.0')
def test_python_version(self):
bfg9000_required_version(python_version='>=2.7.0')
self.assertRaises(VersionError, bfg9000_required_version, None,
'<=2.0.0')
def test_both_versions(self):
bfg9000_required_version('>=0.1.0', '>=2.7.0')
self.assertRaises(VersionError, bfg9000_required_version, '<=0.1.0',
'<=2.0.0')
class TestVersion(unittest.TestCase):
def test_version(self):
self.assertEqual(bfg9000_version(), bfg_version)
# ---- StarcoderdataPython sample 9779450 ----
from __future__ import absolute_import, print_function
from sentry.db.models import (
BoundedBigIntegerField, Model, sane_repr
)
class LatestRelease(Model):
"""
Tracks the latest release of a given repository for a given environment.
"""
__core__ = False
repository_id = BoundedBigIntegerField()
# 0 for 'all environments'
environment_id = BoundedBigIntegerField()
release_id = BoundedBigIntegerField()
deploy_id = BoundedBigIntegerField(null=True)
commit_id = BoundedBigIntegerField(null=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_latestrelease'
unique_together = (('repository_id', 'environment_id'),)
__repr__ = sane_repr('repository_id', 'environment_id')
# ---- StarcoderdataPython sample 6655930 ----
"""
Given two 2D polygons write a function that calculates the IoU of their areas,
defined as the area of their intersection divided by the area of their union.
The vertices of the polygons are constrained to lie on the unit circle and you
can assume that each polygon has at least 3 vertices, given and in sorted order.
- You are free to use basic math functions/libraries (sin, cos, atan2, numpy etc)
but not geometry-specific libraries (such as shapely).
- You are free to look up geometry-related formulas, optionally copy paste in
short code snippets and adapt them to your needs.
- We do care and evaluate your general code quality, structure and readability
but you do not have to go crazy on docstrings.
"""
import numpy as np
from math import atan2, sqrt
def computeLine(p1, p2):
"""
Computing the lines parameters m and b given two points
"""
dx = p1[0] - p2[0]
return (p1[1] - p2[1]) / dx, (p1[0] * p2[1] - p2[0] * p1[1]) / dx
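# Small example of computeLine: the line through (1, 3) and (0, 1) has
# slope 2 and intercept 1.
def exampleComputeLine():
    m, b = computeLine((1, 3), (0, 1))
    return m, b  # (2.0, 1.0)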
def cleanPoly(poly):
"""
Removing duplicate points from polygon list of vertices
"""
cleanedPoly = []
for point in poly:
if isNotInList(point, cleanedPoly):
cleanedPoly.append(point)
return cleanedPoly
def computeIntersection(p11, p12, p21, p22, tol=1e-6):
"""
Computing intersection of two lines
"""
# Compute difference in x co-ordinates for two lines
dx1, dx2 = p11[0] - p12[0], p21[0] - p22[0]
# If both difference in x are below tolerance, the lines are vertical and parallel => no intersection
if abs(dx1) < tol and abs(dx2) < tol:
return None
# If just the first line difference in x is below tolerance, the first line is vertical
elif abs(dx1) < tol:
x = (p11[0] + p12[0])/2 # x co-ordinate of intersection
m2, b2 = computeLine(p21, p22) # get second line parameters
return x, m2 * x + b2
# If just the second line difference in x is below tolerance, the second line is vertical
elif abs(dx2)< tol:
x = (p21[0] + p22[0]) / 2 # x co-ordinate of intersection
m1, b1 = computeLine(p11, p12) # get first line parameters
return x, m1 * x + b1
# If none of the differences in x difference is below tolerance, none of the line is vertical
else:
m1,b1 = computeLine(p11, p12) # get first line parameters
m2,b2 = computeLine(p21, p22) # get second line parameters
dm = m1 - m2 # difference in slope
# if difference in slope is below tolerance, the two lines are parallel i.e. no intersection
if abs(dm) < tol:
return None
# else, compute intersection x, y
else:
return (b2 - b1) / dm, (m1 * b2 - b1 * m2) / dm
def distanceBetween(p1, p2):
"""
Euclidean distance between two points
"""
return sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
def iou(poly1, poly2):
"""
Computing intersection over union for given polygon
"""
poly1, poly2 = cleanPoly(poly1), cleanPoly(poly2) # clean polygons of duplicate points
poly3 = polyIntersection(poly1, poly2) # compute intersected polygon
    # if the intersection exists (it is a convex polygon with at least 3 vertices)
if poly3:
# converting polygons to np arrays
poly1 = np.array(poly1, dtype = np.float32)
poly2 = np.array(poly2, dtype = np.float32)
poly3 = np.array(poly3, dtype = np.float32)
# computing area of intersection
intersectionArea = polyArea(poly3[:,0], poly3[:,1])
# computing area of union (= area of polygon - area of intersection)
unionArea = polyArea(poly1[:,0], poly1[:,1]) + polyArea(poly2[:,0], poly2[:,1]) - intersectionArea
# IoU = area of intersection / area of union
return intersectionArea / unionArea
    # else, the polygons do not intersect, i.e. IoU = 0.0
else:
return 0.0
def isNotInList(point, list, tol=1e-6):
"""
Checking if the point is already in the list
"""
for p in list:
if distanceBetween(point, p) < tol:
return False
return True
def polyArea(x, y):
"""
    Computing the area of a polygon using the Shoelace formula, given ordered x and y coordinates of its vertices (https://en.wikipedia.org/wiki/Shoelace_formula).
"""
area = 0.5 * np.abs(np.dot(y, np.roll(x,1)) - np.dot(x, np.roll(y,1)))
return area
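# Quick sanity check for the shoelace helper above: the unit square has area 1.
def examplePolyArea():
    xs = np.array([0.0, 1.0, 1.0, 0.0])
    ys = np.array([0.0, 0.0, 1.0, 1.0])
    return polyArea(xs, ys)  # 1.0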
def polyIntersection(poly1, poly2):
"""
Computing the intersection between two polygons
"""
intersections, orientations = [], [] # list of intersection points and respective orientations w.r.t the origin
n1, n2 = len(poly1), len(poly2) # no. of vertices of two polygons
# for each vertex in 1st polygon
for i, currentVertex1 in enumerate(poly1):
        previousVertex1 = poly1[(i + n1 - 1) % n1] # previous vertex of 1st polygon
# bounding box of current edge of 1st polygon
xMax = max(currentVertex1[0], previousVertex1[0])
xMin = min(currentVertex1[0], previousVertex1[0])
yMax = max(currentVertex1[1], previousVertex1[1])
yMin = min(currentVertex1[1], previousVertex1[1])
# for each vertex in 2nd polygon
for j, currentVertex2 in enumerate(poly2):
previousVertex2 = poly2[(j + n2 - 1) % n2]
# compute intersection between 2 lines of 2 polygons
intersect = computeIntersection(currentVertex1, previousVertex1, currentVertex2, previousVertex2)
            # if an intersection exists, lies inside the bounding box and has not been recorded yet
if intersect:
if xMin <= intersect[0] <= xMax and yMin <= intersect[1] <= yMax:
if isNotInList(intersect, intersections):
intersections.append(intersect) # appending it to the list
orientations.append(atan2(intersect[1], intersect[0])) # appending to the corresponding orientation
# if fewer than 3 vertices
if len(intersections) < 3:
return None # its not a polygon (intersection is null)
else:
        # sorting the vertices of the intersection polygon by orientation
        intersectionPoly = [x for _, x in sorted(zip(orientations, intersections))]
        return intersectionPoly
# --------------------------------------------------------
if __name__ == "__main__":
cases = []
# Case 1: a vanilla case (see https://imgur.com/a/dSKXHPF for a diagram)
poly1 = [
(-0.7071067811865475, 0.7071067811865476),
(0.30901699437494723, -0.9510565162951536),
(0.5877852522924729, -0.8090169943749476),
]
poly2 = [
(1, 0),
(0, 1),
(-1, 0),
(0, -1),
(0.7071067811865475, -0.7071067811865477),
]
cases.append((poly1, poly2, "simple case", 0.12421351279682288))
# Case 2: another simple case
poly1 = [
(1, 0),
(0, 1),
(-0.7071067811865476, -0.7071067811865476),
]
poly2 = [
(-0.1736481776669303, 0.984807753012208),
(-1, 0),
(0, -1),
]
cases.append((poly1, poly2, "simple case 2", 0.1881047657147776))
# Case 3: yet another simple case, note the duplicated point
poly1 = [
(0, -1),
(-1, 0),
(-1, 0),
(0, 1),
]
poly2 = [
(0.7071067811865476, 0.7071067811865476),
(-0.7071067811865476, 0.7071067811865476),
(-0.7071067811865476, -0.7071067811865476),
(0.7071067811865476, -0.7071067811865476),
(0.7071067811865476, -0.7071067811865476),
]
cases.append((poly1, poly2, "simple case 3", 0.38148713966109243))
# Case 4: shared edge
poly1 = [
(-1, 0),
(-0.7071067811865476, -0.7071067811865476),
(0.7071067811865476, -0.7071067811865476),
(1, 0),
]
poly2 = [
(0, 1),
(-1, 0),
(1, 0),
]
cases.append((poly1, poly2, "shared edge", 0.0))
# Case 5: same polygon
poly1 = [
(0, -1),
(-1, 0),
(1, 0),
]
poly2 = [
(0, -1),
(-1, 0),
(1, 0),
]
cases.append((poly1, poly2, "same same", 1.0))
# Case 6: polygons do not intersect
poly1 = [
(-0.7071067811865476, 0.7071067811865476),
(-1, 0),
(-0.7071067811865476, -0.7071067811865476),
]
poly2 = [
(0.7071067811865476, 0.7071067811865476),
(1, 0),
(0.7071067811865476, -0.7071067811865476),
]
cases.append((poly1, poly2, "no intersection", 0.0))
import time
t0 = time.time()
for poly1, poly2, description, expected in cases:
computed = iou(poly1, poly2)
print('-'*20)
print(description)
print("computed:", computed)
print("expected:", expected)
print("PASS" if abs(computed - expected) < 1e-8 else "FAIL")
# details here don't matter too much, but this shouldn't be seconds
dt = (time.time() - t0) * 1000
print("done in %.4fms" % dt)
# ---- StarcoderdataPython sample 6513063 ----
from cProfile import run
from playsound import playsound
from gtts import gTTS
import speech_recognition as sr
import os
import time
from datetime import date, datetime
import random
from random import choice
import webbrowser
import psutil
from plyer import notification
import time
import pywhatkit as k
r = sr.Recognizer()
def record(ask=False):
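    """Listen on the microphone and return the recognized Turkish text, or "" on failure."""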
playsound("DING.mp3")
with sr.Microphone() as source:
if ask:
print(ask)
audio = r.listen(source)
voice = ""
try:
voice = r.recognize_google(audio, language="tr-TR")
except sr.UnknownValueError:
speak("Anlayamadım")
except sr.RequestError:
speak("Sistem çalışmıyor")
return voice
def ingrecord(ask=False):
playsound(r"c:\\Users\\PC\\OneDrive\\Masaüstü\\Asistan\\DING.mp3")
with sr.Microphone() as source:
if ask:
print(ask)
audio = r.listen(source)
voice = ""
try:
voice = r.recognize_google(audio, language="en-EN")
except sr.UnknownValueError:
speak("Anlayamadım")
except sr.RequestError:
speak("Sistem çalışmıyor")
return voice
def almrecord(ask=False):
playsound(r"c:\\Users\\PC\\OneDrive\\Masaüstü\\Asistan\\DING.mp3")
with sr.Microphone() as source:
if ask:
print(ask)
audio = r.listen(source)
voice = ""
try:
voice = r.recognize_google(audio, language="de-DE")
except sr.UnknownValueError:
speak("Anlayamadım")
except sr.RequestError:
speak("Sistem çalışmıyor")
return voice
def response(voice):
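    """Match the recognized Turkish command against known phrases and run the corresponding action."""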
if "merhaba" in voice:
speak("sana da merhaba")
if "naber" in voice:
speak("iyi senden naber")
if "beni duyuyor musun" in voice:
speak("Evet duyuyorum")
if "ne" in voice:
speak("ne ne")
if "nasılsın" in voice:
speak("iyiyim sen")
if "teşekkür ederim" in voice or "teşekkürler" in voice:
speak("rica ederim")
if "bu gün nasılsın" in voice:
speak("İyiyim teşekkürler sen nasılsın?")
if "iyiyim" in voice:
speak("Hep iyi ol")
if "kötüyüm" in voice:
speak("Buna üzüldüm")
if "görüşürüz" in voice or "bay bay" in voice or "kapan" in voice or "baybay" in voice:
speak("görüşürüz")
os.system("TASKKILL /F /IM Asistan.exe")
if "söylediğim her şeyi not et" in voice:
speak("Dosya ismi ne olsun?")
txtfile = record() + ".txt"
speak("Hangi dilde kayıt istiyorsun?")
dil = record()
dil = dil.lower()
if "türkçe" in dil:
speak("Söylemeye başlayabilirsin.")
while True:
thetxt = record()
if "not yazmayı bitir" in thetxt:
speak("Not tutma işlemi sonlandırıldı.")
f.close()
os.system("TASKKILL /F /IM Asistan.exe")
f = open("notlar/"+txtfile, "a", encoding="utf-8")
f.writelines(thetxt+" ")
if "almanca" in dil:
speak("Söylemeye başlayabilirsin.")
while True:
thetxt = almrecord()
if "not yazmayı bitir" in thetxt:
speak("Not tutma işlemi sonlandırıldı.")
f.close()
os.system("TASKKILL /F /IM Asistan.exe")
f = open("notlar/"+txtfile, "a", encoding="utf-8")
f.writelines(thetxt)
if "ingilizce" in dil:
speak("Söylemeye başlayabilirsin.")
while True:
thetxt = ingrecord()
if "not yazmayı bitir" in thetxt:
speak("Not tutma işlemi sonlandırıldı.")
f.close()
os.system("TASKKILL /F /IM Asistan.exe")
f = open("notlar/"+txtfile, "a", encoding="utf-8")
f.writelines(thetxt)
if "not al" in voice:
speak("Dosya ismi ne olsun?")
txtfile = record() + ".txt"
speak("Ne not tutmamı istersin?")
thetxt = record()
f = open("notlar/"+txtfile, "w", encoding="utf-8")
f.writelines(thetxt)
f.close()
speak("İstediğin notu aldım")
if "not sil" in voice:
speak("Hangi notu sileyim?")
txtfile = record() + ".txt"
notes = os.listdir(r"C:\\Users\\PC\\OneDrive\\Masaüstü\\Asistan\\notlar")
icermek = notes.__contains__(txtfile)
if icermek:
os.remove(r"C:\\Users\\PC\\OneDrive\\Masaüstü\\Asistan\\notlar\\" + txtfile)
speak("İstediğin notu sildim.")
else:
speak("silmek istediğin dosya mevcut değil.")
if "hangi gündeyiz" in voice or "günlerden ne" in voice:
today = time.strftime("%A")
today.capitalize()
if today == "Monday":
today = "Pazartesi"
elif today == "Tuesday":
today = "Salı"
elif today == "Wednesday":
today = "Çarşamba"
elif today == "Thursday":
today = "Perşembe"
elif today == "Friday":
today = "Cuma"
elif today == "Saturday":
today = "Cumartesi"
elif today == "Sunday":
today = "Pazar"
speak(today)
if "saat kaç" in voice:
clock = datetime.now().strftime("%H:%M")
speak(clock)
if "pil" in voice:
pil = psutil.sensors_battery()
yuzde = pil.percent
speak(f"Kalan pil: yüzde{yuzde}")
if "bilgisayarı yeniden başlat" in voice or "bilgisayar yeniden başlat" in voice or "pc reset" in voice:
speak("Bilgisayarı yeniden başlatma mı ister misin?")
onay = record()
onay = onay.lower()
if "evet" in onay:
speak("Sistem yeniden başlatılıyor")
os.system("shutdown /r /t 2")
os.system("TASKKILL /F /IM Asistan.exe")
if "hayır" in onay:
speak("İşlem iptal edildi")
if "google" in voice:
speak("Googleda ne aramamı istersin?")
search = record()
url = "https://www.google.com/search?q={}".format(search)
webbrowser.get().open(url)
speak("İşte google sonuçları")
if "github" in voice or "proje" in voice:
speak("Githubda ne aramamı istersin?")
search = record()
url = "https://github.com/search?q={}".format(search)
webbrowser.get().open(url)
speak("İşte github sonuçları.")
if "uygulama aç" in voice:
speak("Hangi uygulamayı açmamı istiyorsun?")
runApp = record()
runApp = runApp.lower()
if "discord" in runApp:
os.startfile(r"C:\\Users\\PC\\OneDrive\\Masaüstü\\helpers\\Discord.lnk")
speak("Discordu açtım.")
os.system("TASKKILL /F /IM Asistan.exe")
elif "visual studio code" in runApp:
os.startfile(r"C:\\Users\\PC\\OneDrive\\Masaüstü\\Yazilim\\Visual Studio Code.lnk")
speak("Visual Studio Code'yi açtım.")
os.system("TASKKILL /F /IM Asistan.exe")
else:
speak("İstediğin uygulama çalıştırma listemde yok.")
if "mesaj yolla" in voice or "mesaj gönder" in voice or "mesaj at" in voice:
speak("Kime mesaj yollamak istiyorsunuz?")
saat = datetime.now().strftime("%H")
dk = datetime.now().strftime("%M")
user = record()
user = user.lower()
if "anne" in user:
speak("Ne mesaj yollamak istiyorsun?")
mesaj = record()
if mesaj:
speak(f"Annenize {mesaj} mesajını yollamak istiyor musunuz?")
onay = record()
onay = onay.lower()
if "evet" in onay:
k.sendwhatmsg("+90 1234567890",mesaj, int(saat),int(dk))
speak("Mesaj gönderildi.")
if "hayır" in onay:
speak("İşlem iptal edildi.")
elif "baba" in user:
speak("Ne mesaj yollamak istiyorsunuz?")
mesaj = record()
if mesaj:
speak(f"Babanıza {mesaj} mesajını yollamak istiyor musunuz?")
onay = record()
onay = onay.lower()
if "evet" in onay:
k.sendwhatmsg("+90 1234567890",mesaj, int(saat),int(dk) + 1)
speak("Mesaj gönderildi.")
if "hayır" in onay:
speak("İşlem iptal edildi.")
elif "arkadaş" in user:
speak("Ne mesaj yollamak istiyorsunuz?")
mesaj = record()
if mesaj:
speak(f"Arkadaşınıza {mesaj} mesajını yollamak istiyor musunuz?")
onay = record()
onay = onay.lower()
if "evet" in onay:
k.sendwhatmsg("+90 1234567890",mesaj, int(saat),int(dk) + 1)
speak("Mesaj gönderildi.")
if "hayır" in onay:
speak("İşlem iptal edildi.")
else:
speak("Bu kişi kişilerde yok.")
if "müzik aç" in voice:
music = os.listdir(r"C:\\Users\\PC\\OneDrive\\Masaüstü\\music")
music = random.choice(music)
os.startfile(rf"C:\\Users\\PC\\OneDrive\\Masaüstü\\music\\{music}")
speak("Senin için bir müzik açtım.")
def speak(string):
tts = gTTS(text=string, lang="tr", slow=False)
file = "answer.mp3"
tts.save(file)
playsound(file)
os.remove(file)
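# Main loop: keep listening for a Turkish voice command, lower-case it and
# hand it to response(); the assistant process is killed from inside
# response() when a shutdown phrase is heard.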
while True:
wake = record()
if wake != '':
wake = wake.lower()
print(wake)
response(wake)
# ---- StarcoderdataPython sample 1943840 ----
_base_="../base-${shortname}-config.py"
# this will merge with the parent
model=dict(pretrained='${pretrained}')
# epoch related
total_iters=${iter}
checkpoint_config = dict(interval=total_iters)
# ---- StarcoderdataPython sample 77044 ----
# Keplerian fit configuration file for HIP11915
# 15 Mar 2022
# Packages
import pandas as pd
import os
import numpy as np
import radvel
import astropy.units as u
# Global planetary system and datasets parameters
starname = 'HIP11915'
nplanets = 2
instnames = ['HARPS-A', 'HARPS-B']
ntels = len(instnames)
fitting_basis = 'per tc secosw sesinw k'
planet_letters = {1: 'b', 2: 'c'}
# Load data
# ASCII file with columns named: "time,mnvel,errvel,tel"
# RV are expected to be in m/s
data = pd.read_csv('https://raw.githubusercontent.com/thiagofst/HIP11915/main/A/HIP11915_A.txt', sep = ',')
t = np.array(data['time'])
vel = np.array(data['mnvel'])
errvel = np.array(data['errvel'])
telgrps = data.groupby('tel').groups
bjd = 0.
# Priors (initial guesses)
anybasis_params = radvel.Parameters(nplanets, basis = 'per tc secosw sesinw k')
anybasis_params['per1'] = radvel.Parameter(value = 3703.703) # Orbital period
anybasis_params['tc1'] = radvel.Parameter(value = 2456560.) # Time of inferior conjunction
anybasis_params['secosw1'] = radvel.Parameter(value = 0.1) # sqrt(e)cos(w)
anybasis_params['sesinw1'] = radvel.Parameter(value = 0.1) # sqrt(e)sin(w)
anybasis_params['k1'] = radvel.Parameter(value = 12.5) # RV semi-amplitude
anybasis_params['per2'] = radvel.Parameter(value = 2941.176) # Orbital period
anybasis_params['tc2'] = radvel.Parameter(value = 2457283.) # Time of inferior conjunction
anybasis_params['secosw2'] = radvel.Parameter(value = 0.1) # sqrt(e)cos(w)
anybasis_params['sesinw2'] = radvel.Parameter(value = 0.1) # sqrt(e)sin(w)
anybasis_params['k2'] = radvel.Parameter(value = 5.6) # RV semi-amplitude
time_base = np.median(t)
anybasis_params['dvdt'] = radvel.Parameter(value = 0.0)
anybasis_params['curv'] = radvel.Parameter(value = 0.0)
# Velocity zero-points for each instrument
anybasis_params['gamma_HARPS-B'] = radvel.Parameter(value = 0)
anybasis_params['gamma_HARPS-A'] = radvel.Parameter(value = 0)
# Jitter term for each instrument
anybasis_params['jit_HARPS-B'] = radvel.Parameter(value = 0.)
anybasis_params['jit_HARPS-A'] = radvel.Parameter(value = 0.)
# Convert input orbital parameters into the fitting basis
params = anybasis_params.basis.to_any_basis(anybasis_params, fitting_basis)
# Set vary parameters
anybasis_params['dvdt'].vary = False
anybasis_params['curv'].vary = False
anybasis_params['jit_HARPS-B'].vary = True
anybasis_params['jit_HARPS-A'].vary = True
# Priors and widths
priors = [
radvel.prior.EccentricityPrior(nplanets), # Keeps eccentricity < 1
# Other options:
radvel.prior.Gaussian('tc1', anybasis_params['tc1'].value, 100.0),
radvel.prior.Gaussian('per1', anybasis_params['per1'].value, 100),
radvel.prior.Gaussian('k1', anybasis_params['k1'].value, 0.1),
radvel.prior.Gaussian('sesinw1', anybasis_params['sesinw1'].value, 0.1),
radvel.prior.Gaussian('secosw1', anybasis_params['secosw1'].value, 0.1),
radvel.prior.Gaussian('tc2', anybasis_params['tc2'].value, 100.0),
radvel.prior.Gaussian('per2', anybasis_params['per2'].value, 100),
radvel.prior.Gaussian('k2', anybasis_params['k2'].value, 0.1),
radvel.prior.Gaussian('sesinw2', anybasis_params['sesinw2'].value, 0.3),
radvel.prior.Gaussian('secosw2', anybasis_params['secosw2'].value, 0.3),
]
# Stellar parameters
stellar = dict(mstar=0.993, mstar_err = 0.005) # https://arxiv.org/abs/1408.4130
# ---- StarcoderdataPython sample 4831423 ----
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cirq_ionq._version import __version__
from cirq_ionq.calibration import Calibration
from cirq_ionq.ionq_devices import IonQAPIDevice, decompose_to_device
from cirq_ionq.ionq_exceptions import (
IonQException,
IonQNotFoundException,
IonQUnsuccessfulJobException,
)
from cirq_ionq.job import Job
from cirq_ionq.results import QPUResult, SimulatorResult
from cirq_ionq.sampler import Sampler
from cirq_ionq.serializer import Serializer, SerializedProgram
from cirq_ionq.service import Service
from cirq_ionq.ionq_native_gates import GPIGate, GPI2Gate, MSGate
from cirq.protocols.json_serialization import _register_resolver
from cirq_ionq.json_resolver_cache import _class_resolver_dictionary
_register_resolver(_class_resolver_dictionary)
|
StarcoderdataPython
|
289627
|
<reponame>kqtqk88/or-tools<filename>tools/check_python_deps.py<gh_stars>1-10
#!/usr/bin/env python3
"""Check user python installation."""
import inspect
import logging
import optparse
import sys
# try to import setuptools
try:
from setuptools import setup # pylint: disable=g-import-not-at-top,unused-import
from setuptools import Extension # pylint: disable=g-import-not-at-top,unused-import
from setuptools.command import easy_install # pylint: disable=g-import-not-at-top,unused-import
except ImportError:
raise ImportError("""setuptools is not installed for \"""" + sys.executable +
"""\"
Follow this link for installing instructions :
https://pypi.python.org/pypi/setuptools
make sure you use \"""" + sys.executable + """\" during the installation""")
from pkg_resources import parse_version # pylint: disable=g-import-not-at-top,unused-import
required_ortools_version = "VVVV"
required_protobuf_version = "PROTOBUF_TAG"
def notinstalled(modulename):
return modulename + """ is not installed for \"""" + sys.executable + """\"
Run \"""" + sys.executable + """ setup.py install --user\" to install it"""
def absent_version(module, modulename):
return """You are using a """ + modulename + """ module that doesn't have a __version__ attribute : """ + inspect.getfile(
module) + """\"
Run \"""" + sys.executable + """ setup.py install --user\" to upgrade.
If the problem persists, remove the site-package that contains \"""" + inspect.getfile(
module) + """\". You can do so either manually or by using pip."""
def wrong_version(module, modulename, required_version, installed_version):
return """You are using """ + modulename + """-""" + installed_version + """ : """ + inspect.getfile(
module
) + """, while the required version is : """ + required_version + """
Run \"""" + sys.executable + """ setup.py install --user\" to upgrade.
If the problem persists, remove the site-package that contains \"""" + inspect.getfile(
module) + """\". You can do so either manually or by using pip."""
def log_error_and_exit(error_message):
logging.error(error_message)
raise SystemExit
def check_absent_version(module, modulename):
if not hasattr(module, "__version__"):
log_error_and_exit(absent_version(module, modulename))
if __name__ == "__main__":
parser = optparse.OptionParser("Log level")
parser.add_option(
"-l",
"--log",
type="string",
help="Available levels are CRITICAL (3), ERROR (2), WARNING (1), INFO (0), DEBUG (-1)",
default="INFO")
options, args = parser.parse_args()
# Create the logger
try:
loglevel = getattr(logging, options.log.upper())
except AttributeError:
loglevel = {
3: logging.CRITICAL,
2: logging.ERROR,
1: logging.WARNING,
0: logging.INFO,
-1: logging.DEBUG,
}[int(options.log)]
logging.basicConfig(
format="[%(levelname)s] %(message)s", stream=sys.stdout, level=loglevel)
# Display Python Version and path
logging.info(f"Python path : {sys.executable}")
logging.info(f"Python version : {sys.version}")
# Choose the pypi package
ortools_name = "ortools"
# try to import ortools
try:
import ortools # pylint: disable=g-import-not-at-top
except ImportError:
log_error_and_exit(notinstalled(ortools_name))
# try to import protobuf
try:
import google.protobuf # pylint: disable=g-import-not-at-top
except ImportError:
log_error_and_exit(notinstalled("protobuf"))
# check ortools version
try:
check_absent_version(ortools, "ortools")
if required_ortools_version != ortools.__version__:
raise Exception
logging.info("or-tools version : " + ortools.__version__ + "\n" +
inspect.getfile(ortools))
except (AttributeError, Exception): # pylint: disable=broad-except
log_error_and_exit(
wrong_version(ortools, ortools_name, required_ortools_version,
ortools.__version__))
# check protobuf version
try:
check_absent_version(google.protobuf, "protobuf")
if required_protobuf_version != google.protobuf.__version__:
raise Exception
logging.info("protobuf version : " + google.protobuf.__version__ + "\n" +
inspect.getfile(google.protobuf))
except (AttributeError, Exception): # pylint: disable=broad-except
log_error_and_exit(
wrong_version(google.protobuf, "protobuf", required_protobuf_version,
google.protobuf.__version__))
# Check if python can load the libraries' modules
# this is useful when the library architecture is not compatible with the
# python executable, or when the library's dependencies are not available or
# not compatible.
from ortools.constraint_solver import _pywrapcp # pylint: disable=g-import-not-at-top,unused-import
from ortools.linear_solver import _pywraplp # pylint: disable=g-import-not-at-top,unused-import
from ortools.algorithms import _pywrapknapsack_solver # pylint: disable=g-import-not-at-top,unused-import
from ortools.graph import _pywrapgraph # pylint: disable=g-import-not-at-top,unused-import
|
StarcoderdataPython
|
8210
|
<reponame>HuaichenOvO/EIE3280HW
import numpy as np
import numpy.linalg as lg
A_mat = np.matrix([
[0, 1, 1, 1, 0],
[1, 0, 0, 0, 1],
[1, 0, 0, 1, 1],
[1, 0, 1, 0, 1],
[0, 1, 1, 1, 0]
])
eigen = lg.eig(A_mat)  # returns (eigenvalues, eigenvectors); the eigenvalues are not sorted
idx = np.argmax(eigen[0])  # index of the largest eigenvalue
vec = eigen[1][:, idx]  # the eigenvector (column) associated with the largest eigenvalue
value = eigen[0][idx]  # the largest eigenvalue
print(vec)
print(A_mat * vec)
print(value * vec)
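# Cross-check (illustration only, not part of the original snippet): the dominant
# eigenvector can also be obtained by power iteration, which repeatedly applies
# A_mat and normalises; the result should be proportional to `vec` up to sign.
b = np.ones((A_mat.shape[0], 1))
for _ in range(100):
    b = A_mat @ b
    b = b / lg.norm(b)
print(b)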
|
StarcoderdataPython
|
9626280
|
<reponame>cpostbitbuckets/BucketVision<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
match
Example of matching a template to an image
Derived from techniques found at http://www.pyimagesearch.com/2015/01/26/multi-scale-template-matching-using-python-opencv/
Copyright (c) 2017 - RocketRedNeck.com RocketRedNeck.net
RocketRedNeck and MIT Licenses
RocketRedNeck hereby grants license for others to copy and modify this source code for
whatever purpose others deem worthy as long as RocketRedNeck is given credit
where credit is due and you leave RocketRedNeck out of it for all other nefarious purposes.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
****************************************************************************************************
"""
# import the necessary packages
import numpy as np
import imutils
import cv2
# load the image, convert it to grayscale, and detect edges
img = cv2.imread("shirt.jpg")
img = cv2.resize(img,(640,480))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
template = cv2.Canny(gray, 50, 200)
(tH, tW) = template.shape[:2]
cv2.imshow("Template", template)
sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(gray,None)
keypoint=cv2.drawKeypoints(gray,kp,img)
cv2.imshow("KeyPoints",keypoint)
# loop over the images to find the template in
#for imagePath in range(1,2) #glob.glob("/*.jpg"):
# load the image, convert it to grayscale, and initialize the
# bookkeeping variable to keep track of the matched region
image = cv2.imread("shirt.jpg")
image = cv2.resize(image, (640,480))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
found = None
# loop over the scales of the image
for scale in np.linspace(0.2, 1.0, 20)[::-1]:
# resize the image according to the scale, and keep track
# of the ratio of the resizing
resized = imutils.resize(gray, width = int(gray.shape[1] * scale))
r = gray.shape[1] / float(resized.shape[1])
# if the resized image is smaller than the template, then break
# from the loop
if resized.shape[0] < tH or resized.shape[1] < tW:
break
# detect edges in the resized, grayscale image and apply template
# matching to find the template in the image
edged = cv2.Canny(resized, 50, 200)
result = cv2.matchTemplate(edged, template, cv2.TM_CCOEFF)
(_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)
# check to see if the iteration should be visualized
# if args.get("visualize", False):
# # draw a bounding box around the detected region
# clone = np.dstack([edged, edged, edged])
# cv2.rectangle(clone, (maxLoc[0], maxLoc[1]),
# (maxLoc[0] + tW, maxLoc[1] + tH), (0, 0, 255), 2)
# cv2.imshow("Visualize", clone)
# cv2.waitKey(0)
    # if we have found a new maximum correlation value, then update
# the bookkeeping variable
if found is None or maxVal > found[0]:
found = (maxVal, maxLoc, r)
# unpack the bookkeeping variable and compute the (x, y) coordinates
# of the bounding box based on the resized ratio
(_, maxLoc, r) = found
(startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))
(endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))
# draw a bounding box around the detected result and display the image
cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)
cv2.imshow("Image", image)
cv2.waitKey(0)
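# Illustration only (not in the original script): `found` also carries the raw
# correlation score and the scale ratio, which helps when tuning the Canny
# thresholds or the scale range of the search.
print("best score: {:.0f}, scale ratio: {:.2f}".format(found[0], found[2]))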
|
StarcoderdataPython
|
5026967
|
<filename>desktop/core/src/desktop/lib/connectors/types.py
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import sys
from desktop.conf import CONNECTORS_BLACKLIST, CONNECTORS_WHITELIST
from desktop.lib.exceptions_renderable import PopupException
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
CONNECTOR_TYPES = [
{
'dialect': 'hive',
'nice_name': 'Hive',
'description': 'Recommended',
'category': 'editor',
'interface': 'hiveserver2',
'settings': [
{'name': 'server_host', 'value': 'localhost'},
{'name': 'server_port', 'value': 10000},
{'name': 'is_llap', 'value': False}, # cf. _get_session_by_id() or create a separate connector
{'name': 'use_sasl', 'value': True},
],
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': True,
'has_reference_functions': True,
'has_use_statement': True,
}
},
{
'dialect': 'hive',
'nice_name': 'Hive',
'description': 'Via SqlAlchemy interface',
'category': 'editor',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'hive://localhost:10000'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': True,
'has_reference_functions': True,
'has_use_statement': True,
}
},
{
'nice_name': 'Impala',
'dialect': 'impala',
'interface': 'hiveserver2',
'category': 'editor',
'description': '',
'settings': [
{'name': 'server_host', 'value': 'localhost'},
{'name': 'server_port', 'value': 21050},
{'name': 'impersonation_enabled', 'value': False},
{'name': 'use_sasl', 'value': False},
],
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': True,
'has_reference_functions': True,
'has_use_statement': True,
}
},
{
'nice_name': 'Impala',
'dialect': 'impala',
'interface': 'sqlalchemy',
'category': 'editor',
'description': 'Via SqlAlchemy interface',
'settings': [
{'name': 'url', 'value': 'impala://localhost:21050'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': True,
'has_reference_functions': True,
'has_use_statement': True,
}
},
{
'nice_name': 'Druid',
'dialect': 'druid',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'druid://localhost:8082/druid/v2/sql/'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'ksqlDB',
'dialect': 'ksql',
'interface': 'ksql',
'settings': [
{'name': 'url', 'value': 'http://localhost:8088'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': False,
'has_table': True,
'has_live_queries': True,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': '<NAME>',
'dialect': 'flink',
'interface': 'flink',
'settings': [
{'name': 'url', 'value': 'http://localhost:8083'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': False,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'SparkSQL',
'dialect': 'sparksql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'hive://localhost:10000'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': 'Via Thrift Server and SqlAlchemy interface',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'SparkSQL',
'dialect': 'sparksql',
'interface': 'hiveserver2',
'settings': [
{'name': 'server_host', 'value': 'localhost'},
{'name': 'server_port', 'value': 10000},
{'name': 'impersonation_enabled', 'value': False},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
{'name': 'use_sasl', 'value': True},
],
'category': 'editor',
'description': 'Via Thrift Server and Hive interface',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': True,
}
},
{
'nice_name': 'SparkSQL',
'dialect': 'sparksql',
'interface': 'livy',
'settings': [
{'name': 'api_url', 'value': 'http://localhost:8998'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': 'Via Livy server',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': '<NAME>',
'dialect': 'phoenix',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'phoenix://localhost:8765/'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': False,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'MySQL',
'dialect': 'mysql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'mysql://username:password@localhost:3306/hue'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'PostgreSQL',
'dialect': 'postgresql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'postgresql://username:password@localhost:5432/hue'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Trino',
'dialect': 'trino',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'trino://localhost:8080/tpch'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
{'name': 'has_impersonation', 'value': False},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Presto',
'dialect': 'presto',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'presto://localhost:8080/tpch'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
{'name': 'has_impersonation', 'value': False},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'DaskSql',
'dialect': 'dasksql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'presto://localhost:8080/catalog/default'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
{'name': 'has_impersonation', 'value': False},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': False,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
}
},
{
'nice_name': 'Elasticsearch SQL',
'dialect': 'elasticsearch',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'elasticsearch+http://localhost:9200/'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': False,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': False,
'has_optimizer_values': False,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Calcite',
'dialect': 'calcite',
'interface': 'sqlalchemy',
'settings': [
{'name': 'server_host', 'value': 'localhost'},
{'name': 'server_port', 'value': 10000},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Athena',
'dialect': 'athena',
'interface': 'sqlalchemy',
'settings': [
{
'name': 'url',
'value': 'awsathena+rest://XXXXXXXXXXXXXXXXXXXX:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX@<EMAIL>ena.us-west-2.amazonaws.com:'
'443/default?s3_staging_dir=s3://gethue-athena/scratch'
},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Redshift',
'dialect': 'redshift',
'interface': 'sqlalchemy',
'settings': [
      {'name': 'url', 'value': 'redshift+psycopg2://[email protected]:5439/database'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Snowflake',
'dialect': 'snowflake',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'snowflake://{user}:{password}@{account}/{database}'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': '<NAME>',
'dialect': 'bigquery',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'bigquery://project-XXXXXX/dataset_name'},
{'name': 'credentials_json', 'value': '{"type": "service_account", ...}'}
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Oracle',
'dialect': 'oracle',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'oracle://user:password@localhost'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Clickhouse',
'dialect': 'clickhouse',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'clickhouse://localhost:8123'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '"',
'sql_identifier_comment_single': '--',
'has_catalog': False,
'has_database': False,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'Solr SQL',
'dialect': 'solr',
'interface': 'solr',
'settings': [
{'name': 'url', 'value': 'solr://user:password@localhost:8983/solr/<collection>[?use_ssl=true|false]'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'SQL Database',
'dialect': 'sql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'name://projectName/datasetName'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'SQL Database (JDBC)',
'dialect': 'sql',
'interface': 'sqlalchemy',
'settings': [
{'name': 'url', 'value': 'jdbc:db2://localhost:50000/SQOOP'},
{'name': 'driver', 'value': 'com.ibm.db2.jcc.DB2Driver'},
{'name': 'user', 'value': 'hue'},
{'name': 'password', 'value': '<PASSWORD>'},
{'name': 'has_ssh', 'value': False},
{'name': 'ssh_server_host', 'value': '127.0.0.1'},
],
'category': 'editor',
'description': 'Deprecated: older way to connect to any database.',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': False,
'has_optimizer_values': False,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{
'nice_name': 'SqlFlow',
'dialect': 'sqlflow',
'interface': 'sqlflow',
'settings': [
{'name': 'url', 'value': 'localhost:50051'},
{'name': 'datasource', 'value': 'hive://localhost:10000/iris'},
],
'category': 'editor',
'description': '',
'properties': {
'is_sql': True,
'sql_identifier_quote': '`',
'sql_identifier_comment_single': '--',
'has_catalog': True,
'has_database': True,
'has_table': True,
'has_live_queries': False,
'has_optimizer_risks': True,
'has_optimizer_values': True,
'has_auto_limit': False,
'has_reference_language': False,
'has_reference_functions': False,
'has_use_statement': False,
}
},
{'nice_name': 'PySpark', 'dialect': 'pyspark', 'settings': [], 'category': 'editor', 'description': '', 'properties': {}},
{'nice_name': 'Spark', 'dialect': 'spark', 'settings': [], 'category': 'editor', 'description': '', 'properties': {}},
{'nice_name': 'Pig', 'dialect': 'pig', 'settings': [], 'category': 'editor', 'description': '', 'properties': {}},
{'nice_name': 'Java', 'dialect': 'java', 'settings': [], 'category': 'editor', 'description': '', 'properties': {}},
{'nice_name': 'HDFS', 'dialect': 'hdfs', 'interface': 'rest',
'settings': [
{'name': 'server_url', 'value': 'http://localhost:50070/webhdfs/v1'},
{'name': 'default_fs', 'value': 'fs_defaultfs=hdfs://localhost:8020'}
],
'category': 'browsers', 'description': '', 'properties': {}
},
{'nice_name': 'YARN', 'dialect': 'yarn', 'settings': [], 'category': 'browsers', 'description': '', 'properties': {}},
{'nice_name': 'S3', 'dialect': 's3', 'settings': [], 'category': 'browsers', 'description': '', 'properties': {}},
{'nice_name': 'ADLS', 'dialect': 'adls-v1', 'settings': [], 'category': 'browsers', 'description': '', 'properties': {}},
# HBase
# Solr
{
'nice_name': 'Hive Metastore',
'dialect': 'hms',
'interface': 'hiveserver2',
'settings': [{'name': 'server_host', 'value': ''}, {'name': 'server_port', 'value': ''},],
'category': 'catalogs',
'description': '',
'properties': {}
},
{
'nice_name': 'Atlas', 'dialect': 'atlas', 'interface': 'rest', 'settings': [], 'category': 'catalogs', 'description': '',
'properties': {}
},
{
'nice_name': 'Navigator', 'dialect': 'navigator', 'interface': 'rest', 'settings': [], 'category': 'catalogs',
'description': '',
'properties': {}
},
{'nice_name': 'Optimizer', 'dialect': 'optimizer', 'settings': [], 'category': 'optimizers', 'description': '', 'properties': {}},
{'nice_name': 'Oozie', 'dialect': 'oozie', 'settings': [], 'category': 'schedulers', 'description': '', 'properties': {}},
{'nice_name': 'Celery', 'dialect': 'celery', 'settings': [], 'category': 'schedulers', 'description': '', 'properties': {}},
]
CONNECTOR_TYPES = [connector for connector in CONNECTOR_TYPES if connector['dialect'] not in CONNECTORS_BLACKLIST.get()]
if CONNECTORS_WHITELIST.get():
CONNECTOR_TYPES = [connector for connector in CONNECTOR_TYPES if connector['dialect'] in CONNECTORS_WHITELIST.get()]
CATEGORIES = [
{"name": 'Editor', 'type': 'editor', 'description': ''},
{"name": 'Browsers', 'type': 'browsers', 'description': ''},
{"name": 'Catalogs', 'type': 'catalogs', 'description': ''},
{"name": 'Optimizers', 'type': 'optimizers', 'description': ''},
{"name": 'Schedulers', 'type': 'schedulers', 'description': ''},
{"name": 'Plugins', 'type': 'plugins', 'description': ''},
]
def get_connectors_types():
return CONNECTOR_TYPES
def get_connector_categories():
return CATEGORIES
def get_connector_by_type(dialect, interface):
instance = [
connector
for connector in get_connectors_types() if connector['dialect'] == dialect and connector['interface'] == interface
]
if instance:
return instance[0]
else:
    raise PopupException(_('No connector with the type %s found.') % dialect)
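# Usage sketch (illustration only, not part of the Hue source): connectors are
# keyed on the (dialect, interface) pair, e.g.
#
#   get_connector_by_type('hive', 'hiveserver2')['nice_name']   -> 'Hive'
#   get_connector_by_type('mysql', 'sqlalchemy')['settings'][0] -> {'name': 'url', ...}
#
# Dialects removed by CONNECTORS_BLACKLIST / CONNECTORS_WHITELIST raise
# PopupException instead.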
|
StarcoderdataPython
|
3369804
|
from simulator.util.Vehicle import Vehicle
from simulator.UI.Record import EventBag
from simulator.util.World import World
from simulator.util.Camera import Camera
from simulator.util.transform.util import params_from_tansformation
import pickle
def test_simulate_key():
event_bag = EventBag("../../data/recording.h5", record=False)
vehicle = Vehicle(Camera(), play=False)
vehicle.set_transform(x=100)
all_states = pickle.load(open("../../data/tmp_all_states.pkl", "rb"))
# all_states = []
# for i in range(len(event_bag)):
# key, x, y = event_bag.next_event()
# vehicle.simulate(key, (x, y))
#
# all_states.append([vehicle.T.copy(), camera.C.copy(), vehicle.next_locations.copy(),
# vehicle.vertices_W.copy(), vehicle.turn_angle])
# pickle.dump(all_states, open("../../data/tmp_all_states.pkl", "wb"))
# event_bag.reset()
del event_bag
return all_states
def test_simulate_waypoint(all_states):
vehicle = Vehicle(Camera(), play=False)
vehicle.set_transform(x=100)
event_bag = EventBag("../../data/recording.h5", record=False)
for i in range(len(all_states)):
state = all_states[i]
key, x_mouse, y_mouse = event_bag.next_event()
next_vehicle_T = state[0]
x,y,z,roll,yaw,pitch = params_from_tansformation(next_vehicle_T)
vehicle.interpret_key(key) # this is necessary for the speed
vehicle.simulate_given_waypoint(x,z,yaw, (x_mouse, y_mouse))
x_n,y_n,z_n,roll_n,yaw_n,pitch_n = vehicle.get_transform()
a=10
del event_bag
if __name__ == "__main__":
all_states = test_simulate_key()
test_simulate_waypoint(all_states)
|
StarcoderdataPython
|
3416602
|
<reponame>ratsgib/amazonprice<gh_stars>0
from django.test import TestCase
from django.urls import reverse
from bs4 import BeautifulSoup
from product.models import Product, Price
def create_data(product_nums, price_nums):
    '''
    Register test data: product_nums products and price_nums prices.
    '''
for num in range(product_nums):
p = Product.objects.create(
title=f"テスト商品{num}",
asin=f"{num:010}",
image="<img>"
)
for num in range(price_nums):
price = Price.objects.create(
product=p, price=num*100,
)
class ProductTests(TestCase):
def test_index_noresult(self):
        '''
        Access the index page and verify that no products are listed.
        '''
response = self.client.get(reverse("index"))
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content.decode(), "html.parser")
self.assertTrue(soup.find(id="result_empty"))
def test_index_one(self):
        '''
        When one record is registered, verify that the product list is displayed.
        '''
create_data(1, 1)
response = self.client.get(reverse("index"))
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content.decode(), "html.parser")
self.assertNotEqual(soup.select("#result_container .card"), [])
self.assertFalse(soup.find(id="result_empty"))
|
StarcoderdataPython
|
1964889
|
<filename>animius/Chatbot/CombinedChatbotModel.py
import copy
import tensorflow as tf
import animius as am
from .ChatbotModel import ChatbotModel
# A chatbot network built upon an intent-ner model, using its embedding tensor and thus saving VRAM.
# This model is meant for training. Once training is complete, it is advised to freeze the model
# and use the CombinedPredictionModel class instead.
class CombinedChatbotModel(ChatbotModel):
def __init__(self):
super().__init__()
self.intent_ner_model = None
self.intent_ner_initialized = False
self.train_intent_ner = None
self.predict_intent_ner = None
self.predict_chatbot = super().predict
self.init_vars = None
def build_graph(self, model_config, data, graph=None, embedding_tensor=None, intent_ner=None):
        # graph and embedding_tensor arguments don't really do anything
# if data is None or 'embedding' not in data.values:
# raise ValueError('When creating a new model, data must contain a word embedding')
def copy_embedding(new_data):
if (new_data is None or 'embedding' not in new_data.values) and 'embedding' in data.values:
new_data.add_embedding_class(data.values['embedding'])
# intent_ner arg can be IntentNERModel, model config for intent ner, string, tuple of string
# tuple of model config and data and/or model, or none for a new intent ner model
if intent_ner is None:
if 'intent_ner' in model_config.config:
                # storing intent ner in model config, most likely used for saving / restoring
intent_ner = model_config.config['intent_ner']
else:
# create a new intent ner model
self.intent_ner_model = am.IntentNER.IntentNERModel()
if isinstance(intent_ner, am.ModelConfig):
self.intent_ner_model = am.IntentNER.IntentNERModel()
intent_ner_data = am.IntentNERData()
intent_ner_data.add_embedding_class(data.values['embedding'])
self.intent_ner_model.build_graph(intent_ner, intent_ner_data)
elif isinstance(intent_ner, am.IntentNER.IntentNERModel):
self.intent_ner_model = am.IntentNER.IntentNERModel()
copy_embedding(self.intent_ner_model.data)
if self.intent_ner_model.cost is None: # check if model has already been built
self.intent_ner_model.build_graph(am.ModelConfig(cls='IntentNER'), am.IntentNERData())
elif self.intent_ner_model.sess is not None:
self.intent_ner_initialized = True # we don't need to initialize the model
elif isinstance(intent_ner, str):
self.intent_ner_model = am.Model.load(intent_ner)
copy_embedding(self.intent_ner_model.data)
self.intent_ner_initialized = True
elif isinstance(intent_ner, tuple) or isinstance(intent_ner, list):
if len(intent_ner) == 3:
self.intent_ner_model, mc, new_data = intent_ner
copy_embedding(new_data)
if self.intent_ner_model.cost is None: # check if model has already been built
self.intent_ner_model.build_graph(mc, new_data)
elif len(intent_ner) == 2:
if isinstance(intent_ner[0], str):
# tuple of string, pair of (directory, name)
self.intent_ner_model = am.Model.load(intent_ner[0], intent_ner[1])
copy_embedding(self.intent_ner_model.data)
self.intent_ner_initialized = True
else:
# assume tuple of model config and data
mc, new_data = intent_ner
copy_embedding(new_data)
self.intent_ner_model = am.IntentNER.IntentNERModel()
                self.intent_ner_model.build_graph(mc, new_data)
else:
raise ValueError("Unexpected tuple of intent_ner")
else:
raise TypeError("Unexpected type of intent_ner")
# at this point, self.intent_ner_model should be built
self.train_intent_ner = self.intent_ner_model.train
self.predict_intent_ner = self.intent_ner_model.predict
with self.intent_ner_model.graph.as_default():
intent_vars = set(tf.all_variables())
super().build_graph(model_config,
data,
embedding_tensor=self.intent_ner_model.word_embedding,
graph=self.intent_ner_model.graph)
with self.graph.as_default():
all_vars = set(tf.all_variables())
self.init_vars = all_vars - intent_vars
self.config = dict(model_config.config)
self.config['class'] = 'CombinedChatbot'
self.model_structure = dict(model_config.model_structure)
self.hyperparameters = dict(model_config.hyperparameters)
self.data = data
def init_tensorflow(self, graph=None, init_param=True, init_sess=True):
if init_param and self.intent_ner_initialized:
# we can only initialize the chatbot vars
self.sess = self.intent_ner_model.sess
self.graph = self.intent_ner_model.graph
super().init_tensorflow(graph=self.graph, init_param=False, init_sess=False)
with self.graph.as_default():
self.sess.run(
tf.variables_initializer(self.init_vars)
)
else:
super().init_tensorflow(graph=graph, init_param=init_param, init_sess=init_sess)
self.intent_ner_model.sess = self.sess
@classmethod
def load(cls, directory, name='model', data=None):
model = CombinedChatbotModel()
model.restore_config(directory, name)
if data is not None:
model.data = data
else:
model.data = am.ChatData()
model.build_graph(model.model_config(), model.data) # automatically builds intent ner in model config
model.init_word_embedding = False # prevent initializing the word embedding again
model.init_tensorflow(init_param=False, init_sess=True)
checkpoint = tf.train.get_checkpoint_state(directory)
input_checkpoint = checkpoint.model_checkpoint_path
with model.graph.as_default():
model.saver.restore(model.sess, input_checkpoint)
model.saved_directory = directory
model.saved_name = name
return model
def save(self, directory=None, name='model', meta=True, graph=False):
if self.intent_ner_model.saved_directory is None and self.intent_ner_model.saved_name is None:
# new model
self.intent_ner_model.save(directory=directory, name=name + '_intent_ner')
self.config['intent_ner'] = (directory, name + '_intent_ner')
else:
self.intent_ner_model.save() # default to model save
self.config['intent_ner'] = (self.intent_ner_model.saved_directory, self.intent_ner_model.saved_name)
super().save(directory, name, meta, graph)
def add_embedding(self, embedding):
# shortcut for adding embedding
self.data.add_embedding_class(embedding)
self.intent_ner_model.data.add_embedding_class(embedding)
def predict_combined(self, input_sentences=None, save_path=None):
if input_sentences is None:
input_sentences = self.data
if isinstance(input_sentences, am.IntentNERData) or isinstance(input_sentences, am.ChatData):
input_sentences = input_sentences.values['input']
# package str in a list
if isinstance(input_sentences, str):
input_sentences = [input_sentences]
sentences_cache = copy.copy(input_sentences)
intent_ner_results = self.predict_intent_ner(input_sentences, raw=False)
results = []
chat_indexes = []
for i in range(len(intent_ner_results)):
intent = intent_ner_results[i][0]
if intent > 0: # not chat
results.append(intent_ner_results[i])
else: # chat
chat_indexes.append(i)
results.append(None) # add tmp placeholder
if len(chat_indexes) > 0: # there are chat responses, proceed with chatbot prediction
chat_results = super().predict([sentences_cache[i] for i in chat_indexes], raw=False)
for i in range(len(chat_results)):
results[chat_indexes[i]] = (0, chat_results[i])
# saving
if save_path is not None:
with open(save_path, "w") as file:
for instance in results:
file.write('{0}, {1}\n'.format(*instance))
return results
def predict(self, input_data=None, save_path=None, raw=False, combined=True):
# automatically selects a prediction function
if combined:
# use combined
return self.predict_combined(input_sentences=input_data, save_path=save_path)
elif isinstance(input_data, am.IntentNERData):
return self.predict_intent_ner(input_data, save_path, raw)
else:
return self.predict_chatbot(input_data, save_path, raw)
|
StarcoderdataPython
|
318534
|
"""
auth.py
Auth mongoengine models.
"""
from mongoengine import Document
from mongoengine import fields
from werkzeug.security import gen_salt
from app.models.user import User
class Client(Document):
"""Auth Client model."""
user_id = fields.IntField(null=False)
user = fields.ReferenceField(User)
client_id = fields.StringField(primary_key=True)
client_secret = fields.StringField(null=False)
is_confidential = fields.BooleanField()
_redirect_uris = fields.StringField()
_default_scopes = fields.StringField()
@property
def allowed_grant_types(self):
"""Returns allowed grant types."""
return ['password', 'authorization_code']
@property
def client_type(self):
"""Returns client type."""
if self.is_confidential:
return 'confidential'
return 'public'
@property
def redirect_uris(self):
"""Returns a list of redirect URIs."""
if self._redirect_uris:
return str(self._redirect_uris).split()
return []
@property
def default_redirect_uri(self):
"""Returns the default redirect URI."""
return self.redirect_uris[0]
@property
def default_scopes(self):
"""Returns the client scopes."""
if self._default_scopes:
return str(self._default_scopes).split()
return []
@property
def has_password_credential_permission(self):
"""Returns true if the client has password permission."""
return True
@property
def has_facebook_credential_permission(self):
"""Returns true if the client has facebook permission."""
return True
@staticmethod
def generate(redirect_uris):
"""Generates a new client."""
item = Client(
client_id=gen_salt(40),
client_secret=gen_salt(50),
_redirect_uris=' '.join(redirect_uris),
_default_scopes='email',
user_id=None,
)
        item.save()
        return item
class Grant(Document):
"""Auth Grant model."""
grant_id = fields.SequenceField(primary_key=True)
user_id = fields.IntField(null=False)
user = fields.ReferenceField(User)
client_id = fields.StringField(null=False)
client = fields.ReferenceField(Client)
code = fields.StringField(null=False)
redirect_uri = fields.StringField()
expires = fields.DateTimeField()
_scopes = fields.StringField()
@property
def scopes(self):
"""Returns the grant scopes."""
if self._scopes:
return str(self._scopes).split()
return []
class Token(Document):
"""Client token model."""
token_id = fields.SequenceField(primary_key=True)
user_id = fields.IntField(null=False)
user = fields.ReferenceField(User)
client_id = fields.StringField(null=False)
client = fields.ReferenceField(Client)
# currently only bearer is supported
token_type = fields.StringField()
access_token = fields.StringField(unique=True)
refresh_token = fields.StringField(unique=True)
expires = fields.DateTimeField()
_scopes = fields.StringField()
@property
def scopes(self):
"""Returns the token scopes."""
if self._scopes:
return str(self._scopes).split()
return []
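# A minimal usage sketch (illustration only, not part of the original module).
# The _redirect_uris / _default_scopes fields store space-separated strings that
# the properties split on access; constructing a Document does not require a
# database connection, so the behaviour can be checked directly:
#
#   example = Client(
#       client_id='example-id',
#       client_secret='example-secret',
#       _redirect_uris='https://a.example/cb https://b.example/cb',
#       _default_scopes='email profile',
#   )
#   example.redirect_uris         -> ['https://a.example/cb', 'https://b.example/cb']
#   example.default_redirect_uri  -> 'https://a.example/cb'
#   example.default_scopes        -> ['email', 'profile']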
|
StarcoderdataPython
|
98209
|
class TransportModesProvider(object):
TRANSPORT_MODES = {
'0': None,
# High speed train
'1': {
'node': [('railway','station'),('train','yes')],
'way': ('railway','rail'),
'relation': ('route','train')
},
# Intercity train
'2': {
'node': [('railway','station'),('train','yes')],
'way': ('railway','rail'),
'relation': ('route','train')
},
# Commuter rail
'3': {
'node': [('railway','station'),('train','yes')],
'way': ('railway','rail'),
'relation': ('route','train')
},
# Metro/Subway: default
'4': {
'node':[('railway','station'),('subway','yes')],
'way': [('railway','subway'),('tunnel','yes')],
'relation': ('route','subway')
},
# Light rail
'5': {
'node':[('railway','station'),('light_rail','yes')],
'way': ('railway','light_rail'),
'relation': ('route','light_rail')
},
# BRT
'6': {
'node':('highway','bus_stop'),
'way': ('busway','lane'),
'relation': ('route','bus')
},
# People mover
'7': None,
# Bus
'8': {
'node':('highway','bus_stop'),
'way': None,
'relation': ('route','bus')
},
# Tram
'9': {
'node':[('railway','station'),('tram','yes')],
'way': ('railway','tram'),
'relation': ('route','tram')
},
# Ferry
'10': {
'node':[('ferry','yes'),('amenity','ferry_terminal')],
'way': ('route','ferry'),
'relation': ('route','ferry')
}
}
def __init__(self, lines_info):
self._lines = {}
for line in lines_info:
self._lines[line['url_name']] = line
def tags(self, line_url_name, element_type):
transport_mode_id = self._lines[line_url_name]['transport_mode_id']
mode = self.TRANSPORT_MODES[str(transport_mode_id)] or self.TRANSPORT_MODES['4']
tags = mode[element_type]
if not isinstance(tags, list):
tags = [tags]
# We remove Nones from list
tags = list(filter(None, tags))
return tags
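# Usage sketch (illustration only, not part of the original module); the class has
# no external dependencies, so this runs as-is. `lines_info` is an iterable of
# dicts carrying at least 'url_name' and 'transport_mode_id'; ids mapped to None
# (0 and 7) fall back to the Metro/Subway ('4') tag set.
if __name__ == '__main__':
    provider = TransportModesProvider([{'url_name': 'line-1', 'transport_mode_id': 4}])
    print(provider.tags('line-1', 'node'))      # [('railway', 'station'), ('subway', 'yes')]
    print(provider.tags('line-1', 'relation'))  # [('route', 'subway')]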
|
StarcoderdataPython
|
3531063
|
<reponame>dmyersturnbull/chembler<gh_stars>0
from pathlib import Path
from typing import Sequence
import decorateme
import pandas as pd
import regex
from pocketutils.core.exceptions import ParsingError
from typeddfs import TypedDfs
from mandos.model.utils.setup import MandosResources, logger
def _patterns(self: pd.DataFrame) -> Sequence[str]:
return self[self.columns[0]].values.tolist()
def _targets(self: pd.DataFrame) -> Sequence[str]:
return self.columns[1:].values.tolist()
def _get(self: pd.DataFrame, s: str) -> Sequence[str]:
for irow, pattern in enumerate(self[self.columns[0]].values):
try:
match: regex.Match = pattern.fullmatch(s)
except AttributeError:
raise ParsingError(f"Failed on regex {pattern}") from None
if match is not None:
return [pattern.sub(t, s.strip()) for t in self.T[irow] if isinstance(t, str)]
return s
_doc = r"""
A list of regex patterns and replacements.
The first column is the pattern, and the next n columns are the targets.
Has an important function, ``MappingDf.get``, described below.
These DataFrames are used in a few places to clean up, simplify, or otherwise process
predicate and object names.
Example:
For the input string "cyp450 2A3", consider we have these two rows:
row 1: ``['^Juggle protein [xy]', 'Juggle \1', 'J\1']``
row 2: ``['^CYP *450 (\d+)[A-Z]\d*$', 'Cytochrome P450 \1', 'CYP\1']``
First, we try to match against the first pattern. It doesn't match, so we try the next.
This one does match our input string, so we return ``["Cytochrome P450 2", "CYP2"]``.
    The first returned element (here "Cytochrome P450 2") is considered the primary,
    while the rest are -- for most usages -- considered optional extras.
"""
MappingDf = (
TypedDfs.typed("MappingDf")
.doc(_doc)
.add_methods(targets=_targets, patterns=_patterns, get=_get)
.post(lambda dfx: dfx.astype(str))
.secure()
).build()
@decorateme.auto_repr_str()
class _Compiler:
"""
Compiles multiple regex patterns, providing nice error messages.
All patterns are global (i.e. ^ and $ are affixed) and case-insensitive.
"""
def __init__(self):
self._i = 0
def compile(self, s: str) -> regex.Pattern:
self._i += 1 # header is the first
try:
return regex.compile("^" + s.strip() + "$", flags=regex.V1 | regex.IGNORECASE)
except regex.error:
raise ParsingError(f"Failed to parse '{s}' on row {self._i}") from None
@decorateme.auto_repr_str()
class Mappings:
"""
Creates MappingDfs.
See that documentation.
"""
@classmethod
def from_resource(cls, name: str) -> MappingDf:
path = MandosResources.a_file("mappings", name)
return cls.from_path(path)
@classmethod
def from_path(cls, path: Path) -> MappingDf:
"""
Reads a mapping from a CSV-like file or ``.regexes`` file.
Feather and Parquet are fine, too.
        The ``.regexes`` suffix is a simple extension of CSV that uses ``|||`` as the delimiter
        and ignores empty lines and lines beginning with ``#``.
It's just nice for easily editing in a text editor.
"""
df = MappingDf.read_file(path)
compiler = _Compiler()
df[df.columns[0]] = df[df.columns[0]].map(compiler.compile)
logger.info(f"Read mapping with {len(df):,} items from {path}")
return df
__all__ = ["MappingDf", "Mappings"]
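# Usage sketch (illustration only, not part of the original module). Given a
# mapping file whose first column holds regex patterns and whose remaining
# columns hold replacement targets (see `_doc` above), lookups go through
# ``MappingDf.get``; the file name below is hypothetical:
#
#   df = Mappings.from_path(Path("cyp.regexes"))
#   df.get("CYP 450 2A3")    -> ["Cytochrome P450 2", "CYP2"]   (per the _doc example)
#   df.get("no match here")  -> "no match here"                 (unmatched input passes through)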
|
StarcoderdataPython
|
6606004
|
<reponame>Mhaiyang/iccv<gh_stars>1-10
"""
@Time : 1/13/21 20:04
@Author : TaylorMei
@Email : <EMAIL>
@Project : iccv
@File : infer.py
@Function:
"""
import time
import datetime
import sys
sys.path.append("..")
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from collections import OrderedDict
from config import *
from misc import *
from mirrornet_plus import MirrorNet_Plus
torch.manual_seed(2020)
device_ids = [1]
torch.cuda.set_device(device_ids[0])
results_path = './results'
# results_path = '/home/iccd/sod/results_intermediate_ca'
# results_path = '/home/iccd/sod/results_intermediate_sa'
check_mkdir(results_path)
ckpt_path = './ckpt'
# ckpt_path = '/media/iccd/disk2/tip_mirror_ckpt'
# exp_name = 'MirrorNet_NAC_SL_resnet50'
# exp_name = 'MirrorNet_NAC_resnet50_bie_four_ms_poly_v12'
exp_name = 'MirrorNet_Plus_sod_7'
args = {
'snapshot': '120',
'scale': 384,
'crf': False,
'save_results': True, # whether to save the resulting masks
'if_eval': False
}
print(torch.__version__)
img_transform = transforms.Compose([
transforms.Resize((args['scale'], args['scale'])),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
to_pil = transforms.ToPILImage()
to_test = OrderedDict([
('SOD', sod_path),
('ECSSD', ecssd_path),
('DUT-OMRON', dutomron_path),
('PASCAL-S', pascals_path),
('HKU-IS', hkuis_path),
# ('HKU-IS-TEST', hkuis_test_path),
('DUTS-TE', dutste_path),
# ('MSD', msd9_test_path)
])
results = OrderedDict()
def main():
net = MirrorNet_Plus(backbone_path).cuda(device_ids[0])
if len(args['snapshot']) > 0:
print('Load snapshot {} for testing'.format(args['snapshot']))
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
print('Load {} succeed!'.format(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
net.eval()
with torch.no_grad():
start = time.time()
for name, root in to_test.items():
start_each = time.time()
image_path = os.path.join(root, 'image')
mask_path = os.path.join(root, 'mask')
precision_record, recall_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
mae_record = AvgMeter()
if args['save_results']:
check_mkdir(os.path.join(results_path, exp_name, args['snapshot'], '%s' % (name)))
img_list = [os.path.splitext(f)[0] for f in os.listdir(image_path) if f.endswith('g')]
for idx, img_name in enumerate(img_list):
# print('predicting for {}: {:>4d} / {}'.format(name, idx + 1, len(img_list)))
if name == 'HKU-IS':
img = Image.open(os.path.join(image_path, img_name + '.png')).convert('RGB')
else:
img = Image.open(os.path.join(image_path, img_name + '.jpg')).convert('RGB')
w, h = img.size
img_var = Variable(img_transform(img).unsqueeze(0)).cuda(device_ids[0])
# prediction = net(img_var)
# _, _, prediction = net(img_var)
_, _, _, prediction = net(img_var)
# _, _, _, _, prediction = net(img_var)
prediction = np.array(transforms.Resize((h, w))(to_pil(prediction.data.squeeze(0).cpu())))
# c = prediction.shape[1]
# prediction = np.array(transforms.Resize((int(c/4), c))(to_pil(prediction.data.transpose(1, 3).squeeze(0).cpu())))
if args['crf']:
prediction = crf_refine(np.array(img.convert('RGB')), prediction)
if args['save_results']:
Image.fromarray(prediction).convert('RGB').save(os.path.join(results_path, exp_name, args['snapshot'],
'%s' % (name), img_name + '.png'))
if args['if_eval']:
gt = np.array(Image.open(os.path.join(mask_path, img_name + '.png')).convert('L'))
precision, recall, mae = cal_precision_recall_mae(prediction, gt)
for pidx, pdata in enumerate(zip(precision, recall)):
p, r = pdata
precision_record[pidx].update(p)
recall_record[pidx].update(r)
mae_record.update(mae)
if args['if_eval']:
fmeasure = cal_fmeasure([precord.avg for precord in precision_record],
[rrecord.avg for rrecord in recall_record])
results[name] = OrderedDict([('F', "%.3f" % fmeasure), ('MAE', "%.3f" % mae_record.avg)])
print("{}'s average Time Is : {:.2f}".format(name, (time.time() - start_each) / len(img_list)))
if args['if_eval']:
print('test results:')
print(results)
end = time.time()
print("Total Testing Time: {}".format(str(datetime.timedelta(seconds=int(end - start)))))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
12849899
|
d1 = {}
d2 = {'one': 1, 'two': 2 }
d3 = dict(one=1, two=2)
d4 = dict([(1, 2), (3, 4)])  # a list of key/value pairs
d5 = dict({1:2, 3:4})
|
StarcoderdataPython
|
1903905
|
<filename>src/el21uptime.py
#!/usr/bin/env python3
# Copyright 2020 Enapter, <NAME> <<EMAIL>>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from pymodbus.client.sync import ModbusTcpClient
ip = sys.argv[1]
PORT = 502
REGISTER = 22 # How long the system has been running (seconds)
device = ModbusTcpClient(ip, PORT)
firmware = device.read_input_registers(REGISTER, 2, unit=1)
uptime = firmware.registers[1] | (firmware.registers[0] << 16)
print(uptime, 'seconds')
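# Illustration only: the two 16-bit input registers are combined into one 32-bit
# value (high word first); a timedelta gives a human-readable form of it.
import datetime
print(datetime.timedelta(seconds=uptime))  # e.g. "1 day, 3:46:40" for 100000 s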
|
StarcoderdataPython
|
9690655
|
from setuptools import find_packages, setup
install_requires = [
'Django>=3.2,<4',
'attrs',
'djangorestframework>=3,<4',
'requests>=2.7',
]
docs_require = [
'sphinx>=1.5.2',
]
tests_require = [
'freezegun==1.1.0',
'pretend==1.0.9',
"pytest-cov==2.11.1",
"pytest-django==4.1.0",
"pytest==6.1.2",
"requests-mock==1.8.0",
"coverage==5.3",
# Linting
"isort<5",
"flake8==3.8.4",
"flake8-blind-except==0.1.1",
"flake8-debugger==3.2.1",
]
setup(
name='django-postcode-lookup',
version='1.0',
description="Pluggable postcode lookup endpoint",
long_description=open('README.rst', 'r').read(),
url='https://github.com/labd/django-postcode-lookup',
author="<NAME>",
author_email="<EMAIL>",
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'docs': docs_require,
'test': tests_require,
},
entry_points={},
package_dir={'': 'src'},
packages=find_packages('src'),
include_package_data=True,
license='MIT',
keywords=["postcode", "api", "validation"],
classifiers=[
"Development Status :: 1 - Production/Stable",
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 3.2',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
zip_safe=False,
)
|
StarcoderdataPython
|
3517175
|
import re, json, os, requests
import browsercookie
import DataConfiguration as data
from git import Repo,remote
class Hackerrank:
HEADERS = {
'x-csrf-token': '',
'cookie': '',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36'
}
BASE_URL = 'https://www.hackerrank.com/rest/auth/login'
URL_HACKERRANK_PTS = 'https://www.hackerrank.com/domains/algorithms?filters%5Bstatus%5D%5B%5D=unsolved&badge_type=problem-solving'
URL_DATA = 'https://www.hackerrank.com/rest/contests/master/submissions/?offset=0&limit'
SUBMISSIONS = {}
LANGUAGE = {
'python3': {
'main': "if __name__ == '__main__':",
'import': ["#!/bin/python3", "import(.*?)(\n)+"],
'extension': "py",
'comment': "#"
},
'java8': {
'main': r"private static final Scanner scanner = new Scanner",
'import': [r"public class Solution {", "import(.*?)(\n)+"],
'extension': "java",
'comment': "//"
},
'java': {
'main': r"private static final Scanner scanner = new Scanner",
'import': [r"public class Solution {", "import(.*?)(\n)+"],
'extension': "java",
'comment': "//"
},
'C': {
'main': "int main()",
'import': ["", "#include(.*?)(\n)+"],
'extension': "c",
'comment': "//"
},
'JavaScript': {
'main': "function main()",
'import': ["", "'use strict';(.*?)// Complete"],
'extension': "js",
'comment': "//"
}
}
def __init__(self):
pass
# Gets the needed cookies to keep a session active on www.hackerrank.com
def setHackerrankCookies(self):
chrome_cookie = browsercookie.chrome()._cookies
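# The slicing below assumes the repr format of http.cookiejar Cookie objects
# ('<Cookie name=value for domain/>') and strips the prefix/suffix to keep 'name=value'.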
self.HEADERS['cookie'] = str(chrome_cookie['www.hackerrank.com']['/']['_hrank_session'])[8:-24] + ';' + str(chrome_cookie['.hackerrank.com']['/']['hackerrank_mixpanel_token'])[8:-21] + ';'
# Gets token from www.hackerrank.com
def setHackerrankToken(self):
login = requests.post(self.BASE_URL, data=data.HACKERRANK_LOGIN_DATA)
if re.findall('Invalid login or password', login.text):
raise Exception("Invalid login or password.")
else:
self.HEADERS['x-csrf-token'] = str(login.json()['csrf_token'])
# Create a file in the repository if it doesn't exist.
def createFile(self, name):
if not os.path.exists(name):
with open(name, 'w') as score_json:
score_json.write(" ")
# Gets the current rank and points from www.hackerrank.com.
def getScores(self) -> list:
scores_page = requests.get(self.URL_HACKERRANK_PTS, headers=self.HEADERS)
scores = re.findall('class="value">(.*?)<', scores_page.text)
if not scores:
raise Exception("No scores found.")
scores[1] = re.split('/', scores[1])[0]
return list(map(int, scores))
# Add the scores and rank on hackerrank to a JSON file.
def scoresToJson(self):
scores = self.getScores()
scores_json = {}
last_scores = ''
self.createFile("HackerRankScores.json")
with open("HackerRankScores.json", 'r') as scores_file:
scores_json = json.load(scores_file)
last_scores = (list(scores_json.keys())[-1])
if not scores_json[last_scores]['rank'] == scores[0] and not scores_json[last_scores]['points'] == scores[1]:
scores_json[str(int(last_scores) + 1)] = {'rank': scores[0], 'points': scores[1]}
with open("HackerRankScores.json", 'w') as scores_file:
json.dump(scores_json, scores_file)
# Gets the url for the successful challenges.
def getSubmissions(self):
nb_submission = json.loads(requests.get(url=self.URL_DATA, headers=self.HEADERS).content)['total']
get_track = requests.get(url=self.URL_DATA + "=" +str(nb_submission), headers=self.HEADERS)
data = json.loads(get_track.content)
for i in range(len(data['models'])):
name = data['models'][i]['challenge']['name']
sub_url = 'https://www.hackerrank.com/rest/contests/master/challenges/' + data['models'][i]['challenge']['slug'] + '/submissions/' + str(data['models'][i]['id'])
if data['models'][i]['status'] == "Accepted" and name not in self.SUBMISSIONS: self.SUBMISSIONS[name] = sub_url
# Gets the code from successful challenges. all(bool): fetch every successful submission instead of only the most recent, imp(bool): strip the import statements, main(bool): strip the main function.
def getCode(self, all: bool, imp: bool, main: bool):
if len(self.SUBMISSIONS) == 0: self.getSubmissions()
if all: all = len(list(self.SUBMISSIONS.keys()))
else: all = 1
# Gets the code of the last successful submission/all the last successful submissions.
for i in range(all):
key = list(self.SUBMISSIONS.keys())[i]
submission = requests.get(url=self.SUBMISSIONS[key], headers=self.HEADERS).json()['model']
code = submission['code']
lang = submission['language']
name = submission['name']
category = ''
if len(submission['badges']) > 0:
category = submission['badges'][0]['badge_name']
difficulty_Url = re.split('submissions', self.SUBMISSIONS[key])[0]
difficulty = requests.get(url=difficulty_Url, headers=self.HEADERS).json()['model']['difficulty_name']
# Create a description of the challenge: Type of challenge - Name of track - Difficulty.
description = category + " - " + name + " - " + difficulty
code = re.sub(self.LANGUAGE[lang]['comment']+'(.*?)(\n)', '', code)
# Remove the import statements.
if imp:
code = re.sub(self.LANGUAGE[lang]['import'][0], '', code)
code = re.sub(self.LANGUAGE[lang]['import'][1], '', code)
# Remove the main function.
if main:
code = re.split(self.LANGUAGE[lang]['main'], code)[0]
code = self.formatReturnToLine(code)
# Checks if the challenge has already been written in the corresponding language file.
hackerrank_file = ''
self.createFile(data.GITHUB_REPOSITORY['path'] + 'solutions.' + self.LANGUAGE[lang]['extension'])
with open(data.GITHUB_REPOSITORY['path'] + 'solutions.' + self.LANGUAGE[lang]['extension'], 'r') as f:
hackerrank_file = f.read()
# write the challenge code to the corresponding language file.
if not name in hackerrank_file:
code = '\n' + self.LANGUAGE[lang]['comment'] + " " + description + code
with open(data.GITHUB_REPOSITORY['path'] + 'solutions.' + self.LANGUAGE[lang]['extension'] , 'a') as f:
f.write(code)
# Format the return to line at the beginning and end of the code.
def formatReturnToLine(self, code: str = 'code') -> str:
code = re.sub("^(\n)*", "\n", code)
return re.sub("([\n]*)$", "\n\n", code)
# Check if the path is a valid directory.
# TODO: check if the repository is a cloned GitHub repository.
def isPathValid(self, path) -> bool:
if not os.path.isdir(path):
raise Exception("Directory does not exit.")
return True
# Push the repository to GitHub.
def pushToGitHub(self):
if self.isPathValid(data.GITHUB_REPOSITORY['path']):
try:
repo = Repo(data.GITHUB_REPOSITORY['path'])
repo.git.add(update=True)
changedFiles = [ item.a_path for item in repo.index.diff(repo.head.commit) ]
if len(changedFiles) > 0:
repo.index.commit(data.GITHUB_REPOSITORY['commit message'])
origin = repo.remote(name='origin')
origin.push()
print('File(s) push from script succeeded')
else: print('File(s) unchanged. No push executed')
except:
print('Some error occurred while pushing the file(s)')
if __name__ == "__main__":
s = Hackerrank()
s.setHackerrankCookies()
if data.GET_SCORES:
s.getScores()
s.scoresToJson()
s.getCode(all=data.GET_ALL_SUCCESSFUL_CHALLENGES, imp=data.REMOVE_IMP_STATEMENT, main=data.REMOVE_IMP_STATEMENT)
if data.PUSH_TO_GET_HUB: s.pushToGitHub()
|
StarcoderdataPython
|
230413
|
<reponame>konichar/covidus<gh_stars>0
from django.forms import ModelForm
from covidus_main.models import Profile
class ProfileForm(ModelForm):
class Meta:
model = Profile
fields = '__all__'
def __str__(self):
return self.name
|
StarcoderdataPython
|
11270100
|
<gh_stars>100-1000
#!/usr/bin/env python3
import argparse
import sys
from tabulate import tabulate
import yaml
import os
import generatehelpers
config = {}
def gen_sig_table(oqslibdocdir):
liboqs_sig_docs_dir = os.path.join(oqslibdocdir, 'algorithms', 'sig')
liboqs_sigs = {}
for root, _, files in os.walk(liboqs_sig_docs_dir):
for fil in files:
if fil.endswith(".yml"):
with open(os.path.join(root, fil), mode='r', encoding='utf-8') as f:
algyml = yaml.safe_load(f.read())
liboqs_sigs[algyml['name']]=algyml
table = [['Algorithm', 'Implementation Version',
'NIST round', 'Claimed NIST Level', 'Code Point', 'OID']]
claimed_nist_level = 0
for sig in sorted(config['sigs'], key=lambda s: s['family']):
for variant in sig['variants']:
if variant['security'] == 128:
claimed_nist_level = 1
elif variant['security'] == 192:
claimed_nist_level = 3
elif variant['security'] == 256:
claimed_nist_level = 5
else:
sys.exit("variant['security'] value malformed.")
if sig['family'].startswith('SPHINCS'):
sig['family'] = 'SPHINCS+'
table.append([variant['name'], liboqs_sigs[sig['family']]['spec-version'],
liboqs_sigs[sig['family']]['nist-round'], claimed_nist_level, variant['code_point'],
variant['oid']])
for hybrid in variant['mix_with']:
table.append([variant['name'] + ' **hybrid with** ' + hybrid['name'],
liboqs_sigs[sig['family']]['spec-version'],
liboqs_sigs[sig['family']]['nist-round'],
claimed_nist_level,
hybrid['code_point'],
hybrid['oid']])
with open(os.path.join('oqs-template', 'oqs-sig-info.md'), mode='w', encoding='utf-8') as f:
f.write(tabulate(table, tablefmt="pipe", headers="firstrow"))
print("Written oqs-sig-info.md")
def gen_kem_table(oqslibdocdir):
liboqs_kem_docs_dir = os.path.join(oqslibdocdir, 'algorithms', 'kem')
liboqs_kems = {}
for root, _, files in os.walk(liboqs_kem_docs_dir):
for fil in files:
if fil.endswith(".yml"):
with open(os.path.join(root, fil), mode='r', encoding='utf-8') as f:
algyml = yaml.safe_load(f.read())
liboqs_kems[algyml['name']]=algyml
liboqs_kems['SIDH']=liboqs_kems['SIKE']
# TODO: Workaround for wrong upstream name for Kyber:
liboqs_kems['CRYSTALS-Kyber']=liboqs_kems['Kyber']
table_header = ['Family', 'Implementation Version', 'Variant', 'NIST round', 'Claimed NIST Level',
'Code Point', 'Hybrid Elliptic Curve (if any)']
table = []
hybrid_elliptic_curve = ''
for kem in sorted(config['kems'], key=lambda k: k['family']):
if kem['bit_security'] == 128:
claimed_nist_level = 1
hybrid_elliptic_curve = 'secp256_r1'
elif kem['bit_security'] == 192:
claimed_nist_level = 3
hybrid_elliptic_curve = 'secp384_r1'
elif kem['bit_security'] == 256:
claimed_nist_level = 5
hybrid_elliptic_curve = 'secp521_r1'
else:
sys.exit("kem['bit_security'] value malformed.")
if 'implementation_version' in kem:
implementation_version = kem['implementation_version']
else:
implementation_version = liboqs_kems[kem['family']]['spec-version']
try:
table.append([kem['family'], implementation_version,
kem['name_group'], liboqs_kems[kem['family']]['nist-round'], claimed_nist_level,
kem['nid'], ""])
table.append([kem['family'], implementation_version,
kem['name_group'], liboqs_kems[kem['family']]['nist-round'], claimed_nist_level,
kem['nid_hybrid'], hybrid_elliptic_curve])
except KeyError as ke:
# Non-existent NIDs mean this alg is not supported any more
pass
if 'extra_nids' in kem:
if 'current' in kem['extra_nids']: # assume "current" NIDs to mean liboqs-driven NIST round information:
for entry in kem['extra_nids']['current']:
table.append([kem['family'], implementation_version,
kem['name_group'], liboqs_kems[kem['family']]['nist-round'], claimed_nist_level,
entry['nid'],
entry['hybrid_group'] if 'hybrid_group' in entry else ""])
if 'old' in kem['extra_nids']: # assume "old" submissions to mean NIST round 2:
for entry in kem['extra_nids']['old']:
table.append([kem['family'], entry['implementation_version'],
kem['name_group'], 2, claimed_nist_level,
entry['nid'],
entry['hybrid_group'] if 'hybrid_group' in entry else ""])
# sort by: family, version, security level, variant, hybrid
table.sort(key = lambda row: "{:s}|{:s}|{:d}|{:s}|{:s}".format(row[0], row[1], row[3], row[2], row[5]))
table = [table_header] + table
with open(os.path.join('oqs-template', 'oqs-kem-info.md'), mode='w', encoding='utf-8') as f:
f.write(tabulate(table, tablefmt="pipe", headers="firstrow"))
f.write("\n")
print("Written oqs-kem-info.md")
# main:
with open(os.path.join('oqs-template', 'generate.yml'), mode='r', encoding='utf-8') as f:
config = yaml.safe_load(f.read())
if 'LIBOQS_DOCS_DIR' not in os.environ:
parser = argparse.ArgumentParser()
parser.add_argument('--liboqs-docs-dir', dest="liboqs_docs_dir", required=True)
args = parser.parse_args()
oqsdocsdir = args.liboqs_docs_dir
else:
oqsdocsdir = os.environ["LIBOQS_DOCS_DIR"]
config = generatehelpers.complete_config(config, oqsdocsdir)
gen_kem_table(oqsdocsdir)
gen_sig_table(oqsdocsdir)
|
StarcoderdataPython
|
11278239
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEbppAccountBalanceQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayEbppAccountBalanceQueryResponse, self).__init__()
self._account = None
self._available_money = None
self._balance = None
self._date = None
self._freeze_money = None
self._request_time = None
@property
def account(self):
return self._account
@account.setter
def account(self, value):
self._account = value
@property
def available_money(self):
return self._available_money
@available_money.setter
def available_money(self, value):
self._available_money = value
@property
def balance(self):
return self._balance
@balance.setter
def balance(self, value):
self._balance = value
@property
def date(self):
return self._date
@date.setter
def date(self, value):
self._date = value
@property
def freeze_money(self):
return self._freeze_money
@freeze_money.setter
def freeze_money(self, value):
self._freeze_money = value
@property
def request_time(self):
return self._request_time
@request_time.setter
def request_time(self, value):
self._request_time = value
def parse_response_content(self, response_content):
response = super(AlipayEbppAccountBalanceQueryResponse, self).parse_response_content(response_content)
if 'account' in response:
self.account = response['account']
if 'available_money' in response:
self.available_money = response['available_money']
if 'balance' in response:
self.balance = response['balance']
if 'date' in response:
self.date = response['date']
if 'freeze_money' in response:
self.freeze_money = response['freeze_money']
if 'request_time' in response:
self.request_time = response['request_time']
|
StarcoderdataPython
|
4952388
|
<filename>neurokernel/version.py
import pkg_resources
__version__ = pkg_resources.require('neurokernel')[0].version
|
StarcoderdataPython
|
6511477
|
import logging
from .base import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
for config in LOGGING['loggers'].values():
config['level'] = "WARNING"
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%d.%m %H:%M:%S',
)
logging.info("Test settings loaded.")
|
StarcoderdataPython
|
3581554
|
from typing import List, Union, Dict
import os
from engine.data_sources.base_db import BaseDB
from engine.data_sources.base_source import BaseSource, GUIDMissing
from engine.data_sources.base_table import BaseTable
from engine.data_sources.local_fs.local_fs_database import LocalFSDatabase
from engine.data_sources.local_fs.local_fs_table import LocalFSTable
from engine.data_sources.local_fs.local_fs_utils import get_column_sample_from_csv_file, correct_file_ending
class LocalFsSource(BaseSource):
def __init__(self, base_folder_path: str):
self.__base_folder_path = base_folder_path
self.__dbs: Dict[object, BaseDB] = dict()
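# Collect the name of every subdirectory under the base folder (each one is treated
# as a database GUID), excluding the base folder itself.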
self.__db_guids: list = list(filter(lambda x: x != self.__base_folder_path.split(os.sep)[-1],
[x[0].split(os.sep)[-1] for x in os.walk(self.__base_folder_path)]))
def contains_db(self, guid: object) -> bool:
if guid in self.__db_guids:
return True
return False
def get_db(self, guid: object, load_data: bool = True) -> BaseDB:
if guid not in self.__db_guids:
raise GUIDMissing
if guid not in self.__dbs:
self.__dbs[guid] = LocalFSDatabase(self.__base_folder_path, str(guid))
return self.__dbs[guid]
def get_all_dbs(self, load_data: bool = True) -> Dict[object, BaseDB]:
for db_name in self.__db_guids:
self.__dbs[db_name] = LocalFSDatabase(self.__base_folder_path, db_name)
return self.__dbs
def get_db_table(self, guid: object, db_guid: object = None, load_data: bool = True) -> Union[BaseDB, BaseTable]:
if db_guid is None:
raise GUIDMissing
try:
table_path = self.__base_folder_path + os.sep + str(db_guid) + os.sep + correct_file_ending(str(guid))
table: LocalFSTable = LocalFSTable(table_path, correct_file_ending(str(guid)), str(db_guid),
load_data)
except FileNotFoundError:
raise GUIDMissing
else:
return table
def get_column_sample(self, db_name: str, table_name: str, column_name: str, n: int = 10) -> List:
table_path = self.__base_folder_path + os.sep + db_name + os.sep + correct_file_ending(table_name)
return get_column_sample_from_csv_file(table_path, column_name, n)
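# A minimal usage sketch (the folder, database and column names below are hypothetical):
if __name__ == '__main__':
    source = LocalFsSource('/data/csv_dbs')
    for db_guid, db in source.get_all_dbs().items():
        print(db_guid, type(db).__name__)
    # Sample five values from a column of one CSV-backed table
    print(source.get_column_sample('sales_db', 'orders.csv', 'amount', n=5))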
|
StarcoderdataPython
|
380839
|
<gh_stars>0
import os
import logging
try:
from urllib2 import urlopen
from urllib2 import URLError
except ImportError:
from urllib.request import urlopen
from urllib.error import URLError
from collections import namedtuple
from lxml import etree
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
import threading
from .utils import Base, Bundle
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
DatasetIdentifier = namedtuple("DatasetIdentifier", ['repository', 'id'])
class Contact(Bundle):
pass
class Species(Bundle):
pass
class Dataset(Base):
def __init__(self, id, summary, species=None, instruments=None, contacts=None, dataset_files=None):
self.id = id
self.summary = summary
self.species = species
self.instruments = instruments
self.contacts = contacts
self.dataset_files = dataset_files
def __iter__(self):
return iter(self.dataset_files)
def __getitem__(self, i):
return self.dataset_files[i]
def __len__(self):
return len(self.dataset_files)
def download(self, destination=None, filter=None, threads=None):
if threads is None:
threads = 0
if destination is None:
destination = '.'
if threads == 1:
for data_file in self:
if filter is not None and filter(data_file):
continue
logger.info(
"Downloading %s to %s",
data_file.name,
os.path.join(destination, data_file.name))
data_file.download(
os.path.join(destination, data_file.name))
else:
inqueue = Queue()
for data_file in self:
if filter is not None and filter(data_file):
continue
inqueue.put((data_file, os.path.join(destination, data_file.name)))
def _work():
while 1:
try:
data_file, destination = inqueue.get(False, 3.0)
logger.info("Downloading %s to %s", data_file.name, destination)
try:
data_file.download(destination)
except URLError as err:
logger.error("An error occurred, retrying", exc_info=True)
import time
time.sleep(2)
data_file.download(destination)
except Empty:
break
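# A non-positive thread count means one worker thread per file.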
if threads <= 0:
threads = len(self)
workers = []
for i in range(threads):
worker = threading.Thread(target=_work)
worker.start()
workers.append(worker)
for worker in workers:
worker.join()
@staticmethod
def parse_identifier_list(node):
return [DatasetIdentifier(x[0].attrib['name'], x[0].attrib['value']) for x in node.findall(".//DatasetIdentifier")]
@staticmethod
def parse_species_list(node):
species = []
for spec in node.findall(".//Species"):
entry = Species()
for param in spec:
param = param.attrib
entry[param['name'].replace("taxonomy: ", "")] = param['value']
species.append(entry)
return species
@staticmethod
def parse_instrument_list(node):
instruments = []
for inst in node.findall(".//Instrument"):
entry = {}
entry['id'] = inst.attrib['id']
for param in (p.attrib for p in inst):
entry[param['name']] = param.get('value', True)
instruments.append(entry)
return instruments
@staticmethod
def parse_contacts_list(node):
contacts = []
for contact in node.findall(".//Contact"):
entry = Contact()
for param in (p.attrib for p in contact):
entry[param['name']] = param.get('value', True)
contacts.append(entry)
return contacts
@classmethod
def from_xml(cls, node):
return cls(
id=node.attrib['id'],
summary=DatasetSummary.from_xml(node.find("./DatasetSummary")),
species=cls.parse_species_list(node),
instruments=cls.parse_instrument_list(node),
contacts=cls.parse_contacts_list(node),
dataset_files=[DatasetFile.from_xml(
n) for n in node.findall(".//DatasetFile")],
)
@classmethod
def get(cls, accession):
url = "http://proteomecentral.proteomexchange.org/cgi/GetDataset?ID={accession}&outputMode=XML&test=no"
url = url.format(accession=accession)
fd = urlopen(url)
return cls.from_xml(etree.parse(fd).getroot())
get = Dataset.get
class DatasetSummary(Base):
def __init__(self, title, hosting_repository, description, review_level, repository_support):
self.title = title
self.hosting_repository = hosting_repository
self.description = description
self.review_level = review_level
self.repository_support = repository_support
@classmethod
def from_xml(cls, node):
return cls(
title=node.attrib['title'],
hosting_repository=node.attrib['hostingRepository'],
description=node.find("Description").text,
review_level=node.find("ReviewLevel")[0].attrib['name'],
repository_support=node.find("RepositorySupport")[
0].attrib['name'],
)
class DatasetFile(Base):
MAX_LEN_DISPLAY = 256
def __init__(self, id, name, file_type, uri):
self.id = id
self.name = name
self.file_type = file_type
self.uri = uri
@classmethod
def from_xml(cls, node):
return cls(
node.attrib['id'],
node.attrib['name'],
node.find(".//cvParam").attrib['name'].replace("URI", "").strip(),
node.find(".//cvParam").attrib['value']
)
def download(self, destination=None):
if destination is None:
destination = self.name
if not hasattr(destination, 'write'):
fh = open(destination, 'wb')
else:
fh = destination
source = urlopen(self.uri)
with fh:
chunk_size = 2 ** 16
chunk = source.read(chunk_size)
while chunk:
fh.write(chunk)
chunk = source.read(chunk_size)
|
StarcoderdataPython
|
278780
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import os
import sys
import tempfile
import setuptools
from setuptools.command.build_ext import build_ext as _build_ext
__all__ = ["build_ext"]
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
with tempfile.NamedTemporaryFile("w", suffix=".cpp") as f:
f.write("int main (int argc, char **argv) { return 0; }")
f.flush()
try:
obj = compiler.compile([f.name], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
if not os.path.exists(obj[0]):
return False
return True
def has_library(compiler, libname):
"""Return a boolean indicating whether a library is found."""
with tempfile.NamedTemporaryFile("w", suffix=".cpp") as srcfile:
srcfile.write("int main (int argc, char **argv) { return 0; }")
srcfile.flush()
outfn = srcfile.name + ".so"
try:
compiler.link_executable(
[srcfile.name],
outfn,
libraries=[libname],
)
except setuptools.distutils.errors.LinkError:
return False
if not os.path.exists(outfn):
return False
os.remove(outfn)
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14] compiler flag.
C++14 is preferred over C++11 (when it is available).
"""
if has_flag(compiler, "-std=c++14"):
return "-std=c++14"
elif has_flag(compiler, "-std=c++11"):
return "-std=c++11"
else:
raise RuntimeError("Unsupported compiler -- at least C++11 support "
"is needed!")
class build_ext(_build_ext):
"""
A custom extension builder that finds the include directories for Eigen
before compiling.
"""
c_opts = {
"msvc": ["/EHsc"],
"unix": [],
}
def build_extensions(self):
# Add the numpy and pybind11 include directories
import numpy
import pybind11
include_dirs = [
numpy.get_include(),
pybind11.get_include(False),
pybind11.get_include(True),
]
# Find FFTW headers
dirs = include_dirs + self.compiler.include_dirs
for ext in self.extensions:
dirs += ext.include_dirs
dirs += [
os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(
sys.executable))), "include")]
print(dirs)
found_fftw = False
for d in dirs:
if os.path.exists(os.path.join(d, "fftw3.h")):
print("found 'fftw3' in '{0}'".format(d))
include_dirs += [d]
found_fftw = True
break
if not found_fftw:
raise RuntimeError("could not find the required library 'fftw3'")
for ext in self.extensions:
ext.include_dirs += include_dirs
# Set up pybind11
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
if ct == "unix":
opts.append("-DVERSION_INFO=\"{0:s}\""
.format(self.distribution.get_version()))
print("testing C++14/C++11 support")
opts.append(cpp_flag(self.compiler))
libraries = ["fftw3", "m", "stdc++", "c++"]
# Check for OpenMP support first.
if has_flag(self.compiler, "-fopenmp"):
print("found omp...")
libraries += ["gomp", "fftw3_threads", "fftw3_omp"]
# Add the libraries
print("checking libraries...")
libraries = [lib for lib in libraries
if has_library(self.compiler, lib)]
print("libraries: {0}".format(libraries))
for ext in self.extensions:
ext.libraries += libraries
flags = ["-O3", "-Ofast", "-stdlib=libc++", "-fvisibility=hidden",
"-Wno-unused-function", "-Wno-uninitialized",
"-Wno-unused-local-typedefs", "-funroll-loops",
"-fopenmp"]
# Mac specific flags and libraries
if sys.platform == "darwin":
flags += ["-march=native", "-mmacosx-version-min=10.9"]
for ext in self.extensions:
ext.extra_link_args += ["-mmacosx-version-min=10.9",
"-march=native"]
# Check the flags
print("testing compiler flags")
for flag in flags:
if has_flag(self.compiler, flag):
opts.append(flag)
elif ct == "msvc":
opts.append('/DVERSION_INFO=\\"{0:s}\\"'
.format(self.distribution.get_version()))
for ext in self.extensions:
ext.extra_compile_args += opts
# Run the standard build procedure.
_build_ext.build_extensions(self)
|
StarcoderdataPython
|
4905181
|
<reponame>sopherapps/judah
"""Module containing tests for the quarter based Exports Site data source"""
import os
from collections.abc import Iterator
from datetime import date
from typing import Optional
from unittest import TestCase, main
from unittest.mock import patch, Mock, call
from selenium import webdriver
from judah.sources.export_site.quarter_based import QuarterBasedExportSiteSource
from judah.utils.dates import convert_date_to_quarter_year_tuple, update_quarter_year_tuple
_PARENT_FOLDER = os.path.dirname(__file__)
_MOCK_ASSET_FOLDER_PATH = os.path.join(os.path.dirname(os.path.dirname(_PARENT_FOLDER)), 'assets')
class ChildQuarterBasedExportSiteSource(QuarterBasedExportSiteSource):
"""Child export site source that just picks a file from the file system"""
base_uri: str = 'http://example.com'
name: str = 'test_export_site_source'
def _download_file(self, start_date: date, end_date: date) -> Optional[str]:
"""Downloads the CSV from the export site and returns the path to it"""
return None
class TestQuarterBasedExportSiteSource(TestCase):
"""Tests for the QuarterBasedExportSiteSource"""
def setUp(self) -> None:
"""Initialize some variables"""
self.mock_csv_file_path = os.path.join(_MOCK_ASSET_FOLDER_PATH, 'mock.csv')
self.expected_data = [
{"Date": "09/03/2020", "number": "1", "period_from": "00:00", "period_until": "00:15", "Capacity": "16616"},
{"Date": "09/03/2020", "number": "2", "period_from": "00:15", "period_until": "00:30", "Capacity": "16616"},
{"Date": "09/03/2020", "number": "3", "period_from": "00:30", "period_until": "00:45", "Capacity": "16616"},
{"Date": "09/03/2020", "number": "4", "period_from": "00:45", "period_until": "01:00", "Capacity": "16620"},
]
@patch('judah.sources.export_site.quarter_based.visit_website')
def test_initialize_chrome(self, mock_visit_website):
"""
Should initialize Chrome in case it is not yet initialized and visits the base url
"""
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
self.assertIsNone(quarter_based_export_site_source.chrome)
quarter_based_export_site_source._initialize_chrome()
self.assertIsInstance(quarter_based_export_site_source.chrome, webdriver.Chrome)
mock_visit_website.assert_called_once_with(
driver=quarter_based_export_site_source.chrome, website_url=quarter_based_export_site_source.base_uri)
quarter_based_export_site_source.chrome.close()
quarter_based_export_site_source.chrome.quit()
@patch.object(ChildQuarterBasedExportSiteSource, '_initialize_chrome')
def test_del(self, mock_initialize_chrome):
"""
Should quit chrome on deletion
"""
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
quarter_based_export_site_source.chrome = Mock(spec=webdriver.Chrome)
quarter_based_export_site_source.__del__()
quarter_based_export_site_source.chrome.quit.assert_called_once()
@patch('judah.sources.export_site.quarter_based.delete_parent_folder')
@patch.object(ChildQuarterBasedExportSiteSource, '_initialize_chrome')
@patch.object(ChildQuarterBasedExportSiteSource, '_download_file')
def test_query_data_source(self, mock_download_file, mock_initialize_chrome, mock_delete_parent_folder):
"""
Should query a given start_quarter_and_year and end_quarter_and_year
and return an iterator with data records and then deletes folder
"""
# initializations
mock_download_file.return_value = self.mock_csv_file_path
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
start_quarter_and_year = (1, 2020,)
end_quarter_and_year = (3, 2020,)
# method call
response = quarter_based_export_site_source._query_data_source(
start_quarter_and_year=start_quarter_and_year, end_quarter_and_year=end_quarter_and_year)
self.assertIsInstance(response, Iterator)
data = [record for record in response]
# assertions
mock_initialize_chrome.assert_called_once()
mock_download_file.assert_called_once_with(
start_quarter_and_year=start_quarter_and_year, end_quarter_and_year=end_quarter_and_year)
mock_delete_parent_folder.assert_called_once_with(self.mock_csv_file_path)
self.assertListEqual(data, self.expected_data)
@patch('judah.sources.export_site.quarter_based.delete_parent_folder')
@patch.object(ChildQuarterBasedExportSiteSource, '_initialize_chrome')
@patch.object(ChildQuarterBasedExportSiteSource, '_download_file')
def test_query_data_source_no_file_downloaded(self, mock_download_file, mock_initialize_chrome,
mock_delete_parent_folder):
"""
Should query a given start_quarter_and_year and end_quarter_and_year
and return an empty iterator if there is no file downloaded
"""
# initializations
mock_download_file.return_value = None
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
start_quarter_and_year = (1, 2020,)
end_quarter_and_year = (3, 2020,)
# method call
response = quarter_based_export_site_source._query_data_source(
start_quarter_and_year=start_quarter_and_year, end_quarter_and_year=end_quarter_and_year)
self.assertIsInstance(response, Iterator)
data = [record for record in response]
# assertions
mock_initialize_chrome.assert_called_once()
mock_download_file.assert_called_once_with(
start_quarter_and_year=start_quarter_and_year, end_quarter_and_year=end_quarter_and_year)
mock_delete_parent_folder.assert_not_called()
self.assertListEqual(data, [])
@patch.object(ChildQuarterBasedExportSiteSource, '_query_data_source')
@patch.object(ChildQuarterBasedExportSiteSource, '_get_next_end_quarter_and_year')
@patch.object(ChildQuarterBasedExportSiteSource, '_get_next_start_quarter_and_year')
def test_get(self, mock_get_next_start_quarter_and_year,
mock_get_next_end_quarter_and_year, mock_query_data_source):
"""
Should return data from a given date to a given date as an iterator
"""
start_quarter_and_year = (1, 2020,)
end_quarter_and_year = (3, 2020,)
mock_get_next_start_quarter_and_year.return_value = start_quarter_and_year
mock_get_next_end_quarter_and_year.return_value = end_quarter_and_year
mock_query_data_source.return_value = (record for record in self.expected_data)
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
response = quarter_based_export_site_source.get()
self.assertIsInstance(response, Iterator)
data = [record for record in response]
self.assertListEqual(data, self.expected_data)
mock_get_next_start_quarter_and_year.assert_has_calls(
[call(), call(quarters_to_increment_by=quarter_based_export_site_source.default_batch_size_in_quarters)])
mock_get_next_end_quarter_and_year.assert_called_once()
@patch.object(ChildQuarterBasedExportSiteSource, '_query_data_source')
@patch.object(ChildQuarterBasedExportSiteSource, '_get_next_end_quarter_and_year')
@patch.object(ChildQuarterBasedExportSiteSource, '_get_next_start_quarter_and_year')
def test_get_end_quarter_earlier_than_start_quarter(self, mock_get_next_start_quarter_and_year,
mock_get_next_end_quarter_and_year, mock_query_data_source):
"""
Should return an iterator of an empty list
if the end_quarter_and_year is earlier than start_quarter_and_year
"""
start_quarter_and_year = (1, 2020,)
end_quarter_and_year = (3, 2019,)
mock_get_next_start_quarter_and_year.return_value = start_quarter_and_year
mock_get_next_end_quarter_and_year.return_value = end_quarter_and_year
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
response = quarter_based_export_site_source.get()
self.assertIsInstance(response, Iterator)
data = [record for record in response]
self.assertListEqual(data, [])
mock_get_next_start_quarter_and_year.assert_called_once()
mock_get_next_end_quarter_and_year.assert_called_once()
mock_query_data_source.assert_not_called()
def test_get_next_start_quarter_and_year_with_no_initial_quarter_year_tuples(self):
"""
Should get the next start_quarter_and_year tuple as current quarter, year
minus the default_batch_size_in_quarters plus net quarter increment
when only default_batch_size_in_quarters is given
"""
today = date.today()
current_quarter_and_year_tuple = convert_date_to_quarter_year_tuple(today)
quarters_to_increment_by = 4
quarters_to_decrement_by = 8
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
net_quarters_to_increment_by = (
quarters_to_increment_by
- quarters_to_decrement_by
- quarter_based_export_site_source.default_batch_size_in_quarters)
next_start_quarter_and_year = quarter_based_export_site_source._get_next_start_quarter_and_year(
quarters_to_increment_by=quarters_to_increment_by, quarters_to_decrement_by=quarters_to_decrement_by)
expected_next_start_quarter_and_year = update_quarter_year_tuple(
current_quarter_and_year_tuple, quarters_to_increment_by=net_quarters_to_increment_by,
quarters_to_decrement_by=0)
self.assertTupleEqual(next_start_quarter_and_year, expected_next_start_quarter_and_year)
def test_get_next_start_quarter_and_year_given_end_quarter_and_year(self):
"""
Should get the next start_quarter_and_year tuple as end_quarter_and_year tuple
minus the default_batch_size_in_quarters
plus net quarter increment when default_batch_size_in_quarters and end_quarter_and_year_tuple are given
"""
end_quarter_and_year = (2, 2019,)
quarters_to_increment_by = 4
quarters_to_decrement_by = 8
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
quarter_based_export_site_source.end_quarter_and_year = end_quarter_and_year
net_quarters_to_increment_by = (
quarters_to_increment_by
- quarters_to_decrement_by
- quarter_based_export_site_source.default_batch_size_in_quarters)
next_start_quarter_and_year = quarter_based_export_site_source._get_next_start_quarter_and_year(
quarters_to_increment_by=quarters_to_increment_by, quarters_to_decrement_by=quarters_to_decrement_by)
expected_next_start_quarter_and_year = update_quarter_year_tuple(
end_quarter_and_year, quarters_to_increment_by=net_quarters_to_increment_by,
quarters_to_decrement_by=0)
self.assertTupleEqual(next_start_quarter_and_year, expected_next_start_quarter_and_year)
def test_get_next_start_quarter_and_year_with_initial_start_quarter_and_year(self):
"""
Should get the next start_quarter_and_year tuple as start_quarter_and_year
plus net quarter increment when start_quarter_and_year is given,
regardless of end_quarter_and_year and default_batch_size_in_quarters
"""
end_quarter_and_year = (1, 2019)
start_quarter_and_year = (3, 2020,)
quarters_to_increment_by = 4
quarters_to_decrement_by = 15
net_quarters_to_increment_by = quarters_to_increment_by - quarters_to_decrement_by
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
quarter_based_export_site_source.end_quarter_and_year = end_quarter_and_year
quarter_based_export_site_source.start_quarter_and_year = start_quarter_and_year
next_start_quarter_and_year = quarter_based_export_site_source._get_next_start_quarter_and_year(
quarters_to_increment_by=quarters_to_increment_by, quarters_to_decrement_by=quarters_to_decrement_by)
expected_next_start_quarter_and_year = update_quarter_year_tuple(
start_quarter_and_year, quarters_to_increment_by=net_quarters_to_increment_by,
quarters_to_decrement_by=0)
self.assertTupleEqual(next_start_quarter_and_year, expected_next_start_quarter_and_year)
def test_get_next_end_quarter_and_year_with_no_initial_quarter_year_tuples(self):
"""
Should get the next end_quarter_and_year as current quarter_and_year tuple plus net quarter increment
when only default_batch_size_in_quarters is given
Assumption: current quarter_and_year tuple is end_quarter_and_year if no increment or decrement
"""
today = date.today()
current_quarter_and_year_tuple = convert_date_to_quarter_year_tuple(today)
quarters_to_increment_by = 4
quarters_to_decrement_by = 9
net_quarters_to_increment_by = quarters_to_increment_by - quarters_to_decrement_by
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
next_end_quarter_and_year = quarter_based_export_site_source._get_next_end_quarter_and_year(
quarters_to_increment_by=quarters_to_increment_by, quarters_to_decrement_by=quarters_to_decrement_by)
expected_next_end_quarter_and_year = update_quarter_year_tuple(
current_quarter_and_year_tuple, quarters_to_increment_by=net_quarters_to_increment_by,
quarters_to_decrement_by=0)
self.assertTupleEqual(next_end_quarter_and_year, expected_next_end_quarter_and_year)
def test_get_next_end_quarter_and_year_given_start_quarter_and_year(self):
"""
Should get the next end quarter_and_year as start_quarter_and_year plus the net quarter increment
plus default_batch_size_in_quarters
when default_batch_size_in_quarters and start_quarter_and_year are given and no initial end_quarter_and_year is given
"""
start_quarter_and_year = (2, 2020)
quarters_to_increment_by = 4
quarters_to_decrement_by = 9
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
quarter_based_export_site_source.start_quarter_and_year = start_quarter_and_year
net_quarters_to_increment_by = (
quarters_to_increment_by
- quarters_to_decrement_by
+ quarter_based_export_site_source.default_batch_size_in_quarters
)
next_end_quarter_and_year = quarter_based_export_site_source._get_next_end_quarter_and_year(
quarters_to_increment_by=quarters_to_increment_by, quarters_to_decrement_by=quarters_to_decrement_by)
expected_next_end_quarter_and_year = update_quarter_year_tuple(
start_quarter_and_year, quarters_to_increment_by=net_quarters_to_increment_by,
quarters_to_decrement_by=0)
self.assertTupleEqual(next_end_quarter_and_year, expected_next_end_quarter_and_year)
def test_get_next_end_quarter_and_year_with_initial_end_quarter_and_year(self):
"""
Should get the next end_quarter_and_year as end_quarter_and_year plus
net quarter increment when end_quarter_and_year is given,
regardless of start_quarter_and_year and default_batch_size_in_quarters
"""
end_quarter_and_year = (1, 2020)
start_quarter_and_year = (3, 2020,)
quarters_to_increment_by = 4
quarters_to_decrement_by = 18
net_quarters_to_increment_by = quarters_to_increment_by - quarters_to_decrement_by
quarter_based_export_site_source = ChildQuarterBasedExportSiteSource()
quarter_based_export_site_source.end_quarter_and_year = end_quarter_and_year
quarter_based_export_site_source.start_quarter_and_year = start_quarter_and_year
next_end_quarter_and_year = quarter_based_export_site_source._get_next_end_quarter_and_year(
quarters_to_increment_by=quarters_to_increment_by, quarters_to_decrement_by=quarters_to_decrement_by)
expected_next_end_quarter_and_year = update_quarter_year_tuple(
end_quarter_and_year, quarters_to_increment_by=net_quarters_to_increment_by,
quarters_to_decrement_by=0)
self.assertTupleEqual(next_end_quarter_and_year, expected_next_end_quarter_and_year)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6440476
|
#!/usr/bin/env python3
from io import StringIO
from gentokenlookup import gentokenlookup
# copied from llhttp.h, and stripped trailing spaces and backslashes.
SRC = '''
XX(0, DELETE, DELETE)
XX(1, GET, GET)
XX(2, HEAD, HEAD)
XX(3, POST, POST)
XX(4, PUT, PUT)
XX(5, CONNECT, CONNECT)
XX(6, OPTIONS, OPTIONS)
XX(7, TRACE, TRACE)
XX(8, COPY, COPY)
XX(9, LOCK, LOCK)
XX(10, MKCOL, MKCOL)
XX(11, MOVE, MOVE)
XX(12, PROPFIND, PROPFIND)
XX(13, PROPPATCH, PROPPATCH)
XX(14, SEARCH, SEARCH)
XX(15, UNLOCK, UNLOCK)
XX(16, BIND, BIND)
XX(17, REBIND, REBIND)
XX(18, UNBIND, UNBIND)
XX(19, ACL, ACL)
XX(20, REPORT, REPORT)
XX(21, MKACTIVITY, MKACTIVITY)
XX(22, CHECKOUT, CHECKOUT)
XX(23, MERGE, MERGE)
XX(24, MSEARCH, M-SEARCH)
XX(25, NOTIFY, NOTIFY)
XX(26, SUBSCRIBE, SUBSCRIBE)
XX(27, UNSUBSCRIBE, UNSUBSCRIBE)
XX(28, PATCH, PATCH)
XX(29, PURGE, PURGE)
XX(30, MKCALENDAR, MKCALENDAR)
XX(31, LINK, LINK)
XX(32, UNLINK, UNLINK)
XX(33, SOURCE, SOURCE)
'''
if __name__ == '__main__':
methods = []
for line in StringIO(SRC):
line = line.strip()
if not line.startswith('XX'):
continue
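# Each entry has the form XX(num, NAME, STRING); split on the first two commas and keep the middle field (the method name).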
_, m, _ = line.split(',', 2)
methods.append(m.strip())
gentokenlookup(methods, 'HTTP_')
|
StarcoderdataPython
|
9733548
|
<reponame>sanjaynirmal/blue-marlin<filename>Processes/dlpredictor/tests/test_dlpredictor_system_errors_11/test_dlpredictor_system_errors_11.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ims_esclient import ESClient
from pyspark import SparkContext, SparkConf, Row
from pyspark.sql.functions import concat_ws, count, lit, col, udf, expr, collect_list
from pyspark.sql import HiveContext
import time
import pandas as pd
"""
Depending on the mode, this program gets the list of dense or sparse (non-dense) uckeys and computes the total predicted value for each slot-id.
spark-submit --master yarn --num-executors 2 --executor-cores 5 --jars lib/elasticsearch-hadoop-6.8.0.jar test_dlpredictor_system_errors_11.py
Author: Faezeh
"""
mode = "SPARSE" #"DENSE"
si_list = [
'06',
'11',
'05',
'04',
'03',
'02',
'01',
'l03493p0r3',
'x0ej5xhk60kjwq',
'g7m2zuits8',
'w3wx3nv9ow5i97',
'a1nvkhk62q',
'g9iv6p4sjy',
'c4n08ku47t',
'b6le0s4qo8',
'd9jucwkpr3',
'p7gsrebd4m',
'a8syykhszz',
'l2d4ec6csv',
'j1430itab9wj3b',
's4z85pd1h8',
'z041bf6g4s',
'<KEY>',
'a47eavw7ex',
'<KEY>',
'<KEY>',
'<KEY>',
'f1iprgyl13',
'q4jtehrqn2',
'm1040xexan',
'd971z9825e',
'a290af82884e11e5bdec00163e291137',
'w9fmyd5r0i',
'x2fpfbm8rt',
'e351de37263311e6af7500163e291137',
'k4werqx13k',
'5cd1c663263511e6af7500163e291137',
'17dd6d8098bf11e5bdec00163e291137',
'd4d7362e879511e5bdec00163e291137',
'15e9ddce941b11e5bdec00163e291137']
# uc = "native,s4z85pd1h8,WIFI,g_m,5,CPC,40,80"
sc = SparkContext()
hive_context = HiveContext(sc)
sc.setLogLevel('WARN')
# si = "s4z85pd1h8"
###################################### read es and get the query!!!###############################
# read es
es_host = '10.213.37.41'
es_port = '9200'
# This is a correct index
es_index = 'dlpredictor_05062021_predictions'
es_type = 'doc'
# Load the prediction counts into a dictionary.
start_time = time.time()
es = ESClient(es_host, es_port, es_index, es_type)
def calc(s):
command = """
SELECT
a.uckey
from dlpm_03182021_tmp_ts as a
join
dlpm_03182021_tmp_distribution as b
on a.uckey = b.uckey where a.si ='{}' and b.ratio = 1
""".format(s)
df = hive_context.sql(command)
dense_uckey = df.select('uckey').toPandas()
command = """
SELECT
a.uckey
from dlpm_03182021_tmp_ts as a
join
dlpm_03182021_tmp_distribution as b
on a.uckey = b.uckey where a.si = '{}' and b.ratio != 1
""".format(s)
df = hive_context.sql(command)
sparse_uckey = df.select('uckey').toPandas()
if mode == "DENSE":
uckey_list = dense_uckey.uckey
if mode == "SPARSE":
uckey_list = sparse_uckey.uckey
l = []
for uc in uckey_list:
body ={
"size": 100,
"query": {"bool": {"must": [
{"match": {
"_id": uc
}}
]}}
}
hits = es.search(body)
es_records = {}
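# es_records holds hourly values keyed by (uckey, day, hour, '0'..'3'), per-day sums keyed
# by (uckey, day, '0'..'3'), and the overall daily total keyed by (uckey, day).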
uckeys = []
predicted_days = set()
for ucdoc in hits:
uckey = ucdoc['uckey']
uckeys.append(uckey)
predictions = ucdoc['ucdoc']['predictions']
# predictions = ["2020-06-01"]
for day, hours in predictions.items():
predicted_days.add(day)
hour = -1
h0 = h1 = h2 = h3 = 0
for hour_doc in hours['hours']:
hour += 1
es_records[(uckey, day, hour, '0')] = hour_doc['h0']
es_records[(uckey, day, hour, '1')] = hour_doc['h1']
es_records[(uckey, day, hour, '2')] = hour_doc['h2']
es_records[(uckey, day, hour, '3')] = hour_doc['h3']
h0 += hour_doc['h0']
h1 += hour_doc['h1']
h2 += hour_doc['h2']
h3 += hour_doc['h3']
# print('h0: {} : {} h1: {} : {} h2: {} : {} h3: {} : {}'.format(
# hour_doc['h0'], h0, hour_doc['h1'], h1, hour_doc['h2'], h2, hour_doc['h3'], h3))
es_records[(uckey, day, '0')] = h0
es_records[(uckey, day, '1')] = h1
es_records[(uckey, day, '2')] = h2
es_records[(uckey, day, '3')] = h3
es_records[(uckey, day)] = h0 + h1 + h2 + h3
# print('daily: {} {} {} {} : {}'.format(h0, h1, h2, h3, h0+h1+h2+h3))
l.append([uckey,day, es_records[(uckey, day)]])
data = pd.DataFrame(l, columns = ['uckey','day','agg'])
agg_data = data.groupby(by = 'day').sum()
return agg_data
for si in si_list:
print(si, calc(s=si))
# agg_data.to_csv('/home/reza/faezeh/dense.csv')
|
StarcoderdataPython
|
288267
|
try:
import pygame, os, time
except:
print('cmd run: pip3 install pygame -i https://mirrors.aliyun.com/pypi/simple')
exit()
from pygame.locals import *
from game import Game
from ai import Ai
from config import *
# config = Development()
config = SupperFast()
FPS = config.FPS
SIZE = config.SIZE
DEBUG = config.DEBUG
colors = config.COLORS
GAME_WH = config.GAME_WH
WINDOW_W = config.WINDOW_W
WINDOW_H = config.WINDOW_H
# Font used inside the grid cells
font_h_w = 2 / 1
g_w = GAME_WH / SIZE * 0.9
# font = pygame.font.SysFont('microsoftyahei', 20)
class Main():
def __init__(self):
global FPS
pygame.init()
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (100, 50)
self.set_win_wh(WINDOW_W, WINDOW_H, title='2048')
self.state = 'start'
self.fps = FPS
self.catch_n = 0
self.clock = pygame.time.Clock()
self.game = Game(SIZE)
self.ai = Ai()
self.step_time = config.STEP_TIME
self.next_f = ''
self.last_time = time.time()
self.jm = -1
def start(self):
# Load the buttons
self.button_list = [
Button('start', '重新开始', (GAME_WH + 50, 150)),
Button('ai', '电脑托管', (GAME_WH + 50, 250)),
]
self.run()
def run(self):
while self.state != 'exit':
if self.game.state in ['over', 'win']:
self.state = self.game.state
self.my_event()
if self.next_f != '' and (
self.state == 'run' or self.state == 'ai' and time.time() - self.last_time > self.step_time):
self.game.run(self.next_f)
self.next_f = ''
self.last_time = time.time()
elif self.state == 'start':
self.game.start()
self.state = 'run'
self.set_bg((101, 194, 148))
self.draw_info()
self.draw_button(self.button_list)
self.draw_map()
self.update()
print('退出游戏')
def draw_map(self):
for y in range(SIZE):
for x in range(SIZE):
self.draw_block((x, y), self.game.grid.tiles[y][x])
if self.state == 'over':
pygame.draw.rect(self.screen, (0, 0, 0, 0.5),
(0, 0, GAME_WH, GAME_WH))
self.draw_text('游戏结束!', (GAME_WH / 2, GAME_WH / 2), size=25, center='center')
elif self.state == 'win':
pygame.draw.rect(self.screen, (0, 0, 0, 0.5),
(0, 0, GAME_WH, GAME_WH))
self.draw_text('胜利!', (GAME_WH / 2, GAME_WH / 2), size=25, center='center')
# Draw a single tile
def draw_block(self, xy, number):
one_size = GAME_WH / SIZE
dx = one_size * 0.05
x, y = xy[0] * one_size, xy[1] * one_size
# print(colors[str(int(number))])
color = colors[str(int(number))] if number <= 2048 else (0, 0, 255)
pygame.draw.rect(self.screen, color,
(x + dx, y + dx, one_size - 2 * dx, one_size - 2 * dx))
color = (20, 20, 20) if number <= 4 else (250, 250, 250)
if number != 0:
ln = len(str(number))
if ln == 1:
size = one_size * 1.2 / 2
elif ln <= 3:
size = one_size * 1.2 / ln
else:
size = one_size * 1.5 / ln
self.draw_text(str(int(number)), (x + one_size * 0.5, y + one_size * 0.5 - size / 2), color, size, 'center')
def draw_info(self):
self.draw_text('分数:{}'.format(self.game.score), (GAME_WH + 50, 40))
if self.state == 'ai':
self.draw_text('间隔:{}'.format(self.step_time), (GAME_WH + 50, 60))
self.draw_text('评分:{}'.format(self.jm), (GAME_WH + 50, 80))
def set_bg(self, color=(255, 255, 255)):
self.screen.fill(color)
def catch(self, filename=None):
if filename is None:
filename = "./catch/catch-{:04d}.png".format(self.catch_n)
pygame.image.save(self.screen, filename)
self.catch_n += 1
def draw_button(self, buttons):
for b in buttons:
if b.is_show:
pygame.draw.rect(self.screen, (180, 180, 200),
(b.x, b.y, b.w, b.h))
self.draw_text(b.text, (b.x + b.w / 2, b.y + 9), size=18, center='center')
def draw_text(self, text, xy, color=(0, 0, 0), size=18, center=None):
font = pygame.font.SysFont('simhei', round(size))
text_obj = font.render(text, 1, color)
text_rect = text_obj.get_rect()
if center == 'center':
text_rect.move_ip(xy[0] - text_rect.w // 2, xy[1])
else:
text_rect.move_ip(xy[0], xy[1])
# print('drawing text:', text, text_rect)
self.screen.blit(text_obj, text_rect)
# Set the window size
def set_win_wh(self, w, h, title='python游戏'):
self.screen2 = pygame.display.set_mode((w, h), pygame.DOUBLEBUF, 32)
self.screen = self.screen2.convert_alpha()
pygame.display.set_caption(title)
def update(self):
self.screen2.blit(self.screen, (0, 0))
# Refresh the display
# pygame.display.update()
pygame.display.flip()
time_passed = self.clock.tick(self.fps)
# Event handling
def my_event(self):
if self.state == 'ai' and self.next_f == '':
self.next_f, self.jm = self.ai.get_next(self.game.grid.tiles)
for event in pygame.event.get():
if event.type == QUIT:
self.state = 'exit'
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
self.state = 'exit'
elif event.key in [K_LEFT, K_a] and self.state == 'run':
self.next_f = 'L'
elif event.key in [K_RIGHT, K_d] and self.state == 'run':
self.next_f = 'R'
elif event.key in [K_DOWN, K_s] and self.state == 'run':
self.next_f = 'D'
elif event.key in [K_UP, K_w] and self.state == 'run':
self.next_f = 'U'
elif event.key in [K_k, K_l] and self.state == 'ai':
if event.key == K_k and self.step_time > 0:
self.step_time *= 0.9
if event.key == K_l and self.step_time < 10:
if self.step_time != 0:
self.step_time *= 1.1
else:
self.step_time = 0.01
if self.step_time < 0:
self.step_time = 0
if event.type == MOUSEBUTTONDOWN:
for i in self.button_list:
if i.is_click(event.pos):
self.state = i.name
if i.name == 'ai':
i.name = 'run'
i.text = '取消托管'
elif i.name == 'run':
i.name = 'ai'
i.text = '电脑托管'
break
def run():
Main().start()
# Button class
class Button(pygame.sprite.Sprite):
def __init__(self, name, text, xy, size=(100, 50)):
pygame.sprite.Sprite.__init__(self)
self.name = name
self.text = text
self.x, self.y = xy[0], xy[1]
self.w, self.h = size
self.is_show = True
def is_click(self, xy):
return (self.is_show and
self.x <= xy[0] <= self.x + self.w and
self.y <= xy[1] <= self.y + self.h)
if __name__ == '__main__':
run()
|
StarcoderdataPython
|
6573795
|
from django.contrib import admin
from .models import Member
@admin.register(Member)
class MemberAdmin(admin.ModelAdmin):
search_fields = ['first_name', 'last_name', 'email', 'website']
list_display = ('first_name', 'last_name', 'email')
list_display_links = ('first_name', 'last_name', 'email')
fieldsets = [
('Name', {'fields': ['first_name', 'last_name']}),
('Personal Information', {'fields': ['email', 'profile_image', 'website']}),
]
|
StarcoderdataPython
|
1877054
|
from .base import BaseModel, Scraper
from .popolo import Organization
from .schemas.jurisdiction import schema
from ..metadata import lookup
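# Map CamelCase scraper class names to the display names expected by the metadata lookup.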
_name_fixes = {
"SouthCarolina": "South Carolina",
"NorthCarolina": "North Carolina",
"SouthDakota": "South Dakota",
"NorthDakota": "North Dakota",
"RhodeIsland": "Rhode Island",
"NewHampshire": "New Hampshire",
"NewJersey": "New Jersey",
"NewYork": "New York",
"NewMexico": "New Mexico",
"WestVirginia": "West Virginia",
"PuertoRico": "Puerto Rico",
"DistrictOfColumbia": "District of Columbia",
"UnitedStates": "United States",
}
class State(BaseModel):
""" Base class for a jurisdiction """
_type = "jurisdiction"
_schema = schema
# schema objects
legislative_sessions = []
extras = {}
# non-db properties
scrapers = {}
default_scrapers = None
ignored_scraped_sessions = []
_metadata = None
def __init__(self):
super(BaseModel, self).__init__()
self._related = []
self.extras = {}
@property
def classification(self):
return "state" if self.name != "United States" else "country"
@property
def metadata(self):
if not self._metadata:
name = _name_fixes.get(self.__class__.__name__, self.__class__.__name__)
self._metadata = lookup(name=name)
return self._metadata
@property
def division_id(self):
return self.metadata.division_id
@property
def jurisdiction_id(self):
return "{}/government".format(
self.division_id.replace("ocd-division", "ocd-jurisdiction"),
)
@property
def name(self):
return self.metadata.name
@property
def url(self):
return self.metadata.url
def get_organizations(self):
legislature = Organization(
name=self.metadata.legislature_name, classification="legislature"
)
yield legislature
if not self.metadata.unicameral:
yield Organization(
self.metadata.upper.name,
classification="upper",
parent_id=legislature._id,
)
yield Organization(
self.metadata.lower.name,
classification="lower",
parent_id=legislature._id,
)
def get_session_list(self) -> list[str]:
raise NotImplementedError()
_id = jurisdiction_id
def as_dict(self):
return {
"_id": self.jurisdiction_id,
"id": self.jurisdiction_id,
"name": self.name,
"url": self.url,
"division_id": self.division_id,
"classification": self.classification,
"legislative_sessions": self.legislative_sessions,
"extras": self.extras,
}
def __str__(self):
return self.name
class JurisdictionScraper(Scraper):
def scrape(self):
# yield a single Jurisdiction object
yield self.jurisdiction
# yield all organizations
for org in self.jurisdiction.get_organizations():
yield org
|
StarcoderdataPython
|
3344778
|
<reponame>aarunsai81/netapp<filename>cinder/scheduler/filter_scheduler.py
# Copyright (c) 2011 Intel Corporation
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The FilterScheduler is for creating volumes.
You can customize this scheduler by specifying your own volume Filters and
Weighing Functions.
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.scheduler import driver
from cinder.scheduler import scheduler_options
from cinder.volume import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
self.cost_function_cache = None
self.options = scheduler_options.SchedulerOptions()
self.max_attempts = self._max_attempts()
def schedule(self, context, topic, method, *args, **kwargs):
"""Schedule contract that returns best-suited host for this request."""
self._schedule(context, topic, *args, **kwargs)
def _get_configuration_options(self):
"""Fetch options dictionary. Broken out for testing."""
return self.options.get_configuration()
def populate_filter_properties(self, request_spec, filter_properties):
"""Stuff things into filter_properties.
Can be overridden in a subclass to add more data.
"""
vol = request_spec['volume_properties']
filter_properties['size'] = vol['size']
filter_properties['availability_zone'] = vol.get('availability_zone')
filter_properties['user_id'] = vol.get('user_id')
filter_properties['metadata'] = vol.get('metadata')
filter_properties['qos_specs'] = vol.get('qos_specs')
def schedule_create_consistencygroup(self, context, group,
request_spec_list,
filter_properties_list):
weighed_host = self._schedule_group(
context,
request_spec_list,
filter_properties_list)
if not weighed_host:
raise exception.NoValidHost(reason=_("No weighed hosts available"))
host = weighed_host.obj.host
updated_group = driver.group_update_db(context, group, host)
self.volume_rpcapi.create_consistencygroup(context,
updated_group, host)
def schedule_create_group(self, context, group,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list):
weighed_host = self._schedule_generic_group(
context,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list)
if not weighed_host:
raise exception.NoValidHost(reason=_("No weighed hosts available"))
host = weighed_host.obj.host
updated_group = driver.generic_group_update_db(context, group, host)
self.volume_rpcapi.create_group(context,
updated_group, host)
def schedule_create_volume(self, context, request_spec, filter_properties):
weighed_host = self._schedule(context, request_spec,
filter_properties)
if not weighed_host:
raise exception.NoValidHost(reason=_("No weighed hosts available"))
host = weighed_host.obj.host
volume_id = request_spec['volume_id']
updated_volume = driver.volume_update_db(context, volume_id, host)
self._post_select_populate_filter_properties(filter_properties,
weighed_host.obj)
# context is not serializable
filter_properties.pop('context', None)
self.volume_rpcapi.create_volume(context, updated_volume, host,
request_spec, filter_properties,
allow_reschedule=True)
def host_passes_filters(self, context, host, request_spec,
filter_properties):
"""Check if the specified host passes the filters."""
weighed_hosts = self._get_weighted_candidates(context, request_spec,
filter_properties)
for weighed_host in weighed_hosts:
host_state = weighed_host.obj
if host_state.host == host:
return host_state
volume_id = request_spec.get('volume_id', '??volume_id missing??')
raise exception.NoValidHost(reason=_('Cannot place volume %(id)s on '
'%(host)s') %
{'id': volume_id,
'host': host})
def find_retype_host(self, context, request_spec, filter_properties=None,
migration_policy='never'):
"""Find a host that can accept the volume with its new type."""
filter_properties = filter_properties or {}
current_host = request_spec['volume_properties']['host']
# The volume already exists on this host, and so we shouldn't check if
# it can accept the volume again in the CapacityFilter.
filter_properties['vol_exists_on'] = current_host
weighed_hosts = self._get_weighted_candidates(context, request_spec,
filter_properties)
if not weighed_hosts:
raise exception.NoValidHost(reason=_('No valid hosts for volume '
'%(id)s with type %(type)s') %
{'id': request_spec['volume_id'],
'type': request_spec['volume_type']})
for weighed_host in weighed_hosts:
host_state = weighed_host.obj
if host_state.host == current_host:
return host_state
if utils.extract_host(current_host, 'pool') is None:
            # Legacy volumes created before pools were introduced have no
            # pool info in the host field, but host_state.host always
            # includes pool-level info. If the exact match above didn't
            # work out, fall back to host_state entries that refer to the
            # same backend as the volume being retyped. In other words, for
            # legacy volumes retyping may migrate between pools on the same
            # host, which we treat as different from migration between
            # hosts and therefore allow even when the policy is 'never'.
for weighed_host in weighed_hosts:
host_state = weighed_host.obj
backend = utils.extract_host(host_state.host, 'backend')
if backend == current_host:
return host_state
if migration_policy == 'never':
raise exception.NoValidHost(reason=_('Current host not valid for '
'volume %(id)s with type '
'%(type)s, migration not '
'allowed') %
{'id': request_spec['volume_id'],
'type': request_spec['volume_type']})
top_host = self._choose_top_host(weighed_hosts, request_spec)
return top_host.obj
def get_pools(self, context, filters):
# TODO(zhiteng) Add filters support
return self.host_manager.get_pools(context)
def _post_select_populate_filter_properties(self, filter_properties,
host_state):
"""Populate filter properties with additional information.
Add additional information to the filter properties after a host has
been selected by the scheduling process.
"""
# Add a retry entry for the selected volume backend:
self._add_retry_host(filter_properties, host_state.host)
def _add_retry_host(self, filter_properties, host):
"""Add a retry entry for the selected volume backend.
In the event that the request gets re-scheduled, this entry will signal
that the given backend has already been tried.
"""
retry = filter_properties.get('retry', None)
if not retry:
return
hosts = retry['hosts']
hosts.append(host)
def _max_attempts(self):
max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
raise exception.InvalidParameterValue(
err=_("Invalid value for 'scheduler_max_attempts', "
"must be >=1"))
return max_attempts
def _log_volume_error(self, volume_id, retry):
"""Log requests with exceptions from previous volume operations."""
exc = retry.pop('exc', None) # string-ified exception from volume
if not exc:
return # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None)
if not hosts:
return # no previously attempted hosts, skip
last_host = hosts[-1]
LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: "
"%(last_host)s : %(exc)s"),
{'volume_id': volume_id,
'last_host': last_host,
'exc': exc})
def _populate_retry(self, filter_properties, properties):
"""Populate filter properties with history of retries for request.
If maximum retries is exceeded, raise NoValidHost.
"""
max_attempts = self.max_attempts
retry = filter_properties.pop('retry', {})
if max_attempts == 1:
# re-scheduling is disabled.
return
# retry is enabled, update attempt count:
if retry:
retry['num_attempts'] += 1
else:
retry = {
'num_attempts': 1,
'hosts': [] # list of volume service hosts tried
}
filter_properties['retry'] = retry
volume_id = properties.get('volume_id')
self._log_volume_error(volume_id, retry)
if retry['num_attempts'] > max_attempts:
raise exception.NoValidHost(
reason=_("Exceeded max scheduling attempts %(max_attempts)d "
"for volume %(volume_id)s") %
{'max_attempts': max_attempts,
'volume_id': volume_id})
def _get_weighted_candidates(self, context, request_spec,
filter_properties=None):
"""Return a list of hosts that meet required specs.
Returned list is ordered by their fitness.
"""
elevated = context.elevated()
        # Since Cinder uses mixed filters from Oslo and its own, which take
        # 'resource_XX' and 'volume_XX' as input respectively, copying
        # 'volume_XX' to 'resource_XX' will make both filters happy.
volume_type = resource_type = request_spec.get("volume_type")
config_options = self._get_configuration_options()
if filter_properties is None:
filter_properties = {}
self._populate_retry(filter_properties,
request_spec['volume_properties'])
request_spec_dict = jsonutils.to_primitive(request_spec)
filter_properties.update({'context': context,
'request_spec': request_spec_dict,
'config_options': config_options,
'volume_type': volume_type,
'resource_type': resource_type})
self.populate_filter_properties(request_spec,
filter_properties)
# If multiattach is enabled on a volume, we need to add
# multiattach to extra specs, so that the capability
# filtering is enabled.
multiattach = request_spec['volume_properties'].get('multiattach',
False)
if multiattach and 'multiattach' not in resource_type.get(
'extra_specs', {}):
if 'extra_specs' not in resource_type:
resource_type['extra_specs'] = {}
resource_type['extra_specs'].update(
multiattach='<is> True')
# Find our local list of acceptable hosts by filtering and
        # weighing our options. We virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
hosts = self.host_manager.get_all_host_states(elevated)
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties)
if not hosts:
return []
LOG.debug("Filtered %s", hosts)
# weighted_host = WeightedHost() ... the best
# host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
filter_properties)
return weighed_hosts
def _get_weighted_candidates_group(self, context, request_spec_list,
filter_properties_list=None):
"""Finds hosts that supports the consistencygroup.
Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
weighed_hosts = []
index = 0
for request_spec in request_spec_list:
volume_properties = request_spec['volume_properties']
            # Since Cinder uses mixed filters from Oslo and its own, which
            # take 'resource_XX' and 'volume_XX' as input respectively,
            # copying 'volume_XX' to 'resource_XX' will make both filters
            # happy.
resource_properties = volume_properties.copy()
volume_type = request_spec.get("volume_type", None)
resource_type = request_spec.get("volume_type", None)
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
filter_properties = {}
if filter_properties_list:
filter_properties = filter_properties_list[index]
if filter_properties is None:
filter_properties = {}
self._populate_retry(filter_properties, resource_properties)
# Add consistencygroup_support in extra_specs if it is not there.
# Make sure it is populated in filter_properties
if 'consistencygroup_support' not in resource_type.get(
'extra_specs', {}):
resource_type['extra_specs'].update(
consistencygroup_support='<is> True')
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'volume_type': volume_type,
'resource_type': resource_type})
self.populate_filter_properties(request_spec,
filter_properties)
# Find our local list of acceptable hosts by filtering and
            # weighing our options. We virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_hosts = self.host_manager.get_all_host_states(elevated)
if not all_hosts:
return []
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts,
filter_properties)
if not hosts:
return []
LOG.debug("Filtered %s", hosts)
# weighted_host = WeightedHost() ... the best
# host for the job.
temp_weighed_hosts = self.host_manager.get_weighed_hosts(
hosts,
filter_properties)
if not temp_weighed_hosts:
return []
if index == 0:
weighed_hosts = temp_weighed_hosts
else:
new_weighed_hosts = []
for host1 in weighed_hosts:
for host2 in temp_weighed_hosts:
# Should schedule creation of CG on backend level,
# not pool level.
if (utils.extract_host(host1.obj.host) ==
utils.extract_host(host2.obj.host)):
new_weighed_hosts.append(host1)
weighed_hosts = new_weighed_hosts
if not weighed_hosts:
return []
index += 1
return weighed_hosts
def _get_weighted_candidates_generic_group(
self, context, group_spec, request_spec_list,
group_filter_properties=None,
filter_properties_list=None):
"""Finds hosts that supports the group.
Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
hosts_by_group_type = self._get_weighted_candidates_by_group_type(
context, group_spec, group_filter_properties)
weighed_hosts = []
hosts_by_vol_type = []
index = 0
for request_spec in request_spec_list:
volume_properties = request_spec['volume_properties']
            # Since Cinder uses mixed filters from Oslo and its own, which
            # take 'resource_XX' and 'volume_XX' as input respectively,
            # copying 'volume_XX' to 'resource_XX' will make both filters
            # happy.
resource_properties = volume_properties.copy()
volume_type = request_spec.get("volume_type", None)
resource_type = request_spec.get("volume_type", None)
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
filter_properties = {}
if filter_properties_list:
filter_properties = filter_properties_list[index]
if filter_properties is None:
filter_properties = {}
self._populate_retry(filter_properties, resource_properties)
# Add group_support in extra_specs if it is not there.
# Make sure it is populated in filter_properties
# if 'group_support' not in resource_type.get(
# 'extra_specs', {}):
# resource_type['extra_specs'].update(
# group_support='<is> True')
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'volume_type': volume_type,
'resource_type': resource_type})
self.populate_filter_properties(request_spec,
filter_properties)
# Find our local list of acceptable hosts by filtering and
            # weighing our options. We virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_hosts = self.host_manager.get_all_host_states(elevated)
if not all_hosts:
return []
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts,
filter_properties)
if not hosts:
return []
LOG.debug("Filtered %s", hosts)
# weighted_host = WeightedHost() ... the best
# host for the job.
temp_weighed_hosts = self.host_manager.get_weighed_hosts(
hosts,
filter_properties)
if not temp_weighed_hosts:
return []
if index == 0:
hosts_by_vol_type = temp_weighed_hosts
else:
hosts_by_vol_type = self._find_valid_hosts(
hosts_by_vol_type, temp_weighed_hosts)
if not hosts_by_vol_type:
return []
index += 1
# Find hosts selected by both the group type and volume types.
weighed_hosts = self._find_valid_hosts(hosts_by_vol_type,
hosts_by_group_type)
return weighed_hosts
def _find_valid_hosts(self, host_list1, host_list2):
new_hosts = []
for host1 in host_list1:
for host2 in host_list2:
# Should schedule creation of group on backend level,
# not pool level.
if (utils.extract_host(host1.obj.host) ==
utils.extract_host(host2.obj.host)):
new_hosts.append(host1)
if not new_hosts:
return []
return new_hosts
def _get_weighted_candidates_by_group_type(
self, context, group_spec,
group_filter_properties=None):
"""Finds hosts that supports the group type.
Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
weighed_hosts = []
volume_properties = group_spec['volume_properties']
        # Since Cinder uses mixed filters from Oslo and its own, which
        # take 'resource_XX' and 'volume_XX' as input respectively,
        # copying 'volume_XX' to 'resource_XX' will make both filters
        # happy.
resource_properties = volume_properties.copy()
group_type = group_spec.get("group_type", None)
resource_type = group_spec.get("group_type", None)
group_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
if group_filter_properties is None:
group_filter_properties = {}
self._populate_retry(group_filter_properties, resource_properties)
group_filter_properties.update({'context': context,
'request_spec': group_spec,
'config_options': config_options,
'group_type': group_type,
'resource_type': resource_type})
self.populate_filter_properties(group_spec,
group_filter_properties)
# Find our local list of acceptable hosts by filtering and
        # weighing our options. We virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_hosts = self.host_manager.get_all_host_states(elevated)
if not all_hosts:
return []
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts,
group_filter_properties)
if not hosts:
return []
LOG.debug("Filtered %s", hosts)
# weighted_host = WeightedHost() ... the best
# host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(
hosts,
group_filter_properties)
if not weighed_hosts:
return []
return weighed_hosts
def _schedule(self, context, request_spec, filter_properties=None):
weighed_hosts = self._get_weighted_candidates(context, request_spec,
filter_properties)
        # When we get the weighed_hosts, we drop those hosts whose backend
        # is not the same as the consistency group's backend.
if request_spec.get('CG_backend'):
group_backend = request_spec.get('CG_backend')
else:
group_backend = request_spec.get('group_backend')
if weighed_hosts and group_backend:
# Get host name including host@backend#pool info from
# weighed_hosts.
for host in weighed_hosts[::-1]:
backend = utils.extract_host(host.obj.host)
if backend != group_backend:
weighed_hosts.remove(host)
if not weighed_hosts:
LOG.warning(_LW('No weighed hosts found for volume '
'with properties: %s'),
filter_properties['request_spec'].get('volume_type'))
return None
return self._choose_top_host(weighed_hosts, request_spec)
def _schedule_group(self, context, request_spec_list,
filter_properties_list=None):
weighed_hosts = self._get_weighted_candidates_group(
context,
request_spec_list,
filter_properties_list)
if not weighed_hosts:
return None
return self._choose_top_host_group(weighed_hosts, request_spec_list)
def _schedule_generic_group(self, context, group_spec, request_spec_list,
group_filter_properties=None,
filter_properties_list=None):
weighed_hosts = self._get_weighted_candidates_generic_group(
context,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list)
if not weighed_hosts:
return None
return self._choose_top_host_generic_group(weighed_hosts)
def _choose_top_host(self, weighed_hosts, request_spec):
top_host = weighed_hosts[0]
host_state = top_host.obj
LOG.debug("Choosing %s", host_state.host)
volume_properties = request_spec['volume_properties']
host_state.consume_from_volume(volume_properties)
return top_host
def _choose_top_host_group(self, weighed_hosts, request_spec_list):
top_host = weighed_hosts[0]
host_state = top_host.obj
LOG.debug("Choosing %s", host_state.host)
return top_host
def _choose_top_host_generic_group(self, weighed_hosts):
top_host = weighed_hosts[0]
host_state = top_host.obj
LOG.debug("Choosing %s", host_state.host)
return top_host
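# ------------------------------------------------------------
# Not part of the original module: a minimal sketch of the kind of custom
# Filter and Weigher the module docstring refers to. It assumes the
# BaseHostFilter / BaseHostWeigher interfaces from cinder.scheduler.filters
# and cinder.scheduler.weights; the class names, the 1 GB threshold and the
# config option names mentioned at the end are illustrative assumptions.
from cinder.scheduler import filters as scheduler_filters
from cinder.scheduler import weights as scheduler_weights


class MinimumFreeSpaceFilter(scheduler_filters.BaseHostFilter):
    """Reject backends reporting less than 1 GB of free capacity."""

    def host_passes(self, host_state, filter_properties):
        free = getattr(host_state, 'free_capacity_gb', 0)
        # Backends may report 'infinite' or 'unknown'; let those pass.
        if not isinstance(free, (int, float)):
            return True
        return free >= 1


class FreeSpaceWeigher(scheduler_weights.BaseHostWeigher):
    """Prefer backends with more reported free capacity."""

    def _weigh_object(self, host_state, weight_properties):
        free = getattr(host_state, 'free_capacity_gb', 0)
        return float(free) if isinstance(free, (int, float)) else 0.0

# Such classes would then be referenced from cinder.conf, e.g. via options
# like scheduler_default_filters / scheduler_default_weighers (assumed names).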
# ============================================================
# StarcoderdataPython record 8060021
# ============================================================
from mypastebot import Search
# find 10 pastes with the keyword Python
pastes = Search.find(term='Python', limit=10) # min limit is 1 and max limit is 15
# print the results
print(pastes['results'])
# find 10 pastes with the keyword Python sorted by date
pastes = Search.find(term='Python', limit=10, sortType='date')
# print the results
print(pastes['results'])
# ============================================================
# StarcoderdataPython record 11374158
# ============================================================
"""
Configuration of pytest for agent tests
"""
from pathlib import Path
from textwrap import dedent
from unittest.mock import patch
import httpx
import respx
from pytest import fixture
from lm_agent.backend_utils import BackendConfigurationRow
from lm_agent.config import settings
MOCK_BIN_PATH = Path(__file__).parent / "mock_tools"
@fixture(autouse=True)
def mock_cache_dir(tmp_path):
_cache_dir = tmp_path / "license-manager-cache"
assert not _cache_dir.exists()
with patch("lm_agent.config.settings.CACHE_DIR", new=_cache_dir):
yield _cache_dir
@fixture
def license_servers():
return ["192.168.127.12 2345", "172.16.31.10 2345"]
@fixture
def respx_mock():
"""
Run a test in the respx context (similar to respx decorator, but it's a fixture).
Mocks the auth0 route used to secure a token.
"""
with respx.mock as mock:
respx.post(f"https://{settings.AUTH0_DOMAIN}/oauth/token").mock(
return_value=httpx.Response(status_code=200, json=dict(access_token="dummy-token"))
)
yield mock
@fixture
def one_configuration_row_flexlm():
return BackendConfigurationRow(
product="testproduct",
features={"testfeature": 10},
license_servers=["flexlm:127.0.0.1:2345"],
license_server_type="flexlm",
grace_time=10000,
)
@fixture
def one_configuration_row_rlm():
return BackendConfigurationRow(
product="converge",
features={"converge_super": 10},
license_servers=["rlm:127.0.0.1:2345"],
license_server_type="rlm",
grace_time=10000,
)
@fixture
def one_configuration_row_lsdyna():
return BackendConfigurationRow(
product="mppdyna",
features={"mppdyna": 500},
license_servers=["lsdyna:127.0.0.1:2345"],
license_server_type="lsdyna",
grace_time=10000,
)
@fixture
def one_configuration_row_lmx():
return BackendConfigurationRow(
product="hyperworks",
features={"hyperworks": 1000000},
license_servers=["lmx:127.0.0.1:2345"],
license_server_type="lmx",
grace_time=10000,
)
@fixture
def lmstat_output_bad():
"""
Some unparseable lmstat output
"""
return dedent(
"""\
lmstat - Copyright (c) 1989-2004 by Macrovision Corporation. All rights reserved.
Flexible License Manager status on Wed 03/31/2021 09:12
Error getting status: Cannot connect to license server (-15,570:111 "Connection refused")
"""
)
@fixture
def lmstat_output():
"""
Some lmstat output to parse
"""
return dedent(
"""\
lmstat - Copyright (c) 1989-2004 by Macrovision Corporation. All rights reserved.
...
Users of TESTFEATURE: (Total of 1000 licenses issued; Total of 93 licenses in use)
...
"""
" jbemfv myserver.example.com /dev/tty (v62.2) (myserver.example.com/24200 12507), "
"start Thu 10/29 8:09, 29 licenses\n"
" cdxfdn myserver.example.com /dev/tty (v62.2) (myserver.example.com/24200 12507), "
"start Thu 10/29 8:09, 27 licenses\n"
" jbemfv myserver.example.com /dev/tty (v62.2) (myserver.example.com/24200 12507), "
"start Thu 10/29 8:09, 37 licenses\n"
)
@fixture
def lmstat_output_no_licenses():
"""
Some lmstat output with no licenses in use to parse
"""
return dedent(
"""\
lmstat - Copyright (c) 1989-2004 by Macrovision Corporation. All rights reserved.
...
Users of TESTFEATURE: (Total of 1000 licenses issued; Total of 0 licenses in use)
...
"""
)
@fixture
def rlm_output_bad():
"""
    Some unparseable rlm output
"""
return dedent(
"""\
rlmutil v12.2
Copyright (C) 2006-2017, Reprise Software, Inc. All rights reserved.
Error connecting to "rlm" server
Connection attempted to host: "" on port 5053
No error
"""
)
@fixture
def rlm_output():
"""
Some rlm output to parse
"""
return dedent(
"""\
Setting license file path to <EMAIL>
rlmutil v12.2
Copyright (C) 2006-2017, Reprise Software, Inc. All rights reserved.
rlm status on licserv.com (port 35015), up 99d 11:08:25
rlm software version v12.2 (build:2)
rlm comm version: v1.2
Startup time: Tue Oct 19 01:40:13 2021
Todays Statistics (13:48:32), init time: Tue Nov 2 23:00:06 2021
Recent Statistics (00:16:08), init time: Wed Nov 3 12:32:30 2021
Recent Stats Todays Stats Total Stats
00:16:08 13:48:32 15d 11:08:25
Messages: 582 (0/sec) 28937 (0/sec) 777647 (0/sec)
Connections: 463 (0/sec) 23147 (0/sec) 622164 (0/sec)
--------- ISV servers ----------
Name Port Running Restarts
csci 63133 Yes 0
------------------------
csci ISV server status on licserv.server.com (port 63133), up 99d 11:08:18
csci software version v12.2 (build: 2)
csci comm version: v1.2
csci Debug log filename: F:\RLM\Logs\csci.dlog
csci Report log filename: F:\RLM\logs\Reportlogs\CSCILOG.rl
Startup time: Tue Oct 19 01:40:20 2021
Todays Statistics (13:48:32), init time: Tue Nov 2 23:00:06 2021
Recent Statistics (00:16:08), init time: Wed Nov 3 12:32:30 2021
Recent Stats Todays Stats Total Stats
00:16:08 13:48:32 15d 11:08:18
Messages: 991 (0/sec) 34770 (0/sec) 935961 (0/sec)
Connections: 945 (0/sec) 17359 (0/sec) 466699 (0/sec)
Checkouts: 0 (0/sec) 1 (0/sec) 937 (0/sec)
Denials: 0 (0/sec) 0 (0/sec) 0 (0/sec)
Removals: 0 (0/sec) 0 (0/sec) 0 (0/sec)
------------------------
csci license pool status on licser.server.com (port 63133)
converge v3.0
count: 1, # reservations: 0, inuse: 0, exp: 31-jan-2022
obsolete: 0, min_remove: 120, total checkouts: 0
converge_gui v1.0
count: 45, # reservations: 0, inuse: 0, exp: 31-jan-2022
obsolete: 0, min_remove: 120, total checkouts: 26
converge_gui_polygonica v1.0
count: 1, # reservations: 0, inuse: 0, exp: 31-jan-2022
obsolete: 0, min_remove: 120, total checkouts: 26
converge_super v3.0
count: 1000, # reservations: 0, inuse: 93, exp: 31-jan-2022
obsolete: 0, min_remove: 120, total checkouts: 169
converge_tecplot v1.0
count: 45, # reservations: 0, inuse: 0, exp: 31-jan-2022
obsolete: 0, min_remove: 120, total checkouts: 16
------------------------
csci license usage status on licser.server.com (port 63133)
converge_super v3.0: <EMAIL> 29/0 at 11/01 09:01 (handle: 15a)
converge_super v3.0: <EMAIL> 27/0 at 11/03 10:38 (handle: 128)
converge_super v3.0: <EMAIL> 37/0 at 11/01 09:01 (handle: 15a)
"""
)
@fixture
def rlm_output_no_licenses():
"""
Some rlm output with no licenses in use to parse
"""
return dedent(
"""\
Setting license file path to <EMAIL>:<EMAIL>
rlmutil v12.2
Copyright (C) 2006-2017, Reprise Software, Inc. All rights reserved.
rlm status on licserv0011.com (port 35015), up 20d 13:21:16
rlm software version v12.2 (build:2)
rlm comm version: v1.2
Startup time: Tue Oct 19 03:40:13 2021
Todays Statistics (16:01:23), init time: Mon Nov 8 00:00:06 2021
Recent Statistics (00:28:35), init time: Mon Nov 8 15:32:54 2021
Recent Stats Todays Stats Total Stats
00:28:35 16:01:23 20d 13:21:16
Messages: 997 (0/sec) 33562 (0/sec) 1033736 (0/sec)
Connections: 797 (0/sec) 26849 (0/sec) 827039 (0/sec)
--------- ISV servers ----------
Name Port Running Restarts
csci 63133 Yes 0
------------------------
csci ISV server status on licserv0011.com (port 63133), up 20d 13:21:09
csci software version v12.2 (build: 2)
csci comm version: v1.2
csci Debug log filename: F:\RLM\Logs\csci.dlog
csci Report log filename: F:\RLM\logs\Reportlogs\CSCILOG.rl
Startup time: Tue Oct 19 03:40:20 2021
Todays Statistics (16:01:23), init time: Mon Nov 8 00:00:06 2021
Recent Statistics (00:28:35), init time: Mon Nov 8 15:32:54 2021
Recent Stats Todays Stats Total Stats
00:28:35 16:01:23 20d 13:21:09
Messages: 1196 (0/sec) 40276 (0/sec) 1243764 (0/sec)
Connections: 598 (0/sec) 20138 (0/sec) 620365 (0/sec)
Checkouts: 0 (0/sec) 0 (0/sec) 262 (0/sec)
Denials: 0 (0/sec) 0 (0/sec) 0 (0/sec)
Removals: 0 (0/sec) 0 (0/sec) 0 (0/sec)
------------------------
csci license pool status on licserv0011.com (port 63133)
converge v3.0
count: 1, # reservations: 0, inuse: 0, exp: 31-jan-2022
obsolete: 0, min_remove: 120, total checkouts: 0
converge_gui v1.0
count: 45, # reservations: 0, inuse: 0, exp: 31-jan-2022
obsolete: 0, min_remove: 120, total checkouts: 26
converge_gui_polygonica v1.0
count: 1, # reservations: 0, inuse: 0, exp: 31-jan-2022
obsolete: 0, min_remove: 120, total checkouts: 26
converge_super v3.0
count: 1000, # reservations: 0, inuse: 0, exp: 31-jan-2022
obsolete: 0, min_remove: 120, total checkouts: 189
converge_tecplot v1.0
count: 45, # reservations: 0, inuse: 0, exp: 31-jan-2022
obsolete: 0, min_remove: 120, total checkouts: 21
"""
)
@fixture
def lsdyna_output_bad():
"""
Some unparseable lsdyna output
"""
return dedent(
"""\
Using default server 31010@localhost
*** ERROR failed to open server localhost
"""
)
@fixture
def lsdyna_output():
"""
Some lsdyna output to parse
"""
return dedent(
"""\
Using user specified server <EMAIL>
LICENSE INFORMATION
PROGRAM EXPIRATION CPUS USED FREE MAX | QUEUE
---------------- ---------- ----- ------ ------ | -----
MPPDYNA 12/30/2022 - 60 500 | 0
fane8y <EMAIL> 80
ssskmj <EMAIL> 80
ssskmj <EMAIL> 80
ywazrn <EMAIL> 80
ywazrn <EMAIL> 80
ndhtw9 <EMAIL> 40
MPPDYNA_971 12/30/2022 0 60 500 | 0
MPPDYNA_970 12/30/2022 0 60 500 | 0
MPPDYNA_960 12/30/2022 0 60 500 | 0
LS-DYNA 12/30/2022 0 60 500 | 0
LS-DYNA_971 12/30/2022 0 60 500 | 0
LS-DYNA_970 12/30/2022 0 60 500 | 0
LS-DYNA_960 12/30/2022 0 60 500 | 0
LICENSE GROUP 440 60 500 | 0
"""
)
@fixture
def lsdyna_output_no_licenses():
"""
Some lsdyna output with no licenses in use to parse
"""
return dedent(
"""\
Using user specified server <EMAIL>
LICENSE INFORMATION
PROGRAM EXPIRATION CPUS USED FREE MAX | QUEUE
---------------- ---------- ----- ------ ------ | -----
MPPDYNA 12/30/2022 0 500 500 | 0
MPPDYNA_971 12/30/2022 0 500 500 | 0
MPPDYNA_970 12/30/2022 0 000 500 | 0
MPPDYNA_960 12/30/2022 0 000 500 | 0
LS-DYNA 12/30/2022 0 000 500 | 0
LS-DYNA_971 12/30/2022 0 000 500 | 0
LS-DYNA_970 12/30/2022 0 000 500 | 0
LS-DYNA_960 12/30/2022 0 000 500 | 0
LICENSE GROUP 0 000 500 | 0
"""
)
@fixture
def lmx_output_bad():
"""
    Some unparseable LM-X output
"""
return dedent(
"""\
LM-X End-user Utility v3.32
Copyright (C) 2002-2010 X-Formation. All rights reserved.
++++++++++++++++++++++++++++++++++++++++
LM-X license server(s):
----------------------------------------
There are no license server(s) available.
"""
)
@fixture
def lmx_output():
"""
Some LM-X output to parse
"""
return dedent(
"""\
LM-X End-user Utility v3.32
Copyright (C) 2002-2010 X-Formation. All rights reserved.
++++++++++++++++++++++++++++++++++++++++
LM-X License Server on <EMAIL>:
Server version: v5.1 Uptime: 3 day(s) 12 hour(s) 0 min(s) 51 sec(s)
----------------------------------------
Feature: CatiaV5Reader Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
0 of 3 license(s) used
----------------------------------------
Feature: GlobalZoneEU Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
40000 of 1000003 license(s) used:
15000 license(s) used by VRAAFG@RD0082879 [172.16.31.10]
Login time: 2022-02-18 09:26 Checkout time: 2022-02-18 09:29
Shared on custom string: VRAAFG:RD0082879
25000 license(s) used by VRAAFG@RD0082879 [172.16.31.10]
Login time: 2022-02-18 09:26 Checkout time: 2022-02-18 09:26
Shared on custom string: VRAAFG:RD0082879
----------------------------------------
Feature: HWAIFPBS Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
0 of 2147483647 license(s) used
----------------------------------------
Feature: HWAWPF Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
0 of 2147483647 license(s) used
----------------------------------------
Feature: HWActivate Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
0 of 2147483647 license(s) used
----------------------------------------
Feature: HWFlux2D Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
30000 of 2147483647 license(s) used:
15000 license(s) used by VRAAFG@RD0082879 [172.16.31.10]
Login time: 2022-02-18 09:26 Checkout time: 2022-02-18 09:29
Shared on custom string: VRAAFG:RD0082879:27164_23514544_1645091752_138525
15000 license(s) used by VRAAFG@RD0082879 [172.16.31.10]
Login time: 2022-02-18 09:26 Checkout time: 2022-02-18 09:26
Shared on custom string: VRAAFG:RD0082879:18896_1081950704_1645017269_309963
----------------------------------------
Feature: HyperWorks Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
25000 of 1000000 license(s) used:
25000 license(s) used by sssaah@RD0082406 [192.168.127.12]
Login time: 2022-02-18 09:26 Checkout time: 2022-02-18 09:26
Shared on custom string: sssaah:RD0082406
"""
)
@fixture
def lmx_output_no_licenses():
"""
Some LM-X output with no licenses in use to parse
"""
return dedent(
"""\
LM-X End-user Utility v3.32
Copyright (C) 2002-2010 X-Formation. All rights reserved.
++++++++++++++++++++++++++++++++++++++++
LM-X License Server on 6200@licserv0013.<EMAIL>:
Server version: v5.1 Uptime: 3 day(s) 12 hour(s) 0 min(s) 51 sec(s)
----------------------------------------
Feature: CatiaV5Reader Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
0 of 3 license(s) used
----------------------------------------
Feature: GlobalZoneEU Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
0 of 1000003 license(s) used
----------------------------------------
Feature: HWAIFPBS Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
0 of 2147483647 license(s) used
----------------------------------------
Feature: HWAWPF Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
0 of 2147483647 license(s) used
----------------------------------------
Feature: HWActivate Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
0 of 2147483647 license(s) used
----------------------------------------
Feature: HWFlux2D Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
0 of 2147483647 license(s) used
----------------------------------------
Feature: HyperWorks Version: 21.0 Vendor: ALTAIR
Start date: 2022-02-17 Expire date: 2023-01-31
Key type: EXCLUSIVE License sharing: CUSTOM VIRTUAL
0 of 1000000 license(s) used
"""
)
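# ------------------------------------------------------------
# Not part of the original conftest: a minimal sketch of how a test module
# would consume these fixtures. It only asserts against the fixture data
# itself, since the agent's parser functions are not shown in this file.
def test_lmstat_fixture_mentions_configured_feature(
    lmstat_output, one_configuration_row_flexlm
):
    # The FlexLM configuration row advertises "testfeature"; the canned
    # lmstat output reports usage for TESTFEATURE, so the two belong together.
    assert "TESTFEATURE" in lmstat_output
    assert one_configuration_row_flexlm.features == {"testfeature": 10}
    assert one_configuration_row_flexlm.license_server_type == "flexlm"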
# ============================================================
# StarcoderdataPython record 4842994
# File: petkit_exporter/petkit.py (0 GitHub stars)
# ============================================================
import datetime
import hashlib
from collections import namedtuple
from typing import Dict, List, Optional
import requests
PETKIT_API = "http://api.petkt.com"
EVENT_TYPES = {
5: "cleaning",
7: "reset",
10: "pet in the litter box",
8: "deorder"
}
START_REASON = {
0: "auto",
1: "periodic",
2: "manual"
}
PetEvent = namedtuple(
"PetEvent", [
"time_start",
"time_end",
"duration",
"name",
"weight",
],
defaults=(None,) * 5
)
CleanEvent = namedtuple(
"CleanEvent", [
"time_start",
"time_end",
"duration",
"event_name",
"trigger_reason",
"litter_percent",
"need_clean",
"deoder_percent",
"refill_deoder",
],
defaults=(None,) * 9
)
class PetkitURL:
LOGIN = "/latest/user/login"
USER_DETAILS = "/latest/user/details2"
DISCOVERY = "/latest/discovery/device_roster"
PURAX_DETAILS = "/latest/t3/device_detail"
PURAX_RECORDS = "/latest/t3/getDeviceRecord"
class PetKit:
def __init__(self, user_name: str, password: str) -> None:
self.user_name = user_name
        # PetKit expects a hashed password (it is sent with "encrypt": 1
        # below); MD5 of the UTF-8 encoded password is assumed here.
        self.password = hashlib.md5(password.encode("utf-8")).hexdigest()
self.access_token: Optional[str] = None
self.access_token_expiration: Optional[datetime.datetime] = None
self.user: Optional[Dict] = None
def maybe_login(self) -> None:
if (
self.access_token is not None
and self.access_token_expiration > datetime.datetime.utcnow()
):
return
r = requests.post(
f"{PETKIT_API}{PetkitURL.LOGIN}",
data={
"username": self.user_name,
"password": <PASSWORD>.password,
"encrypt": 1
}
)
r.raise_for_status()
session = r.json()["result"]["session"]
self.access_token = session["id"]
self.access_token_expiration = datetime.datetime.strptime(
session["createdAt"], "%Y-%m-%dT%H:%M:%S.%fZ"
) + datetime.timedelta(seconds=session["expiresIn"])
def _query(self, path: str) -> Dict:
self.maybe_login()
r = requests.post(
f"{PETKIT_API}{path}", headers={"X-Session": self.access_token, 'X-Api-Version': '8.1.0'}
)
r.raise_for_status()
response = r.json()
if response.get("error") is not None:
raise ValueError(response["error"]["msg"])
return response
def _format_time(self, ts: int) -> str:
return datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
def get_user_details(self) -> None:
r = self._query(PetkitURL.USER_DETAILS)
self.user = r["result"]["user"]
def discover_devices(self) -> List[Dict]:
r = self._query(PetkitURL.DISCOVERY)
return r["result"]["devices"]
def get_device_details(self, device_id: int) -> Dict:
r = self._query(f"{PetkitURL.PURAX_DETAILS}?id={device_id}")
return r["result"]
def get_device_records(self, device_id: int) -> List[Dict]:
r = self._query(f"{PetkitURL.PURAX_RECORDS}?deviceId={device_id}")
return [self.parse_record(row) for row in r["result"]]
def parse_record(self, record):
if record["eventType"] == 10:
# Pet in Litter box
pet = self.find_most_possible_pet(record["content"]["petWeight"])
return (
record["timestamp"],
PetEvent(
self._format_time(record["content"]["timeIn"]),
self._format_time(record["content"]["timeOut"]),
record["content"]["timeOut"] - record["content"]["timeIn"],
(pet or {}).get("name"),
record["content"]["petWeight"]
)
)
if record["eventType"] == 5:
# cleaning
            return (
record["timestamp"],
CleanEvent(
self._format_time(record["content"]["startTime"]),
self._format_time(record["timestamp"]),
record["timestamp"] - record["content"]["startTime"],
"clean",
START_REASON.get(
record["content"]["startReason"]) or record["content"]["startReason"],
litter_percent=record["content"]["litterPercent"],
need_clean=record["content"]["boxFull"]
)
)
if record["eventType"] == 8:
# deorder
return (
record["timestamp"],
CleanEvent(
self._format_time(record["content"]["startTime"]),
self._format_time(record["timestamp"]),
record["timestamp"] - record["content"]["startTime"],
"deorder",
START_REASON[record["content"]["startReason"]],
deoder_percent=record["content"]["liquid"],
refill_deoder=record["content"]["liquidLack"]
)
)
if record["eventType"] == 7:
# reset
return (
record["timestamp"],
CleanEvent(
self._format_time(record["content"]["startTime"]),
self._format_time(record["timestamp"]),
record["timestamp"] - record["content"]["startTime"],
"reset",
START_REASON.get(
record["content"]["startReason"]) or record["content"]["startReason"],
)
)
return record["timestamp"], record
def find_most_possible_pet(self, weight):
if self.user is None:
self.get_user_details()
pet = sorted(
self.user["dogs"],
key=lambda p: abs(p["weight"] * 1000 - weight)
)[0]
if pet["weight"] > 600:
return None
return pet
def get_pet_names(self):
if self.user is None:
self.get_user_details()
return [p["name"] for p in self.user["dogs"]]
# ============================================================
# StarcoderdataPython record 3359768
# ============================================================
def hello():
print("Guten Tag! It's me Sivant :)")
# ============================================================
# StarcoderdataPython record 8192005
# Repo: wreiner/Office365-REST-Python-Client
# ============================================================
class ClientCredential(object):
def __init__(self, client_id, client_secret):
"""
Client credentials
:type client_secret: str
:type client_id: str
"""
self.clientId = client_id
self.clientSecret = client_secret
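# ------------------------------------------------------------
# Not part of the original module: a short usage sketch with placeholder
# values. The credential object simply carries the app registration's
# client id and secret for later use by the library's auth flow.
if __name__ == "__main__":
    credentials = ClientCredential("00000000-0000-0000-0000-000000000000",
                                   "placeholder-secret")
    print(credentials.clientId, bool(credentials.clientSecret))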
# ============================================================
# StarcoderdataPython record 4817735
# Repo: BMeu/Orchard (1-10 GitHub stars)
# ============================================================
# -*- coding: utf-8 -*-
"""
Unit Test: orchard.system_status.system.cpu
"""
import subprocess
import unittest
import mock
import orchard
import orchard.system_status.system.cpu as cpu
class CPUUnitTest(unittest.TestCase):
def setUp(self):
app = orchard.create_app('Testing')
self.app_context = app.app_context()
self.app_context.push()
self.client = app.test_client(use_cookies = True)
def tearDown(self):
self.app_context.pop()
def test_load(self):
patch = 'orchard.system_status.system.cpu.open'
mock_file = mock.mock_open(read_data = '0.01 0.05 0.15 1/601 12343')
with mock.patch(patch, mock_file, create = True) as mock_open:
load_one = cpu.load(cpu.LoadPeriods.one)
load_five = cpu.load(cpu.LoadPeriods.five)
load_fifteen = cpu.load(cpu.LoadPeriods.fifteen)
self.assertEqual(load_one, 0.01)
self.assertEqual(load_five, 0.05)
self.assertEqual(load_fifteen, 0.15)
mock_open.assert_called_with('/proc/loadavg', 'r')
mock_file = mock.mock_open(read_data = '0.01')
with mock.patch(patch, mock_file, create = True) as mock_open:
load_one = cpu.load(cpu.LoadPeriods.one)
load_five = cpu.load(cpu.LoadPeriods.five)
load_fifteen = cpu.load(cpu.LoadPeriods.fifteen)
self.assertEqual(load_one, 0.01)
self.assertEqual(load_five, 0.0)
self.assertEqual(load_fifteen, 0.0)
mock_open.assert_called_with('/proc/loadavg', 'r')
@mock.patch('subprocess.Popen')
def test_temperature(self, mock_popen):
mock_process = mock.Mock()
attributes = {
'communicate.return_value': (b'temp=42.1337\'C', '')
}
mock_process.configure_mock(**attributes)
mock_popen.return_value = mock_process
temperature = cpu.temperatue()
mock_popen.assert_called_with(['sudo', 'vcgencmd', 'measure_temp'],
stdout = subprocess.PIPE)
self.assertEqual(temperature, 42.1337)
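# ------------------------------------------------------------
# Not part of the original test module: a sketch of what the functions under
# test plausibly look like, inferred purely from the assertions above. It is
# not the actual orchard.system_status.system.cpu implementation; the enum
# values and parsing details are assumptions.
import enum


class LoadPeriods(enum.IntEnum):
    one = 0
    five = 1
    fifteen = 2


def load(period):
    # /proc/loadavg starts with the 1, 5 and 15 minute load averages.
    with open('/proc/loadavg', 'r') as handle:
        fields = handle.read().split()
    try:
        return float(fields[int(period)])
    except IndexError:
        return 0.0


def temperatue():  # spelling follows the call in the test above
    process = subprocess.Popen(['sudo', 'vcgencmd', 'measure_temp'],
                               stdout = subprocess.PIPE)
    output, _ = process.communicate()
    # Output looks like b"temp=42.1337'C".
    return float(output.decode().split('=')[1].split("'")[0])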
# ============================================================
# StarcoderdataPython record 242558
# ============================================================
'''
Be careful to avoid circular imports.
'''
def init_routes(api):
from .helloworld import HelloWorld
from .auth import RegisterApi, LoginApi, LogoutApi
from .user import UserApi, UserPetApi, UserPpcamApi, UserPadApi
from .pet import PetRegisterApi, PetApi
from .pet_record import PetRecordApi, PetRecordsApi
from .pet_record_image import PetRecordImageApi
from .ppcam import PpcamRegisterApi, PpcamLoginApi, PpcamApi
from .pad import PadApi
from .ppsnack import PpsnackApi, PpsnackFeedApi
from .statistics import DailyStatApi, WeeklyStatApi, MonthlyStatApi, TotalMonthStatApi
from .polling import PpcamPollingApi
# Helloworld
api.add_resource(HelloWorld, '/')
# Auth
api.add_resource(RegisterApi, '/user/register')
api.add_resource(LoginApi, '/user/login')
api.add_resource(LogoutApi, '/user/logout')
# User
api.add_resource(UserApi, '/user/<int:user_id>')
api.add_resource(UserPetApi, '/user/<int:user_id>/pet')
api.add_resource(UserPpcamApi, '/user/<int:user_id>/ppcam')
api.add_resource(UserPadApi, '/user/<int:user_id>/pad')
# Pet
api.add_resource(PetRegisterApi, '/pet/register')
api.add_resource(PetApi, '/pet/<int:pet_id>')
# PetRecord
api.add_resource(PetRecordApi, '/pet/<int:pet_id>/record')
api.add_resource(PetRecordsApi, '/pet/<int:pet_id>/records')
# PetRecordImage
api.add_resource(PetRecordImageApi, '/pet/<int:pet_id>/record/image')
# Ppcam
api.add_resource(PpcamRegisterApi, '/ppcam/register')
api.add_resource(PpcamLoginApi, '/ppcam/login')
api.add_resource(PpcamApi, '/ppcam/<int:ppcam_id>')
api.add_resource(PpcamPollingApi, '/ppcam/<int:ppcam_id>/polling')
# Pad
api.add_resource(PadApi, '/ppcam/<int:ppcam_id>/pad')
# Ppsnack
api.add_resource(PpsnackApi, '/ppcam/<int:ppcam_id>/ppsnack')
api.add_resource(PpsnackFeedApi, '/ppcam/<int:ppcam_id>/ppsnack/feeding')
# Statistics
api.add_resource(DailyStatApi, '/pet/<int:pet_id>/report/daily')
api.add_resource(WeeklyStatApi, '/pet/<int:pet_id>/report/weekly')
api.add_resource(MonthlyStatApi, '/pet/<int:pet_id>/report/monthly')
api.add_resource(TotalMonthStatApi, '/pet/<int:pet_id>/report/total')
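# ------------------------------------------------------------
# Not part of the original module: a minimal sketch of how init_routes would
# typically be wired up, assuming Flask-RESTful (the add_resource calls above
# match its Api interface). The factory name and settings are placeholders.
def create_app():
    from flask import Flask
    from flask_restful import Api

    app = Flask(__name__)
    api = Api(app)
    init_routes(api)  # registers every resource listed above
    return app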
# ============================================================
# StarcoderdataPython record 3427548
# File: Toolkits/VCS/repology__repology-api/repology-app.py
# ============================================================
#!/usr/bin/env python3
#
# Copyright (C) 2016-2017 <NAME> <<EMAIL>>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import json
import math
from operator import itemgetter
import flask
from werkzeug.contrib.profiler import ProfilerMiddleware
from repology.database import Database
from repology.graphprocessor import GraphProcessor
from repology.metapackageproc import *
from repology.package import *
from repology.packageproc import *
from repology.queryfilters import *
from repology.repoman import RepositoryManager
from repology.template_helpers import *
from repology.version import VersionCompare
# create application and handle configuration
app = flask.Flask(__name__)
app.config.from_pyfile('repology.conf.default')
app.config.from_pyfile('repology.conf', silent=True)
app.config.from_envvar('REPOLOGY_CONFIG', silent=True)
# global repology objects
repoman = RepositoryManager(app.config['REPOS_DIR'], 'dummy') # XXX: should not construct fetchers and parsers here
repometadata = repoman.GetMetadata(app.config['REPOSITORIES'])
reponames = repoman.GetNames(app.config['REPOSITORIES'])
# templates: tuning
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
# templates: custom filters
app.jinja_env.filters['pkg_format'] = pkg_format
app.jinja_env.filters['css_for_package_versionclass'] = css_for_package_versionclass
app.jinja_env.filters['css_for_summary_versionclass'] = css_for_summary_versionclass
app.jinja_env.filters['maintainer_to_links'] = maintainer_to_links
app.jinja_env.filters['maintainers_to_group_mailto'] = maintainers_to_group_mailto
# templates: custom tests
app.jinja_env.tests['for_page'] = for_page
# templates: custom global functions
app.jinja_env.globals['url_for_self'] = url_for_self
# templates: custom global data
app.jinja_env.globals['REPOLOGY_HOME'] = app.config['REPOLOGY_HOME']
app.jinja_env.globals['repometadata'] = repometadata
app.jinja_env.globals['reponames'] = reponames
def get_db():
if not hasattr(flask.g, 'database'):
flask.g.database = Database(app.config['DSN'], readonly=False, autocommit=True)
return flask.g.database
# helpers
def api_v1_package_to_json(package):
output = {
field: getattr(package, field) for field in (
'repo',
'subrepo',
'name',
'version',
'origversion',
'maintainers',
#'category',
#'comment',
#'homepage',
'licenses',
'downloads'
) if getattr(package, field)
}
# XXX: these tweaks should be implemented in core
if package.homepage:
output['www'] = [package.homepage]
if package.comment:
output['summary'] = package.comment
if package.category:
output['categories'] = [package.category]
return output
def api_v1_metapackages_generic(bound, *filters):
metapackages = PackagesToMetapackages(
get_db().GetMetapackages(
bound_to_filter(bound),
*filters,
limit=app.config['METAPACKAGES_PER_PAGE']
)
)
metapackages = {metapackage_name: list(map(api_v1_package_to_json, packageset)) for metapackage_name, packageset in metapackages.items()}
return (
json.dumps(metapackages),
{'Content-type': 'application/json'}
)
def bound_to_filter(bound):
if bound and bound.startswith('<'):
return NameBeforeQueryFilter(bound[1:])
elif bound and bound.startswith('>'):
return NameAfterQueryFilter(bound[1:])
else:
return NameStartingQueryFilter(bound)
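# Illustration (not original code): how bound_to_filter maps page bounds
# to query filters, following the branches above:
#   bound_to_filter('<firefox') -> NameBeforeQueryFilter('firefox')    # page before "firefox"
#   bound_to_filter('>firefox') -> NameAfterQueryFilter('firefox')     # page after "firefox"
#   bound_to_filter('firefox')  -> NameStartingQueryFilter('firefox')  # page starting at "firefox"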
def get_packages_name_range(packages):
firstname, lastname = None, None
if packages:
firstname = lastname = packages[0].effname
for package in packages[1:]:
lastname = max(lastname, package.effname)
firstname = min(firstname, package.effname)
return firstname, lastname
def metapackages_to_data(metapackages, repo=None, maintainer=None):
metapackagedata = {}
for metapackagename, packages in sorted(metapackages.items()):
packages = PackagesetSortByVersions(packages)
# 1. Aggregate by repository
packages_by_repo = {}
for package in packages:
if package.repo not in packages_by_repo:
packages_by_repo[package.repo] = []
packages_by_repo[package.repo].append(package)
# 2.1. Extract explicit packages
# 2.2. Discover repos worth showing
        # A repo is not worth showing if all of its newest (in this repo)
        # packages were extracted as explicit
explicit_packages = []
ignored_packages = []
repos_worth_showing = set()
for reponame, repopackages in packages_by_repo.items():
bestversion = None
for package in repopackages:
# discover best version
if bestversion is None and package.versionclass != PackageVersionClass.ignored:
bestversion = package.version
if (repo is not None and repo == package.repo) or (maintainer is not None and maintainer in package.maintainers):
explicit_packages.append(package)
elif package.versionclass == PackageVersionClass.ignored:
ignored_packages.append(package)
elif VersionCompare(package.version, bestversion) == 0:
repos_worth_showing.add(reponame)
# 3. Extract newest package from each repo
newest_packages = []
for reponame in repos_worth_showing:
for package in packages_by_repo[reponame]:
if package.versionclass != PackageVersionClass.ignored:
newest_packages.append(package)
break
# 4. Aggregate by versions
def VersionsDigest(version):
return {
'version': version['version'],
'families': set(map(lambda p: p.family, version['packages'])),
'class': version['packages'][0].versionclass,
}
versions = PackagesetAggregateByVersions(newest_packages)
metapackagedata[metapackagename] = {
'families': PackagesetToFamilies(packages),
'explicit': map(VersionsDigest, PackagesetAggregateByVersions(explicit_packages)),
'newest': map(VersionsDigest, filter(lambda v: v['packages'][0].versionclass == PackageVersionClass.newest, versions)),
'outdated': map(VersionsDigest, filter(lambda v: v['packages'][0].versionclass == PackageVersionClass.outdated, versions)),
'ignored': map(VersionsDigest, PackagesetAggregateByVersions(ignored_packages))
}
return metapackagedata
def metapackages_generic(bound, *filters, template='metapackages.html', repo=None, maintainer=None):
namefilter = bound_to_filter(bound)
# process search
search = flask.request.args.to_dict().get('search')
searchfilter = NameSubstringQueryFilter(search) if search else None
# get packages
packages = get_db().GetMetapackages(namefilter, InAnyRepoQueryFilter(reponames), searchfilter, *filters, limit=app.config['METAPACKAGES_PER_PAGE'])
    # on empty result, fall back to showing the first or last set of results
if not packages:
if bound and bound.startswith('<'):
namefilter = NameStartingQueryFilter()
else:
namefilter = NameBeforeQueryFilter()
packages = get_db().GetMetapackages(namefilter, InAnyRepoQueryFilter(reponames), searchfilter, *filters, limit=app.config['METAPACKAGES_PER_PAGE'])
firstname, lastname = get_packages_name_range(packages)
metapackagedata = metapackages_to_data(PackagesToMetapackages(packages), repo, maintainer)
return flask.render_template(
template,
firstname=firstname,
lastname=lastname,
search=search,
metapackagedata=metapackagedata,
repo=repo,
maintainer=maintainer
)
@app.route('/')
def index():
repostats = [
repo for repo in get_db().GetRepositories()
if repo['name'] in reponames and repometadata[repo['name']]['type'] == 'repository'
]
top_repos = {
'by_total': [
{
'name': repo['name'],
'value': repo['num_metapackages'],
}
for repo in sorted(repostats, key=lambda repo: repo['num_metapackages'], reverse=True)[:10]
],
'by_newest': [
{
'name': repo['name'],
'value': repo['num_metapackages_newest'],
}
for repo in sorted(repostats, key=lambda repo: repo['num_metapackages_newest'], reverse=True)[:10]
],
'by_pnewest': [
{
'name': repo['name'],
'value': '{:.2f}%'.format(100.0 * repo['num_metapackages_newest'] / repo['num_metapackages'] if repo['num_metapackages'] else 0),
}
for repo in sorted(repostats, key=lambda repo: repo['num_metapackages_newest'] / (repo['num_metapackages'] or 1), reverse=True)[:10]
]
}
important_packages = [
'apache24',
'awesome',
'bash',
'binutils',
'bison',
'blender',
'boost',
'bzip2',
'chromium',
'claws-mail',
'cmake',
'cppcheck',
'cups',
'curl',
'darktable',
'dia',
'djvulibre',
'dosbox',
'dovecot',
'doxygen',
'dvd+rw-tools',
'emacs',
'exim',
'ffmpeg',
'firefox',
'flex',
'fluxbox',
'freecad',
'freetype',
'gcc',
'gdb',
'geeqie',
'gimp',
'git',
'gnupg',
'go',
'graphviz',
'grub',
'icewm',
'imagemagick',
'inkscape',
'irssi',
'kodi',
'lame',
'lftp',
'libreoffice',
'libressl',
'lighttpd',
'links',
'llvm',
'mariadb',
'maxima',
'mc',
'memcached',
'mercurial',
'mesa',
'mplayer',
'mutt',
'mysql',
'nginx',
'nmap',
'octave',
'openbox',
'openssh',
'openssl',
        'openttd',
'openvpn',
'p7zip',
'perl',
'pidgin',
'postfix',
'postgresql',
'privoxy',
'procmail',
'python3',
'qemu',
'rdesktop',
'redis',
'rrdtool',
'rsync',
'rtorrent',
'rxvt-unicode',
'samba',
'sane-backends',
'scons',
'screen',
'scribus',
'scummvm',
'sdl2',
'smartmontools',
'sqlite3',
'squid',
'subversion',
'sudo',
'sumversion',
'thunderbird',
'tigervnc',
'tmux',
'tor',
'valgrind',
'vim',
'virtualbox',
'vlc',
'vsftpd',
'wayland',
'wesnoth',
'wget',
'wine',
'wireshark',
'xorg-server',
'youtube-dl',
'zsh',
]
packages = get_db().GetMetapackage(important_packages)
metapackagedata = metapackages_to_data(PackagesToMetapackages(packages))
return flask.render_template(
'index.html',
top_repos=top_repos,
metapackagedata=metapackagedata
)
@app.route('/metapackages/') # XXX: redirect to metapackages/all?
@app.route('/metapackages/all/')
@app.route('/metapackages/all/<bound>/')
def metapackages_all(bound=None):
return metapackages_generic(
bound,
template='metapackages-all.html'
)
@app.route('/metapackages/unique/')
@app.route('/metapackages/unique/<bound>/')
def metapackages_unique(bound=None):
return metapackages_generic(
bound,
InNumFamiliesQueryFilter(less=1),
template='metapackages-unique.html'
)
@app.route('/metapackages/widespread/')
@app.route('/metapackages/widespread/<bound>/')
def metapackages_widespread(bound=None):
return metapackages_generic(
bound,
InNumFamiliesQueryFilter(more=10),
template='metapackages-widespread.html'
)
@app.route('/metapackages/in-repo/<repo>/')
@app.route('/metapackages/in-repo/<repo>/<bound>/')
def metapackages_in_repo(repo, bound=None):
if not repo or repo not in repometadata:
flask.abort(404)
return metapackages_generic(
bound,
InRepoQueryFilter(repo),
template='metapackages-in-repo.html',
repo=repo,
)
@app.route('/metapackages/outdated-in-repo/<repo>/')
@app.route('/metapackages/outdated-in-repo/<repo>/<bound>/')
def metapackages_outdated_in_repo(repo, bound=None):
if not repo or repo not in repometadata:
flask.abort(404)
return metapackages_generic(
bound,
OutdatedInRepoQueryFilter(repo),
template='metapackages-outdated-in-repo.html',
repo=repo,
)
@app.route('/metapackages/not-in-repo/<repo>/')
@app.route('/metapackages/not-in-repo/<repo>/<bound>/')
def metapackages_not_in_repo(repo, bound=None):
if not repo or repo not in repometadata:
flask.abort(404)
return metapackages_generic(
bound,
NotInRepoQueryFilter(repo),
template='metapackages-not-in-repo.html',
repo=repo,
)
@app.route('/metapackages/candidates-for-repo/<repo>/')
@app.route('/metapackages/candidates-for-repo/<repo>/<bound>/')
def metapackages_candidates_for_repo(repo, bound=None):
if not repo or repo not in repometadata:
flask.abort(404)
return metapackages_generic(
bound,
NotInRepoQueryFilter(repo),
InNumFamiliesQueryFilter(more=5),
template='metapackages-candidates-for-repo.html',
repo=repo,
)
@app.route('/metapackages/unique-in-repo/<repo>/')
@app.route('/metapackages/unique-in-repo/<repo>/<bound>/')
def metapackages_unique_in_repo(repo, bound=None):
if not repo or repo not in repometadata:
flask.abort(404)
return metapackages_generic(
bound,
InRepoQueryFilter(repo),
InNumFamiliesQueryFilter(less=1),
template='metapackages-unique-in-repo.html',
repo=repo,
)
@app.route('/metapackages/by-maintainer/<maintainer>/')
@app.route('/metapackages/by-maintainer/<maintainer>/<bound>/')
def metapackages_by_maintainer(maintainer, bound=None):
return metapackages_generic(
bound,
MaintainerQueryFilter(maintainer),
template='metapackages-by-maintainer.html',
maintainer=maintainer,
)
@app.route('/metapackages/outdated-by-maintainer/<maintainer>/')
@app.route('/metapackages/outdated-by-maintainer/<maintainer>/<bound>/')
def metapackages_outdated_by_maintainer(maintainer, bound=None):
return metapackages_generic(
bound,
MaintainerOutdatedQueryFilter(maintainer),
template='metapackages-outdated-by-maintainer.html',
maintainer=maintainer,
)
@app.route('/maintainers/')
@app.route('/maintainers/<bound>/')
def maintainers(bound=None):
reverse = False
if bound and bound.startswith('..'):
bound = bound[2:]
reverse = True
elif bound and bound.endswith('..'):
bound = bound[:-2]
search = flask.request.args.to_dict().get('search')
minmaintainer, maxmaintainer = get_db().GetMaintainersRange()
maintainers = get_db().GetMaintainers(bound, reverse, search, app.config['MAINTAINERS_PER_PAGE'])
firstpage, lastpage = False, False
for maintainer in maintainers:
if maintainer['maintainer'] == minmaintainer:
firstpage = True
if maintainer['maintainer'] == maxmaintainer:
lastpage = True
return flask.render_template(
'maintainers.html',
search=search,
minmaintainer=minmaintainer,
maxmaintainer=maxmaintainer,
firstpage=firstpage,
lastpage=lastpage,
maintainers=maintainers
)
@app.route('/maintainer/<maintainer>')
def maintainer(maintainer):
maintainer_info = get_db().GetMaintainerInformation(maintainer)
metapackages = get_db().GetMaintainerMetapackages(maintainer, 500)
similar_maintainers = get_db().GetMaintainerSimilarMaintainers(maintainer, 50)
numproblems = get_db().GetProblemsCount(maintainer=maintainer)
if not maintainer_info:
flask.abort(404)
return flask.render_template(
'maintainer.html',
numproblems=numproblems,
maintainer=maintainer,
maintainer_info=maintainer_info,
metapackages=metapackages,
similar_maintainers=similar_maintainers
)
@app.route('/maintainer/<maintainer>/problems')
def maintainer_problems(maintainer):
return flask.render_template(
'maintainer-problems.html',
maintainer=maintainer,
problems=get_db().GetProblems(
maintainer=maintainer,
limit=app.config['PROBLEMS_PER_PAGE']
)
)
@app.route('/repositories/')
def repositories():
return flask.render_template('repositories.html')
@app.route('/repository/<repo>')
def repository(repo):
if not repo or repo not in repometadata:
flask.abort(404)
return flask.render_template(
'repository.html',
repo=repo,
repo_info=get_db().GetRepository(repo)
)
@app.route('/repository/<repo>/problems')
def repository_problems(repo):
if not repo or repo not in repometadata:
flask.abort(404)
return flask.render_template('repository-problems.html', repo=repo, problems=get_db().GetProblems(repo=repo, limit=app.config['PROBLEMS_PER_PAGE']))
@app.route('/metapackage/<name>')
def metapackage(name):
# metapackage landing page; just redirect to packages, may change in future
return flask.redirect(flask.url_for('metapackage_versions', name=name), 303)
@app.route('/metapackage/<name>/versions')
def metapackage_versions(name):
packages_by_repo = {}
for package in get_db().GetMetapackage(name):
if package.repo not in packages_by_repo:
packages_by_repo[package.repo] = []
packages_by_repo[package.repo].append(package)
for repo, packages in packages_by_repo.items():
packages_by_repo[repo] = PackagesetSortByVersions(packages)
return flask.render_template(
'metapackage-versions.html',
reponames_absent=[reponame for reponame in reponames if reponame not in packages_by_repo],
packages_by_repo=packages_by_repo,
name=name
)
@app.route('/metapackage/<name>/packages')
def metapackage_packages(name):
packages = get_db().GetMetapackage(name)
packages = sorted(packages, key=lambda package: package.repo + package.name + package.version)
return flask.render_template(
'metapackage-packages.html',
packages=packages,
name=name,
link_statuses=get_db().GetMetapackageLinkStatuses(name)
)
@app.route('/metapackage/<name>/information')
def metapackage_information(name):
packages = get_db().GetMetapackage(name)
packages = sorted(packages, key=lambda package: package.repo + package.name + package.version)
information = {}
def append_info(infokey, infoval, package):
if infokey not in information:
information[infokey] = {}
if infoval not in information[infokey]:
information[infokey][infoval] = set()
information[infokey][infoval].add(package.family)
for package in packages:
append_info('names', package.name, package)
append_info('versions', package.version, package)
append_info('repos', package.repo, package)
if package.comment:
append_info('summaries', package.comment, package)
for maintainer in package.maintainers:
append_info('maintainers', maintainer, package)
if package.category:
append_info('categories', package.category, package)
if package.homepage:
append_info('homepages', package.homepage, package)
for download in package.downloads:
append_info('downloads', download, package)
versions = PackagesetAggregateByVersions(packages)
for version in versions:
version['families'] = list(sorted(PackagesetToFamilies(version['packages'])))
return flask.render_template(
'metapackage-information.html',
information=information,
versions=versions,
name=name,
link_statuses=get_db().GetMetapackageLinkStatuses(name)
)
@app.route('/metapackage/<name>/related')
def metapackage_related(name):
names = get_db().GetRelatedMetapackages(name, limit=app.config['METAPACKAGES_PER_PAGE'])
too_many_warning = None
if len(names) == app.config['METAPACKAGES_PER_PAGE']:
too_many_warning = app.config['METAPACKAGES_PER_PAGE']
packages = get_db().GetMetapackage(names)
metapackagedata = metapackages_to_data(PackagesToMetapackages(packages))
return flask.render_template(
'metapackage-related.html',
name=name,
metapackagedata=metapackagedata,
too_many_warning=too_many_warning
)
@app.route('/metapackage/<name>/badges')
def metapackage_badges(name):
packages = get_db().GetMetapackage(name)
repos = sorted(list(set([package.repo for package in packages])))
return flask.render_template('metapackage-badges.html', name=name, repos=repos)
@app.route('/metapackage/<name>/report', methods=['GET', 'POST'])
def metapackage_report(name):
if flask.request.method == 'POST':
if get_db().GetReportsCount(name) >= app.config['MAX_REPORTS']:
flask.flash('Could not add report: too many reports for this metapackage', 'danger')
return flask.redirect(flask.url_for('metapackage_report', name=name))
need_verignore = 'need_verignore' in flask.request.form
need_split = 'need_split' in flask.request.form
need_merge = 'need_merge' in flask.request.form
comment = flask.request.form.get('comment', '').strip().replace('\r', '') or None
if comment and len(comment) > 1024:
            flask.flash('Could not add report: comment is too long', 'danger')
return flask.redirect(flask.url_for('metapackage_report', name=name))
if not need_verignore and not need_split and not need_merge and not comment:
flask.flash('Could not add report: please fill out the form', 'danger')
return flask.redirect(flask.url_for('metapackage_report', name=name))
        if comment and '<a href' in comment:
flask.flash('Spammers not welcome, HTML not allowed', 'danger')
return flask.redirect(flask.url_for('metapackage_report', name=name))
get_db().AddReport(
name,
need_verignore,
need_split,
need_merge,
comment
)
        flask.flash('Report for {} added successfully and will be processed in a few days, thank you!'.format(name), 'success')
return flask.redirect(flask.url_for('metapackage_report', name=name))
return flask.render_template(
'metapackage-report.html',
reports=get_db().GetReports(name),
name=name,
afk_till=AFKChecker(app.config['STAFF_AFK']).GetAFKEnd()
)
@app.route('/badge/vertical-allrepos/<name>.svg')
def badge_vertical_allrepos(name):
summaries = PackagesetToSummaries(get_db().GetMetapackage(name))
repostates = []
for reponame, summary in summaries.items():
repostates.append({
'name': repometadata[reponame]['desc'],
'version': summary['version'],
'versionclass': summary['versionclass']
})
return (
flask.render_template(
'badge-vertical.svg',
repositories=sorted(repostates, key=lambda repo: repo['name']),
name=name
),
{'Content-type': 'image/svg+xml'}
)
@app.route('/badge/tiny-repos/<name>.svg')
def badge_tiny_repos(name):
num_families = len(set([package.family for package in get_db().GetMetapackage(name)]))
return (
flask.render_template(
'badge-tiny.svg',
name=name,
num_families=num_families
),
{'Content-type': 'image/svg+xml'}
)
@app.route('/badge/version-for-repo/<repo>/<name>.svg')
def badge_version_for_repo(repo, name):
summaries = PackagesetToSummaries(get_db().GetMetapackage(name))
if repo not in summaries:
flask.abort(404)
return (
flask.render_template(
'badge-tiny-version.svg',
repo=repo,
version=summaries[repo]['version'],
versionclass=summaries[repo]['versionclass'],
),
{'Content-type': 'image/svg+xml'}
)
@app.route('/news')
def news():
return flask.render_template('news.html')
@app.route('/about')
def about():
return flask.render_template('about.html')
@app.route('/opensearch/metapackage.xml')
def opensearch_metapackage():
return flask.render_template('opensearch-metapackage.xml'), {'Content-type': 'application/xml'}
@app.route('/opensearch/maintainer.xml')
def opensearch_maintainer():
return flask.render_template('opensearch-maintainer.xml'), {'Content-type': 'application/xml'}
@app.route('/statistics')
@app.route('/statistics/<sorting>')
def statistics(sorting=None):
repostats = filter(lambda r: r['name'] in reponames, get_db().GetRepositories())
showmedals = True
if sorting == 'newest':
repostats = sorted(repostats, key=lambda s: s['num_metapackages_newest'], reverse=True)
elif sorting == 'pnewest':
repostats = sorted(repostats, key=lambda s: s['num_metapackages_newest'] / (s['num_metapackages'] or 1), reverse=True)
elif sorting == 'outdated':
repostats = sorted(repostats, key=lambda s: s['num_metapackages_outdated'], reverse=True)
elif sorting == 'poutdated':
repostats = sorted(repostats, key=lambda s: s['num_metapackages_outdated'] / (s['num_metapackages'] or 1), reverse=True)
elif sorting == 'total':
repostats = sorted(repostats, key=lambda s: s['num_metapackages'], reverse=True)
else:
sorting = 'name'
repostats = sorted(repostats, key=lambda s: s['name'])
showmedals = False
return flask.render_template(
'statistics.html',
sorting=sorting,
repostats=repostats,
showmedals=showmedals,
repostats_old={}, # {repo['name']: repo for repo in get_db().GetRepositoriesHistoryAgo(60 * 60 * 24 * 7)},
num_metapackages=get_db().GetMetapackagesCount()
)
def graph_generic(getgraph, color, suffix=''):
# use autoscaling until history is filled
numdays = 14
width = 1140
height = 400
gwidth = width - 50
gheight = height - 20
period = 60 * 60 * 24 * numdays
graph = getgraph(period)
return (
flask.render_template(
'graph.svg',
width=width,
height=height,
gwidth=gwidth,
gheight=gheight,
points=graph.GetPoints(period),
yticks=graph.GetYTicks(suffix),
color=color,
numdays=numdays,
x=lambda x: int((1.0 - x) * gwidth) + 0.5,
y=lambda y: int(10.0 + (1.0 - y) * (gheight - 20.0)) + 0.5,
),
{'Content-type': 'image/svg+xml'}
)
def graph_repo_generic(repo, getvalue, color, suffix=''):
if repo not in reponames:
flask.abort(404)
def GetGraph(period):
graph = GraphProcessor()
for histentry in get_db().GetRepositoriesHistoryPeriod(period, repo):
try:
graph.AddPoint(histentry['timedelta'], getvalue(histentry['snapshot']))
            except Exception:
pass # ignore missing keys, division errors etc.
return graph
return graph_generic(GetGraph, color, suffix)
def graph_total_generic(getvalue, color, suffix=''):
def GetGraph(period):
graph = GraphProcessor()
for histentry in get_db().GetStatisticsHistoryPeriod(period):
try:
graph.AddPoint(histentry['timedelta'], getvalue(histentry['snapshot']))
            except Exception:
pass # ignore missing keys, division errors etc.
return graph
return graph_generic(GetGraph, color, suffix)
@app.route('/graph/repo/<repo>/metapackages_total.svg')
def graph_repo_metapackages_total(repo):
return graph_repo_generic(repo, lambda s: s['num_metapackages'], '#000000')
@app.route('/graph/repo/<repo>/metapackages_newest.svg')
def graph_repo_metapackages_newest(repo):
return graph_repo_generic(repo, lambda s: s['num_metapackages_newest'], '#5cb85c')
@app.route('/graph/repo/<repo>/metapackages_newest_percent.svg')
def graph_repo_metapackages_newest_percent(repo):
return graph_repo_generic(repo, lambda s: s['num_metapackages_newest'] / s['num_metapackages'] * 100.0, '#5cb85c', '%')
@app.route('/graph/repo/<repo>/metapackages_outdated.svg')
def graph_repo_metapackages_outdated(repo):
return graph_repo_generic(repo, lambda s: s['num_metapackages_outdated'], '#d9534f')
@app.route('/graph/repo/<repo>/metapackages_outdated_percent.svg')
def graph_repo_metapackages_outdated_percent(repo):
return graph_repo_generic(repo, lambda s: s['num_metapackages_outdated'] / s['num_metapackages'] * 100.0, '#d9534f', '%')
@app.route('/graph/repo/<repo>/metapackages_unique.svg')
def graph_repo_metapackages_unique(repo):
return graph_repo_generic(repo, lambda s: s['num_metapackages_unique'], '#5bc0de')
@app.route('/graph/repo/<repo>/metapackages_unique_percent.svg')
def graph_repo_metapackages_unique_percent(repo):
return graph_repo_generic(repo, lambda s: s['num_metapackages_unique'] / s['num_metapackages'] * 100.0, '#5bc0de', '%')
@app.route('/graph/repo/<repo>/problems.svg')
def graph_repo_problems(repo):
return graph_repo_generic(repo, lambda s: s['num_problems'], '#c00000')
@app.route('/graph/repo/<repo>/problems_per_metapackage.svg')
def graph_repo_problems_per_metapackage(repo):
return graph_repo_generic(repo, lambda s: s['num_problems'] / s['num_metapackages'], '#c00000')
@app.route('/graph/repo/<repo>/maintainers.svg')
def graph_repo_maintainers(repo):
return graph_repo_generic(repo, lambda s: s['num_maintainers'], '#c000c0')
@app.route('/graph/repo/<repo>/packages_per_maintainer.svg')
def graph_repo_packages_per_maintainer(repo):
return graph_repo_generic(repo, lambda s: s['num_packages'] / s['num_maintainers'], '#c000c0')
@app.route('/graph/total/packages.svg')
def graph_total_packages():
return graph_total_generic(lambda s: s['num_packages'], '#000000')
@app.route('/graph/total/metapackages.svg')
def graph_total_metapackages():
return graph_total_generic(lambda s: s['num_metapackages'], '#000000')
@app.route('/graph/total/maintainers.svg')
def graph_total_maintainers():
return graph_total_generic(lambda s: s['num_maintainers'], '#c000c0')
@app.route('/graph/total/problems.svg')
def graph_total_problems():
return graph_total_generic(lambda s: s['num_problems'], '#c00000')
def clever_ceil(value):
if value == 0:
return 1
tick = math.pow(10, math.ceil(math.log(value, 10) - 2))
return int(math.ceil(value / tick) * tick)
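# Worked example (added for illustration): for value=1234 the tick is
# 10**ceil(log10(1234) - 2) == 100, so clever_ceil(1234) == 1300; by the
# special case above, clever_ceil(0) == 1.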
def map_repo_generic(repo2coords, namex='X', namey='Y', unitx='', unity=''):
snapshots = [
#get_db().GetRepositoriesHistoryAgo(60 * 60 * 24 * 30)
]
points = []
for repo in get_db().GetRepositories():
if not repo['name'] in reponames:
continue
point = {
'text': repometadata[repo['name']]['desc'],
'coords': list(map(repo2coords, [repo] + [snapshot[repo['name']] for snapshot in snapshots if repo['name'] in snapshot]))
}
if 'color' in repometadata[repo['name']]:
point['color'] = repometadata[repo['name']]['color']
points.append(point)
width = 1140
height = 800
return (
flask.render_template(
'map.svg',
width=width,
height=height,
minx=0,
miny=0,
maxx=clever_ceil(max(map(lambda p: p['coords'][0]['x'], points))),
maxy=clever_ceil(max(map(lambda p: p['coords'][0]['y'], points))),
namex=namex,
namey=namey,
unitx=unitx,
unity=unity,
points=points,
),
{'Content-type': 'image/svg+xml'}
)
@app.route('/graph/map_repo_size_fresh.svg')
def graph_map_repo_size_fresh():
def repo2coords(repo):
return {
'x': repo['num_metapackages'],
'y': repo['num_metapackages_newest']
}
return map_repo_generic(
repo2coords,
namex='Number of packages in repository',
namey='Number of fresh packages in repository'
)
@app.route('/graph/map_repo_size_freshness.svg')
def graph_map_repo_size_freshness():
def repo2coords(repo):
return {
'x': repo['num_metapackages'],
'y': 100.0 * repo['num_metapackages_newest'] / repo['num_metapackages'] if repo['num_metapackages'] else 0
}
return map_repo_generic(
repo2coords,
namex='Number of packages in repository',
namey='Percentage of fresh packages',
unity='%'
)
@app.route('/api/v1/metapackage/<name>')
def api_v1_metapackage(name):
return (
json.dumps(list(map(
api_v1_package_to_json,
get_db().GetMetapackage(name)
))),
{'Content-type': 'application/json'}
)
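# Usage sketch (added for illustration; the hostname and metapackage name are
# placeholders):
#   curl https://example.org/api/v1/metapackage/firefox
# returns a JSON array with one object per package, as produced by
# api_v1_package_to_json.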
@app.route('/api')
@app.route('/api/v1')
def api_v1():
return flask.render_template('api.html', per_page=app.config['METAPACKAGES_PER_PAGE'])
@app.route('/api/v1/metapackages/')
@app.route('/api/v1/metapackages/all/')
@app.route('/api/v1/metapackages/all/<bound>/')
def api_v1_metapackages_all(bound=None):
return api_v1_metapackages_generic(bound)
@app.route('/api/v1/metapackages/unique/')
@app.route('/api/v1/metapackages/unique/<bound>/')
def api_v1_metapackages_unique(bound=None):
return api_v1_metapackages_generic(bound, InNumFamiliesQueryFilter(less=1))
@app.route('/api/v1/metapackages/in-repo/<repo>/')
@app.route('/api/v1/metapackages/in-repo/<repo>/<bound>/')
def api_v1_metapackages_in_repo(repo, bound=None):
return api_v1_metapackages_generic(bound, InRepoQueryFilter(repo))
@app.route('/api/v1/metapackages/outdated-in-repo/<repo>/')
@app.route('/api/v1/metapackages/outdated-in-repo/<repo>/<bound>/')
def api_v1_metapackages_outdated_in_repo(repo, bound=None):
return api_v1_metapackages_generic(bound, OutdatedInRepoQueryFilter(repo))
@app.route('/api/v1/metapackages/not-in-repo/<repo>/')
@app.route('/api/v1/metapackages/not-in-repo/<repo>/<bound>/')
def api_v1_metapackages_not_in_repo(repo, bound=None):
return api_v1_metapackages_generic(bound, NotInRepoQueryFilter(repo))
@app.route('/api/v1/metapackages/candidates-in-repo/<repo>/')
@app.route('/api/v1/metapackages/candidates-in-repo/<repo>/<bound>/')
def api_v1_metapackages_candidates_in_repo(repo, bound=None):
return api_v1_metapackages_generic(bound, NotInRepoQueryFilter(repo), InNumFamiliesQueryFilter(more=5))
@app.route('/api/v1/metapackages/unique-in-repo/<repo>/')
@app.route('/api/v1/metapackages/unique-in-repo/<repo>/<bound>/')
def api_v1_metapackages_unique_in_repo(repo, bound=None):
return api_v1_metapackages_generic(bound, InNumFamiliesQueryFilter(less=1))
@app.route('/api/v1/metapackages/by-maintainer/<maintainer>/')
@app.route('/api/v1/metapackages/by-maintainer/<maintainer>/<bound>/')
def api_v1_metapackages_by_maintainer(maintainer, bound=None):
return api_v1_metapackages_generic(bound, MaintainerQueryFilter(maintainer))
@app.route('/api/v1/metapackages/outdated-by-maintainer/<maintainer>/')
@app.route('/api/v1/metapackages/outdated-by-maintainer/<maintainer>/<bound>/')
def api_v1_metapackages_outdated_by_maintainer(maintainer, bound=None):
return api_v1_metapackages_generic(bound, MaintainerOutdatedQueryFilter(maintainer))
@app.route('/api/v1/repository/<repo>/problems')
def api_v1_repository_problems(repo):
return (
json.dumps(get_db().GetProblems(repo=repo)),
{'Content-type': 'application/json'}
)
@app.route('/api/v1/maintainer/<maintainer>/problems')
def api_v1_maintainer_problems(maintainer):
return (
json.dumps(get_db().GetProblems(maintainer=maintainer)),
{'Content-type': 'application/json'}
)
if __name__ == '__main__':
if app.config['PROFILE']:
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
app.run(debug=True)
else:
app.run()
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
#
# This file was adapted from https://github.com/venth/aws-adfs. Thanks to https://github.com/venth for his work on
# figuring this out
#
from builtins import object
from future import standard_library
standard_library.install_aliases()
import os
import re
import sys
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
import lxml.etree as ET
import requests
from bs4 import BeautifulSoup
from ..util.consoleeffects import Colors
# idpentryurl: The initial url that starts the authentication process.
adfs_entry_url = 'https://awslogin.byu.edu:443/adfs/ls/IdpInitiatedSignOn.aspx?loginToRp=urn:amazon:webservices'
class AdfsAuthResult(object):
def __init__(self, action_url, context, signed_response, session):
self.action_url = action_url
self.context = context
self.signed_response = signed_response
self.session = session
def authenticate(username, password):
# Initiate session handler
session = requests.Session()
# Get the ADFS sign-in page HTML
login_page_response = session.get(adfs_entry_url, verify=True)
# Parse the response and extract all the necessary values
# in order to build a dictionary of all of the form values the IdP expects
login_html_soup = BeautifulSoup(login_page_response.text, "lxml")
auth_payload = _get_auth_payload(login_html_soup, username, password)
# From the form action for the login form, build the URL used to submit the login request
adfs_form_submit_url = _get_login_submit_url(login_html_soup)
# Login with the ADFS credentials
login_response = session.post(
adfs_form_submit_url, data=auth_payload, verify=True)
login_response_html_soup = BeautifulSoup(login_response.text, 'lxml')
# Check that authentication succeeded. Exit with error if it didn't
_check_adfs_authentication_success(login_response_html_soup)
# Perform DUO MFA
auth_signature, duo_request_signature = _authenticate_duo(login_response_html_soup, True, session)
signed_response = _get_signed_response(auth_signature, duo_request_signature)
context = _context(login_response_html_soup)
action_url = _action_url_on_validation_success(login_response_html_soup)
return AdfsAuthResult(action_url, context, signed_response, session)
_headers = {
'Accept-Language': 'en',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Accept': 'text/plain, */*; q=0.01',
}
def _action_url_on_validation_success(login_response_html_soup):
options_form = login_response_html_soup.find('form', id='options')
return options_form['action']
def _get_signed_response(auth_signature, duo_request_signature):
return '{}:{}'.format(auth_signature, _app(duo_request_signature))
def _app(request_signature):
    app_pattern = re.compile(r".*(APP\|[^:]+)")
m = app_pattern.search(request_signature)
return m.group(1)
def _context(login_response_html_soup):
context_input = login_response_html_soup.find('input', id='context')
return context_input['value']
def _get_auth_payload(login_html_soup, username, password):
auth_payload = {}
for inputtag in login_html_soup.find_all(re.compile('(INPUT|input)')):
name = inputtag.get('name', '')
value = inputtag.get('value', '')
if "user" in name.lower():
# Make an educated guess that this is the right field for the username
auth_payload[name] = username
elif "pass" in name.lower():
# Make an educated guess that this is the right field for the password
auth_payload[name] = password
else:
# Simply populate the parameter with the existing value (picks up hidden fields in the login form)
auth_payload[name] = value
return auth_payload
def _get_login_submit_url(login_html_soup):
parsed_adfs_login_url = urlparse(adfs_entry_url)
adfs_form_submit_url = parsed_adfs_login_url.scheme + "://" + parsed_adfs_login_url.netloc
for inputtag in login_html_soup.find_all(re.compile('(FORM|form)')):
action = inputtag.get('action')
loginid = inputtag.get('id')
if (action and loginid == "loginForm"):
adfs_form_submit_url += action
return adfs_form_submit_url
def _check_adfs_authentication_success(login_response_html_soup):
login_form_tag = login_response_html_soup.find('form', id='loginForm')
if login_form_tag: # Login form present means the authentication failed
auth_error = login_form_tag.find('span', id='errorText')
print(auth_error.string)
exit(1)
def _authenticate_duo(duo_page_html_soup, roles_page_url, session):
duo_host = _duo_host(duo_page_html_soup)
duo_request_signature = _duo_request_signature(duo_page_html_soup)
print("Sending request for authentication")
(sid, preferred_factor, preferred_device), initiated = _initiate_authentication(
duo_host,
duo_request_signature,
roles_page_url,
session
)
if initiated:
transaction_id = _begin_authentication_transaction(
duo_host,
sid,
preferred_factor,
preferred_device,
session
)
print("Waiting for additional authentication")
_verify_that_code_was_sent(
duo_host,
sid,
transaction_id,
session
)
_verify_auth_result(
duo_host,
sid,
transaction_id,
session
)
auth_signature = _authentication_result(
duo_host,
sid,
transaction_id,
session,
)
return auth_signature, duo_request_signature
else:
raise RuntimeError("DUO Transaction Not Initiated")
def _authentication_result(
duo_host,
sid,
duo_transaction_id,
session
):
status_for_url = "https://{}/frame/status/{}".format(duo_host, duo_transaction_id)
response = session.post(
status_for_url,
verify=True,
headers=_headers,
data={
'sid': sid,
'txid': duo_transaction_id
}
)
if response.status_code != 200:
raise RuntimeError(
u'Issues during retrieval of a code entered into '
u'the device. The error response {}'.format(
response
)
)
json_response = response.json()
if json_response['stat'] != 'OK':
raise RuntimeError(
u'There was an issue during retrieval of a code entered into the device.'
u' The error response: {}'.format(
response.text
)
)
auth_signature = response.json()['response']['cookie']
return auth_signature
def _verify_auth_result(
duo_host,
sid,
duo_transaction_id,
session
):
status_for_url = "https://{}/frame/status".format(duo_host)
response = session.post(
status_for_url,
verify=True,
headers=_headers,
data={
'sid': sid,
'txid': duo_transaction_id
}
)
if response.status_code != 200:
raise RuntimeError(
u'Issues during retrieval of a code entered into '
u'the device. The error response {}'.format(
response
)
)
json_response = response.json()
if json_response['stat'] != 'OK':
raise RuntimeError(
u'There was an issue during retrieval of a code entered into the device.'
u' The error response: {}'.format(
response.text
)
)
if json_response['response']['status_code'] != 'allow':
if json_response['response']['reason'] == 'User mistake' and json_response['response']['status'] == 'Login request denied.':
print('{}Duo Auth Denied{}'.format(Colors.red, Colors.normal))
sys.exit(1)
raise RuntimeError(
u'There was an issue during retrieval of a code entered into the device.'
u' The error response: {}'.format(
response.text
)
)
def _verify_that_code_was_sent(duo_host, sid, duo_transaction_id, session):
status_for_url = "https://{}/frame/status".format(duo_host)
response = session.post(
status_for_url,
verify=True,
headers=_headers,
data={
'sid': sid,
'txid': duo_transaction_id
}
)
if response.status_code != 200:
raise RuntimeError(
            u'Issues during sending code to the device. The error response {}'.format(
response
)
)
json_response = response.json()
if json_response['stat'] != 'OK':
raise RuntimeError(
u'There was an issue during sending code to the device. The error response: {}'.format(
response.text
)
)
if json_response['response']['status_code'] != 'pushed':
raise RuntimeError(
u'There was an issue during sending code to the device. The error response: {}'.format(
response.text
)
)
def _tx(request_signature):
    tx_pattern = re.compile(r"(TX\|[^:]+):APP.+")
m = tx_pattern.search(request_signature)
return m.group(1)
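# Worked example (added for illustration; the values are made up): a Duo
# sig_request has the shape "TX|<token>:APP|<token>". With
# sig = "TX|abc:APP|def", _tx(sig) returns "TX|abc" and _app(sig) returns
# "APP|def"; _get_signed_response then joins the Duo auth cookie with the
# APP part as "<cookie>:APP|def".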
def _initiate_authentication(duo_host, duo_request_signature, roles_page_url, session):
prompt_for_url = 'https://{}/frame/web/v1/auth'.format(duo_host)
parent = "{}{}".format(
roles_page_url,
"&java_version="
"&flash_version="
"&screen_resolution_width=1280"
"&screen_resolution_height=800"
"&color_depth=24"
)
response = session.post(
prompt_for_url,
verify=True,
headers={
'Host': duo_host,
'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:52.0) Gecko/20100101 Firefox/52.0",
'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
'Accept-Language': "en-US,en;q=0.5",
'Accept-Encoding': "gzip, deflate, br",
'DNT': "1",
'Upgrade-Insecure-Requests': "1",
'Content-Type': "application/x-www-form-urlencoded",
},
allow_redirects=True,
params={
'tx': _tx(duo_request_signature),
'parent': parent,
'v': '2.3',
},
data={
'parent': parent,
'java_version': '',
'flash_version': '22.0.0.209',
'screen_resolution_width': '1280',
'screen_resolution_height': '800',
'color_depth': '24',
}
)
if response.status_code != 200 or response.url is None:
return None, False
o = urlparse(response.url)
query = parse_qs(o.query)
if 'sid' not in query:
return None, False
sid = query['sid']
html_response = ET.fromstring(response.text, ET.HTMLParser())
preferred_factor = _preferred_factor(html_response)
preferred_device = _preferred_device(html_response)
return (sid, preferred_factor, preferred_device), True
def _preferred_factor(html_response):
preferred_factor_query = './/input[@name="preferred_factor"]'
element = html_response.find(preferred_factor_query)
return element.get('value')
def _preferred_device(html_response):
preferred_device_query = './/input[@name="preferred_device"]'
element = html_response.find(preferred_device_query)
return element.get('value')
def _begin_authentication_transaction(duo_host, sid, preferred_factor, preferred_device, session):
prompt_for_url = "https://{}/frame/prompt".format(duo_host)
response = session.post(
prompt_for_url,
verify=True,
headers=_headers,
data={
'sid': sid,
'factor': preferred_factor,
'device': preferred_device,
'out_of_date': ''
}
)
if response.status_code != 200:
raise RuntimeError(
u'Issues during beginning of the authentication process. The error response {}'.format(
response
)
)
json_response = response.json()
if json_response['stat'] != 'OK':
if json_response['message'] == 'Unknown authentication method.':
print("{}Generic Authentication Failure.\n{}Are you enrolled in Duo MFA?\nDid you enable Duo automatic push?{}".format(Colors.lred, Colors.lyellow, Colors.normal))
os._exit(1)
else:
raise RuntimeError(
u'Cannot begin authentication process. The error response: {}'.format(response.text)
)
return json_response['response']['txid']
def _duo_host(duo_page_html_soup):
duo_script = duo_page_html_soup.find('form', id='duo_form').find_next_sibling('script').string
duo_host_pattern = re.compile("'host': '([^']+)'")
m = duo_host_pattern.search(duo_script)
return m.group(1)
def _duo_request_signature(duo_page_html_soup):
duo_script = duo_page_html_soup.find('form', id='duo_form').find_next_sibling('script').string
duo_signature_pattern = re.compile("'sig_request': '([^']+)'")
m = duo_signature_pattern.search(duo_script)
return m.group(1)
import re
from datetime import date, timedelta
from bs4 import BeautifulSoup
from .utils.functions import get
# public interface -----------------------------------------------------
async def get_cafeteria(when: int):
day = (date.today() + timedelta(days=when)).day
try:
text: str = _cache[day]
except KeyError:
parser = _Cafeteria(when)
text = await parser.get_cafeteria()
_cache[day] = text
return text
# internals ------------------------------------------------------------
_cache = {}
class _Cafeteria:
def __init__(self, when: int):
self.when = when
self.day = str((date.today() + timedelta(days=when)).day)
        self.date_ = (
            "오늘"  # "today"
            if when == 0
            else ("내일" if when == 1 else ("모레" if when == 2 else None))  # "tomorrow" / "the day after tomorrow"
        )
    async def get_cafeteria(self):
        josa = "는" if self.when == 2 else "은"  # Korean topic particle chosen to match the date word
        if (date.today() + timedelta(days=self.when)).weekday() > 4:
            text = f"{self.date_}({self.day}일){josa} 휴일이라 급식이 없네요."  # "{date} ({day}) is a holiday, so there is no school meal."
else:
day = "0" + self.day if len(self.day) == 1 else self.day
month = str((date.today() + timedelta(days=self.when)).month)
day_and_month = ("0" + month if len(month) == 1 else month) + "." + day
response = await get("https://singil.sen.ms.kr/index.do")
soup = BeautifulSoup(response, "lxml")
text = f"{self.date_}({self.day}일){josa} 휴일이라 급식이 없네요."
for i in range(1, 4):
base_selector = f".school_menu_info > ul:nth-child({i}) > li:nth-child(1) > dl:nth-child(1) > "
try:
is_today_caferia = soup.select_one(
base_selector + "dt:nth-child(1) > a:nth-child(1)"
).text.endswith(day_and_month)
except AttributeError:
text = "제가 찾아보니 아직 학교 급식 메뉴가 업데이트 되지 않았네요. 잠시 후 다시 물어봐 주세요."
else:
if is_today_caferia:
header = [f"<{self.date_}({self.day}일) 급식>"]
cafeteria: list[str] = re.findall(
"[가-힣]+[(]?[ &,가-힣]+[)]?",
soup.select_one(
base_selector + "dd:nth-child(2) > p:nth-child(2)"
).text,
)
for j in cafeteria:
menu = j.replace("(", "\n(").replace("&", "\n")
header.append(menu)
text = "\n".join(header)
else:
continue
break
return text
#!/usr/bin/python
import os
import re
import sys
from junit_xml import TestSuite, TestCase
class Tap2JUnit:
""" This class reads a subset of TAP (Test Anything protocol)
and writes JUnit XML.
Two line formats are read:
1. (not )?ok testnum testname
2. # diagnostic output
1. Starts a new test result.
2. Adds diagnostic information to the last read result
Any 2. lines found before a 1. line are ignored.
Any lines not matching either pattern are ignored.
This script was written because none of the tap2junit converters
I could find inserted the failure output into the junit correctly.
And IMO a failed test with no indication of why is useless.
"""
def __init__(self, test_suite, test_class):
self.test_suite = test_suite
self.test_class = test_class
# This Regex matches a (not) ok testnum testname line from the
# TAP specification, using named capture groups
self.result_re = re.compile(
r"^(?P<result>not )?ok\s*(?P<testnum>[0-9])+\s*(?P<testname>.*)$")
self.comment_re = re.compile(r"^\s*#")
self.case = None
self.cases = []
def process_line(self, line):
""" This funuction reads a tap stream line by line
and groups the diagnostic output with the relevant
result in a dictionary.
Outputs a list of dicts, one for each result
"""
match = self.result_re.match(line)
if match:
# This line starts a new test result
self.case = match.groupdict()
self.case['stderr'] = []
self.cases.append(self.case)
return
match = self.comment_re.match(line)
if match and self.case:
# This line contains diagnostic
# output from a failed test
self.case['stderr'].append(re.sub(r'^\s*#', '', line).rstrip())
def convert(self, infile=sys.stdin, out=sys.stdout):
""" Reads a subset of TAP and writes JUnit XML """
# read lines
for line in infile.readlines():
self.process_line(line)
# Convert line dicts to test case objects
case_objs = []
for case in self.cases:
case_obj = TestCase(case['testname'], self.test_class, 0, '', '')
if case['result'] == 'not ':
case_obj.add_failure_info(output="\n".join(case['stderr']))
case_objs.append(case_obj)
# Combine test cases into a suite
suite = TestSuite(self.test_suite, case_objs)
# Write the suite out as XML
TestSuite.to_file(out, [suite])
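# Hedged usage sketch (not part of the original module; the suite/class names
# and the sample TAP lines are illustrative): converts an in-memory TAP stream
# and returns the resulting JUnit XML string.
def _demo_convert():
    import io
    tap = io.StringIO(
        "ok 1 addition works\n"
        "not ok 2 subtraction works\n"
        "# expected 1\n"
        "# got 2\n"
    )
    out = io.StringIO()
    Tap2JUnit('demo-suite', 'demo-class').convert(infile=tap, out=out)
    return out.getvalue()  # JUnit XML; test 2 is a failure carrying the diagnostics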
def main():
t2j = Tap2JUnit(
os.environ.get('JUNIT_TEST_SUITE', 'tap2junit'),
os.environ.get('JUNIT_TEST_CLASS', 'tap2junit')
)
t2j.convert()
if __name__ == "__main__":
main()
import module_dictionary
tmlistfile = open('tm.list', 'r')
dict_file = open('mdictionary.py', 'w')
dict_list = module_dictionary.modules_dicts
for line in tmlistfile:
line = line.replace('\n','')
new_dict = {
'OPCODE': line,
'NAME': 'null', # could ask for user input here for each one ??
'DESCRIPTION': 'null', # ^
'SUBSYSTEM': 'null', # ^
'VALUE': 'null' # ^
}
if new_dict not in dict_list:
# name = input('What is the name for {0}'.format(line))
dict_list.append(new_dict)
else:
# Exists, so just pass.
pass
dict_file.write('modules_dicts = ')
dict_file.write(str(dict_list))
tmlistfile.close()
dict_file.close()
class Solution:
def splitIntoFibonacci(self, S):
"""
:type S: str
:rtype: List[int]
"""
def split_to_fib(i, j):
""
a = S[:i]
b = S[i:j]
if self.valid(a) and self.valid(b):
a, b = int(a), int(b)
ans = [a, b]
while j < len(S):
c = a + b
if c > (1 << 31) - 1:
return []
c_str = str(c)
if c_str == S[j : j + len(c_str)]:
a, b = b, c
ans.append(c)
j += len(c_str)
else:
return []
return ans
else:
return []
for i in range(1, len(S)):
for j in range(i + 1, len(S)):
subs = split_to_fib(i, j)
if subs:
return subs
return []
def valid(self, sub):
"Check if sub string is valid."
return not ((len(sub) > 1 and sub[0] == '0') or
int(sub) > (1 << 31) - 1)
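# Hedged usage sketch (added for illustration): "123456579" splits into the
# Fibonacci-like sequence [123, 456, 579]; "0123" yields [] because numbers
# other than 0 itself must not have a leading zero.
if __name__ == "__main__":
    solver = Solution()
    print(solver.splitIntoFibonacci("123456579"))  # [123, 456, 579]
    print(solver.splitIntoFibonacci("0123"))       # []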
from __future__ import print_function
from mongoalchemy.py3compat import *
from nose.tools import *
from mongoalchemy.session import Session
from mongoalchemy.document import Document, Index, FieldNotRetrieved
from mongoalchemy.fields import *
from mongoalchemy.query import BadQueryException, Query, BadResultException
from mongoalchemy.query_expression import Q
from test.util import known_failure
# TODO: Test all operators to make sure wrap is called on their values
class T(Document):
i = IntField()
j = IntField(required=False)
l = ListField(IntField(), required=False)
a = IntField(required=False, db_field='aa')
index = Index().ascending('i')
class T2(Document):
t = DocumentField(T)
class T3(Document):
t_list = ListField(DocumentField(T))
class NestedChild(Document):
i = IntField()
class NestedParent(Document):
l = ListField(DocumentField(NestedChild))
def get_session():
s = Session.connect('unit-testing')
s.clear_collection(T, T2, T3)
return s
#
# Test Query Fields
#
@raises(BadQueryException)
def test_sort_by_same_key():
s = get_session()
sorted_query = s.query(T).ascending(T.i).descending(T.i)
def test_name_generation():
s = get_session()
assert str(T.i) == 'i'
def test_ne():
assert (T.i != T.j) == True
assert (T.i != T.i) == False
def query_field_repr_test():
assert repr(T.i) == 'QueryField(i)'
def test_nested_matching():
assert str(NestedParent.l.i) == 'l.i'
#
# Test elem_match ($elemMatch)
#
def test_elem_match_simple():
s = get_session()
q = s.query(T3).filter(T3.t_list.elem_match({'i': 1}))
assert q.query == {'t_list': {'$elemMatch': {'i': 1}}}
q = s.query(T3).filter(T3.t_list.elem_match(T.i == 1))
assert q.query == {'t_list': {'$elemMatch': {'i': 1}}}, q.query
@raises(BadQueryException)
def test_non_seq_elem_match():
s = get_session()
s.query(T).filter(T.i.elem_match({'i':1}))
@raises(BadQueryException)
def test_bad_val():
s = get_session()
s.query(T3).filter(T3.t_list.elem_match(None))
#
# QueryField Tests
#
@raises(AttributeError)
def test_bad_query_field_name():
T.q
@raises(AttributeError)
def test_subitem_of_no_subitem():
T.i.i
def qf_parent_test():
assert str(T2.t.i._get_parent()) == 't'
@raises(BadQueryException)
def qf_bad_subfield_test():
assert str(T2.t.q) == 't.q'
def qf_db_name_test():
assert str(T.a) == 'aa', str(T.a)
#
# Value Encoding Type tests
#
def test_value_type_wrapping():
class User(Document):
bio = SetField(StringField())
s = get_session()
s.clear_collection(User)
q = s.query(User).in_(User.bio, 'MongoAlchemy').query
assert q == { 'bio' : { '$in' : ['MongoAlchemy'] } }, q
q = s.query(User).in_(User.bio, set(['MongoAlchemy'])).query
assert q == { 'bio' : { '$in' : [['MongoAlchemy']] } }, q
def test_value_type_wrapping_2():
class User(Document):
bio = KVField(StringField(), IntField())
s = get_session()
s.clear_collection(User)
q = s.query(User).in_(User.bio.k, 'MongoAlchemy').query
assert q == { 'bio.k' : { '$in' : ['MongoAlchemy'] } }, q
q = s.query(User).in_(User.bio, { 'MongoAlchemy' : 5}).query
assert q == { 'bio' : { '$in' : [[{'k': 'MongoAlchemy', 'v': 5}]] } }, q
@raises(BadValueException)
def test_value_type_wrapping_wrong_twice():
class User(Document):
bio = SetField(StringField())
s = get_session()
s.query(User).in_(User.bio, 1).query == { 'bio' : { '$in' : ['MongoAlchemy'] } }
def list_in_operator_test():
class User(Document):
ints = ListField(IntField())
s = get_session()
s.clear_collection(User)
q = s.query(User).filter_by(ints=3).query
assert q == { 'ints' : 3 }, q
q = s.query(User).filter(User.ints == 3).query
assert q == { 'ints' : 3 }, q
q = s.query(User).filter(User.ints == [3]).query
assert q == { 'ints' : [3] }, q
#
# Geo Tests
#
def test_geo():
class Place(Document):
config_collection_name = 'places4'
loc = GeoField()
val = IntField()
index = Index().geo2d(loc, min=-100, max=100)
s = Session.connect('unit-testing')
s.clear_collection(Place)
s.save(Place(loc=(1,1), val=2))
s.save(Place(loc=(5,5), val=4))
s.save(Place(loc=(30,30 ), val=5))
x = s.query(Place).filter(Place.loc.near(0, 1))
assert x.first().val == 2, x.query
xs = s.query(Place).filter(Place.loc.near(1, 1, max_distance=2)).all()
assert len(xs) == 1, xs
xs = s.query(Place).filter(Place.loc.near_sphere(1, 1, max_distance=50)).all()
assert len(xs) == 3
q = s.query(Place).filter(Place.loc.within_box([-2, -2], [2, 2]))
assert len(q.all()) == 1, q.query
q = s.query(Place).filter(Place.loc.within_radius(0, 0, 2))
assert len(q.all()) == 1, q.query
q = s.query(Place).filter(Place.loc.within_polygon(
[[-2, 0], [2, 0], [0, 2], [0, -2]]
))
assert len(q.all()) == 1, q.query
q = s.query(Place).filter(Place.loc.within_radius_sphere(30, 30, 0.0001))
assert len(q.all()) == 1, q.all()
def test_geo_haystack():
class Place(Document):
config_collection_name = 'places'
loc = GeoField()
val = IntField()
index = Index().geo_haystack(loc, bucket_size=100).descending('val')
s = Session.connect('unit-testing')
s.clear_collection(Place)
s.save(Place(loc=(1,1), val=2))
s.save(Place(loc=(5,5), val=4))
#
# Regex Tests
#
def test_regex():
class Spell(Document):
name = StringField()
s = Session.connect('unit-testing')
s.clear_collection(Spell)
s.save(Spell(name='Wingardium Leviosa'))
s.save(Spell(name='abracadabra'))
s.save(Spell(name='ab.*ra.ca.da.*bra'))
s.save(Spell(name='Alacazam'))
# check ignore case True
q = s.query(Spell).filter(Spell.name.startswith('wingardium', ignore_case=True))
assert q.first().name == 'Wingardium Leviosa'
# check ignore case False (default)
xs = s.query(Spell).filter(Spell.name.startswith('wingardium')).all()
assert len(xs) == 0
# check regex-free startswith and endswith
assert len(s.query(Spell).filter(Spell.name.startswith('ab.*ra.ca')).all()) == 1
assert len(s.query(Spell).filter(Spell.name.endswith('da.*bra')).all()) == 1
# check regex
assert len(s.query(Spell).filter(Spell.name.regex(r'^[Aa]\w*[am]$')).all()) == 2
# check regex with options
assert len(s.query(Spell).filter(Spell.name.regex(r'^[a]\w*[am]$', options='i')).all()) == 2
#
# Comparator Tests
#
@raises(BadValueException)
def qf_bad_value_equals_test():
T2.t.i == '3'
@raises(BadValueException)
def qf_bad_value_compare_test():
T2.t.i < '3'
def qf_dot_f_test():
class T3(Document):
i = IntField()
j = IntField(required=False)
l = ListField(IntField(), required=False)
a = IntField(required=False, db_field='aa')
index = Index().ascending('i')
class T4(Document):
t = DocumentField(T3)
assert str(T4.t.i) == 't.i'
def test_not():
not_q = Query(T, None).filter( ~(T.i == 3) ).query
assert not_q == { 'i' : {'$ne' : 3} }, not_q
not_q = Query(T, None).not_(T.i > 4).query
assert not_q == { 'i' : {'$not': { '$gt': 4}} }, not_q
@raises(BadQueryException)
def test_not_with_malformed_field():
class Any(Document):
i = AnythingField()
not_q = Query(Any, None).not_(Any.i == { '$gt' : 4, 'garbage' : 5})
def test_not_assign_dict_malformed_field():
class Any(Document):
i = AnythingField()
not_q = Query(Any, None).not_(Any.i == { 'a' : 4, 'b' : 5}).query
assert not_q == { 'i' : { '$ne' : { 'a' : 4, 'b' : 5 } } }, not_q
def test_not_db_test():
s = get_session()
s.save(T(i=5))
assert s.query(T).not_(T.i == 5).first() is None
assert s.query(T).not_(T.i > 6).one().i == 5
def test_or():
q = Query(T, None)
want = { '$or' : [{'i' : 3}, {'i' : 4}, {'i' : 5}] }
assert q.filter((T.i == 3) | (T.i == 4) | (T.i == 5)).query == want
assert Query(T, None).or_(T.i == 3, T.i == 4, T.i == 5).query == want
def test_in():
q = Query(T, None)
assert q.in_(T.i, 1, 2, 3).query == {'i' : {'$in' : [1,2,3]}}, q.in_(T.i, 1, 2, 3).query
assert q.filter(T.i.in_(1, 2, 3)).query == {'i' : {'$in' : [1,2,3]}}
def test_nin():
q = Query(T, None)
assert q.nin(T.i, 1, 2, 3).query == {'i' : {'$nin' : [1,2,3]}}, q.nin(T.i, 1, 2, 3).query
assert q.filter(T.i.nin(1, 2, 3)).query == {'i' : {'$nin' : [1,2,3]}}
def test_exists():
q = Query(T, None)
assert q.filter(T.i.exists(False)).query == {'i': {'$exists': False}}
assert q.filter(T.i.exists(True)).query == {'i': {'$exists': True}}
# free-form queries
def test_ffq():
s = get_session()
q = s.query('T')
assert q.filter(Q.name == 3).query == {'name' : 3}
q = s.query('T').filter(Q.name.first == 'jeff').query
assert q == {'name.first' : 'jeff'}, q
s.save(T(i=4))
assert s.query('T').count() == 1
assert s.query('T').filter(Q.i == 4).one()['i'] == 4
# Array Index Operator
def test_array_index_operator():
assert str(NestedParent.l.matched_index().i) == 'l.$.i', NestedParent.l.matched_index().i
# -*- coding:utf-8 -*-
f = open(r'stat_smoking.txt','r')
#a = list(f)
line = f.readline()  # read the first line
print(line)
tu = eval(line)
#print(tu['ugcid'])
url_list = []
while line:
txt_data = eval(line)
print(txt_data['ugcid'])
url_list.append('https://kg.qq.com/node/play?s=' + txt_data['ugcid'])
line = f.readline()
print(url_list)
# txt_tables = []
# while line:
#     txt_data = eval(line)  # evaluate the line string into a Python object
#     txt_tables.append(txt_data)  # append it to the list
#     line = f.readline()  # read the next line
# print(txt_tables)
f.close()
from fastapi import APIRouter, HTTPException, BackgroundTasks
from containers import Managers
from domain.models.api_response import ApiResponse
from domain.models.dataset_information import DatasetInformation
from domain.exceptions.application_error import ApplicationError
from domain.models.hyper_parameter_information import HyperParameterInformation
configuration_factory = Managers.configuration_factory()
model_train_evaluate_manager = Managers.model_train_evaluate_manager()
export_manager = Managers.export_manager()
router = APIRouter()
"""
Create network configuration and start training and evaluation in the background and export the resulting model
Parameters
----------
dataset_info: DatasetInformation
object of type DatasetInformation containing the dataset info for the training
config: HyperParameterInformation
object of type HyperParameterInformation containing the hyper parameters for the training
Returns
-------
ApiResponse
ApiResponse containing success status
"""
@router.post('/')
async def start_train(background_tasks: BackgroundTasks, dataset_info: DatasetInformation, config: HyperParameterInformation):
try:
background_tasks.add_task(configuration_factory.create_configuration, dataset_info=dataset_info, config=config)
background_tasks.add_task(model_train_evaluate_manager.train_eval_continuously, config=config)
background_tasks.add_task(export_manager.save_trained_model, dataset_info=dataset_info, config=config)
return ApiResponse(success=True)
except ApplicationError as e:
raise HTTPException(status_code=400, detail=e.__str__())
except Exception as e:
raise HTTPException(status_code=500, detail=e.__str__())
# Source repository: NikitaRastogi/handwritten
import os
import sys
from PIL import Image
if not os.path.exists('pages'):
print('Creating pages folder')
os.makedirs('pages')
try:
    line_count = int(sys.argv[1])
except IndexError:
line_count = 20
def make_page(lines, count):
images = [Image.open(i) for i in lines]
no_of_lines = len(lines)
size = (1500, 100)
page_size = (size[0], size[1] * no_of_lines)
page = Image.new('RGB', page_size, color='white')
offset = 0
for im in images:
im.thumbnail(size)
page.paste(im, (0, offset))
offset += 100
page.save('pages/'+str(count)+'.png')
lines = os.listdir('images')
ranked_paths = [(int(i[:-4]),'images/'+i) for i in lines]
ranked_paths.sort()
paths = [i[1] for i in reversed(ranked_paths)]
count = 1
while paths:
page = []
for i in range(line_count):
try:
x = paths.pop()
except IndexError:
continue
else:
page.append(x)
make_page(page, count)
count += 1
import pandas as pd
movie = pd.read_csv('movie.csv')
rating = pd.read_csv('rating.csv')
df = movie.merge(rating, how="left", on="movieId")
df['title'] = df.title.str.replace(r'(\(\d\d\d\d\))', '')
df['title'] = df['title'].apply(lambda x: x.strip())
values_title = pd.DataFrame(df["title"].value_counts())
rare_movies = values_title[values_title["title"] <= 1000].index
common_movies = df[~df["title"].isin(rare_movies)]
user_movie_df = common_movies.pivot_table(index=["userId"], columns=["title"], values="rating")
user_id = 108170
random_user_df = user_movie_df[user_movie_df.index == user_id]
random_user_df
movies_watched = random_user_df.columns[random_user_df.notna().any()].tolist()
len(movies_watched)
movies_watched_df = user_movie_df[movies_watched]
movies_watched_df.head()
movies_watched_df.shape
user_movie_count = movies_watched_df.T.notnull().sum()
user_movie_count = user_movie_count.reset_index()
user_movie_count.columns = ["userId", "movie_count"]
perc = len(movies_watched) * 60 / 100
users_same_movies = user_movie_count[user_movie_count["movie_count"] > perc]["userId"]
final_df = pd.concat([movies_watched_df[movies_watched_df.index.isin(users_same_movies.index)],
random_user_df[movies_watched]])
final_df.head()
final_df.T.corr()
final_df.shape
corr_df = final_df.T.corr().unstack().sort_values().drop_duplicates()
corr_df = pd.DataFrame(corr_df, columns=["corr"])
corr_df.index.names = ['user_id_1', 'user_id_2']
corr_df = corr_df.reset_index()
corr_df.head()
top_users = corr_df[(corr_df["user_id_1"] == user_id) & (corr_df["corr"] >= 0.65)][
["user_id_2", "corr"]].reset_index(drop=True)
top_users = top_users.sort_values(by='corr', ascending=False)
top_users.rename(columns={"user_id_2": "userId"}, inplace=True)
top_users
top_users_ratings = top_users.merge(rating[["userId", "movieId", "rating"]], how='inner')
top_users_ratings
top_users_ratings.shape
top_users_ratings['weighted_rating'] = top_users_ratings['corr'] * top_users_ratings['rating']
top_users_ratings.head()
temp = top_users_ratings.groupby('movieId').sum()[['corr', 'weighted_rating']]
temp.columns = ['sum_corr', 'sum_weighted_rating']
temp.head()
recommendation_df = pd.DataFrame()
recommendation_df['weighted_average_recommendation_score'] = temp['sum_weighted_rating'] / temp['sum_corr']
recommendation_df['movieId'] = temp.index
recommendation_df = recommendation_df.sort_values(by='weighted_average_recommendation_score', ascending=False)
recommendation_df.head(10)
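# Worked example (added for illustration, made-up numbers): if two similar
# users have correlations 0.8 and 0.7 with the target user and rated a movie
# 5.0 and 3.0, the weighted ratings are 4.0 and 2.1, so the movie's
# weighted_average_recommendation_score is (4.0 + 2.1) / (0.8 + 0.7) ≈ 4.07.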
movie_user = movie.loc[movie['movieId'].isin(recommendation_df.head(10)['movieId'].head())]['title']
movie_user.head()
movie_user[:5].values
df['year_movie'] = df.title.str.extract(r'(\(\d\d\d\d\))', expand=False)  # pull out the 4-digit year expression
df['year_movie'] = df.year_movie.str.extract(r'(\d\d\d\d)', expand=False)  # keep only the digits inside the parentheses
df['title'] = df.title.str.replace(r'(\(\d\d\d\d\))', '')  # remove the year from the title
df['title'] = df['title'].apply(lambda x: x.strip())  # strip the leftover whitespace
df.shape #(3400256, 7)
df.head()
df["genre"] = df["genres"].apply(lambda x: x.split("|")[0])
df.drop("genres", inplace=True, axis=1)
df.head()
df.info()  # timestamp must be converted to datetime format
df["timestamp"] = pd.to_datetime(df["timestamp"], format='%Y-%m-%d')
df.info() #datetime64[ns]
# extract year, month and day into separate columns
df["year"] = df["timestamp"].dt.year
df["month"] = df["timestamp"].dt.month
df["day"] = df["timestamp"].dt.day
df.head()
df.shape
df["title"].nunique() #eşsiz film sayısı :26213
a = pd.DataFrame(df["title"].value_counts())
a.head() #titlelara gelen puanlar
rare_movies = a[a["title"] <= 1000].index #1000 yorumun altındaki filmleri filtreledik
common_movies = df[~df["title"].isin(rare_movies)]
common_movies.shape #(2059083, 10)
common_movies["title"].nunique() #859
item_movie_df = common_movies.pivot_table(index=["userId"], columns=["title"], values="rating")
item_movie_df.shape #(23149, 859)
user_movie_df.head(10)
item_movie_df.columns
len(item_movie_df.columns)
common_movies["title"].nunique()
movieId = rating[(rating["rating"] == 5.0) & (rating["userId"] ==user_id)].sort_values(by="timestamp",ascending=False)["movieId"][0:1].values[0]
movie_title = movie[movie["movieId"] == movieId]["title"].str.replace(r'(\(\d\d\d\d\))', '').str.strip().values[0]
movie_ratings = item_movie_df[movie_title]  # avoid shadowing the `movie` DataFrame
movie_item = item_movie_df.corrwith(movie_ratings).sort_values(ascending=False)
data_user_item = pd.DataFrame()
data_user_item["user_recommendations"] = movie_user[:5].values.tolist()
data_user_item["item_recommendations"] = movie_item[1:6].index
data_user_item
# coding: utf-8
"""
This module gives some AvailSet class for constructing avail_set
component. Also an abstract class is provided for customization.
Developers are suggested to extend the class ``AvailSetABC``
and implements all its abstract methods.
"""
import abc
class AvailSetABC:
"""
Abstract class for writing component ``avail_set``.
Developer should implement four functions: ``contain``,
``add`` ,``pop`` and ``delete``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def contain(self, item):
"""
Check whether the set contains the given item.
:param item: item to be checked
:return: bool, return True if the set contains given item
"""
pass
@abc.abstractmethod
def add(self, item):
"""
        Add an item to the set. If an identical item is already
        in the set, then do nothing.
:param item: item to be added to the set
"""
pass
@abc.abstractmethod
def pop(self):
"""
Pop out an item from the set.
Raise an IndexError if the set is empty.
:return: item from the set
"""
pass
@abc.abstractmethod
def delete(self, item):
"""
delete an item from the set.
Raise KeyError if the item not found.
"""
pass
class ArrayStack(AvailSetABC):
"""
A simple stack-like implementation of AvailSetABC,
inside is a python list.
"""
def __init__(self):
self.stack_set = list()
def __len__(self):
return len(self.stack_set)
def contain(self, item):
return (item in self.stack_set)
def add(self, item):
if self.contain(item):
return
self.stack_set.append(item)
def pop(self):
if len(self.stack_set) == 0:
raise IndexError("ArrayStack is empty")
return self.stack_set.pop()
def delete(self, item):
try:
self.stack_set.remove(item)
except ValueError:
raise KeyError("res_id {0} not found".format(item))
class ArrayQueue(AvailSetABC):
"""
A simple queue-like implementation of AvailSetABC,
inside is a python list.
"""
def __init__(self):
self.queue_set = list()
def __len__(self):
return self.queue_set.__len__()
def contain(self, item):
return (item in self.queue_set)
def add(self, item):
if self.contain(item):
return
self.queue_set.append(item)
def pop(self):
if len(self.queue_set) == 0:
raise IndexError("ArrayQueue is empty")
tmp = self.queue_set[0]
del self.queue_set[0]
return tmp
def delete(self, item):
try:
self.queue_set.remove(item)
except ValueError:
raise KeyError("res_id {0} not found".format(item))
class LinkedQueue(AvailSetABC):
"""
A simple queue-like implementation of AvailSetABC,
essentially a linked list.
"""
class Node:
def __init__(self, item):
self.next = None
self.prev = None
self.item = item
def __init__(self):
self.head = None
self.tail = None
self.size = 0
def __len__(self):
return self.size
def contain(self, item):
ptr = self.head
while ptr:
if ptr.item == item:
return True
ptr = ptr.next
return False
def add(self, item):
if self.contain(item):
return
node = self.Node(item)
if self.size == 0:
self.head = node
self.tail = node
self.size += 1
else:
self.head.prev = node
node.next = self.head
self.head = node
self.size += 1
def pop(self):
if self.size == 0:
raise IndexError("LinkedQueue is empty")
if self.size == 1:
item = self.tail.item
self.head = None
self.tail = None
self.size -= 1
return item
else:
item = self.tail.item
self.tail.prev.next = None
self.tail = self.tail.prev
self.size -= 1
return item
    def delete(self, item):
        ptr = self.head
        while ptr:
            if ptr.item == item:
                if ptr.prev:
                    ptr.prev.next = ptr.next
                else:
                    self.head = ptr.next
                if ptr.next:
                    ptr.next.prev = ptr.prev
                else:
                    self.tail = ptr.prev
                self.size -= 1
                return
            ptr = ptr.next
        raise KeyError("res_id {0} not found".format(item))
import hashlib
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
def hash_token(plaintext):
return hashlib.sha256(plaintext.encode('utf-8')).hexdigest()
class TokenBackend(ModelBackend):
def authenticate(self, request, token=None):
if token:
try:
return get_user_model().objects.get(usertoken__token=hash_token(token))
except ObjectDoesNotExist:
pass
return None
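# Configuration sketch (added for illustration; the dotted path is an
# assumption about where this module lives):
#
#   AUTHENTICATION_BACKENDS = [
#       "django.contrib.auth.backends.ModelBackend",
#       "myapp.auth_backends.TokenBackend",
#   ]
#
# A client-supplied plaintext token is then checked against its SHA-256 hex
# digest stored on the related UserToken model, e.g.
# authenticate(request, token="<plaintext token>").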
# Generated by Django 2.2 on 2019-05-12 14:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('moment', '0023_daily_story_file_status'),
]
operations = [
migrations.AddField(
model_name='daily_story_file_status',
name='month',
field=models.CharField(default='', max_length=50),
),
]
# File: ACI/aci_endpoints_with_vendor.py
#!/usr/bin/env python
#
#
# <NAME> 2018
#
# APIC login username: mipetrin
# APIC URL: https://10.66.80.242
# APIC Password: <PASSWORD>
#
"""
Simple application to display details about endpoints
"""
import acitoolkit.acitoolkit as aci
from tabulate import tabulate
import requests
def main():
"""
Main Show Endpoints Routine
"""
# Take login credentials from the command line if provided
# Otherwise, take them from your environment variables file ~/.profile
description = ('Simple application to display details about endpoints')
creds = aci.Credentials('apic', description)
args = creds.get()
# Login to APIC
session = aci.Session(args.url, args.login, args.password)
# session = aci.Session(URL1, LOGIN1, PASSWORD1)
resp = session.login()
if not resp.ok:
print('%% Could not login to APIC')
return
# Download all of the interfaces and store the data as tuples in a list
data = []
endpoints = aci.Endpoint.get(session)
for ep in endpoints:
epg = ep.get_parent()
app_profile = epg.get_parent()
tenant = app_profile.get_parent()
# Check each MAC address via API call to macvendors.com to identify hardware vendor
url = "http://api.macvendors.com/" + ep.mac
response = requests.request("GET", url)
mac_vendor = response.text
# Store in list as tuple, that will be printed in the tabulate format
data.append((ep.mac, mac_vendor, ep.ip, ep.if_name, ep.encap,
tenant.name, app_profile.name, epg.name))
# Display the data downloaded
    print(tabulate(data, headers=["MACADDRESS", "MAC VENDOR", "IPADDRESS", "INTERFACE",
                                  "ENCAP", "TENANT", "APP PROFILE", "EPG"], tablefmt="simple"))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
StarcoderdataPython
|
3426432
|
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import render
from django.http import HttpResponse
from tse_demo.settings import BASE_DIR
import os
from importcrdata.models import PatronElectoral,Distelec
from . import forms
import re
# Create your views here.
import logging
import queue
import threading
import time
def typeValidator(data,fn):
try:
result = fn(data)
return result
    except (TypeError, ValueError):
        return 'error'
def tsePaginator(search_name,type,page):
if type == 'int':
        if len(search_name) == 6:
temp_data = PatronElectoral.objects.filter(codele__icontains=str(search_name))
if len(temp_data) == 0:
temp_data = PatronElectoral.objects.filter(cedula__icontains=str(search_name))
temp_page = page
temp_paginator = Paginator(temp_data, 5)
else:
temp_data = PatronElectoral.objects.filter(cedula__icontains=str(search_name))
temp_page = page
temp_paginator = Paginator(temp_data,5)
try:
temp_datas = temp_paginator.page(temp_page)
except PageNotAnInteger:
temp_datas = temp_paginator.page(1)
except EmptyPage:
temp_datas = temp_paginator.page(temp_paginator.num_pages)
return temp_datas
elif type == 'str':
temp_data = PatronElectoral.objects.filter(nombre__icontains=str(search_name))
temp_data2 = PatronElectoral.objects.filter(apellido1__icontains = str(search_name))
temp_data3 = PatronElectoral.objects.filter(apellido2__icontains=str(search_name))
temp_page = page
whole_data = temp_data | temp_data2 | temp_data3
temp_paginator = Paginator(whole_data, 5)
try:
temp_datas = temp_paginator.page(temp_page)
except PageNotAnInteger:
temp_datas = temp_paginator.page(1)
except EmptyPage:
temp_datas = temp_paginator.page(temp_paginator.num_pages)
return temp_datas
elif type == 'init':
temp_data = PatronElectoral.objects.all()
temp_page = page
temp_paginator = Paginator(temp_data, 5)
try:
temp_datas = temp_paginator.page(temp_page)
except PageNotAnInteger:
temp_datas = temp_paginator.page(1)
except EmptyPage:
temp_datas = temp_paginator.page(temp_paginator.num_pages)
return temp_datas
def getTseData(request):
form = forms.SearchForm()
if request.method == 'POST':
form = forms.SearchForm(request.POST)
if form.is_valid():
if form.cleaned_data['search_name'] != '':
if typeValidator(form.cleaned_data['search_name'], int) != 'error':
padrones = tsePaginator(form.cleaned_data['search_name'], 'int', request.GET.get('page', 1))
elif typeValidator(form.cleaned_data['search_name'], str) != 'error':
padrones = tsePaginator(form.cleaned_data['search_name'], 'str', request.GET.get('page', 1))
return render(request, 'test.html', {'padrones': padrones, 'searchForm': form})
padrones = tsePaginator('', 'init', request.GET.get('page', 1))
return render(request, 'test.html', {"padrones": padrones, 'searchForm': form})
my_queue = queue.Queue()
def storeInQueue(f):
def wrapper(*args):
my_queue.put(f(*args))
return wrapper
@storeInQueue
def loadDataToBd():
file1 = open("PADRON_COMPLETO.txt", encoding="latin-1")
lines = []
x=0
then = time.time()
for line in file1:
newLine = re.sub(' +|\n', ' ', line)
newLine = newLine.split(',')
newLine[-1] = newLine[-1].split(' ')[0]
lines.append(newLine)
PatronElectoral.objects.create(cedula=newLine[0], codele=newLine[1], sexo=newLine[2], fechacaduc=newLine[3], junta=newLine[4], nombre=newLine[5], apellido1=newLine[6], apellido2=newLine[7])
        x = x + 1
now = time.time()
print("It took: ", now-then, " seconds")
def loadDataView(request):
x = threading.Thread(target=loadDataToBd)
x.start()
    my_data = my_queue.get()  # read from the module-level queue populated by the worker thread
return render(request, 'test.html', context = {'data': my_data})
|
StarcoderdataPython
|
8178199
|
'''Homework 4, Computational Photonics, SS 2020: Fourier modal method.
'''
import numpy as np
from numpy.linalg import eig, solve
from scipy.linalg import toeplitz
from scipy.fftpack import fft
from scipy.sparse import diags
def fmm1d_te_layer_modes(perm, period, k_0, k_x, N, dtype=np.complex128):
'''Calculates the TE eigenmodes of a one-dimensional grating layer.
Arguments
---------
perm: 1d-array
permittivity distribution
period: float
grating period
k_0: float
vacuum wavenumber
k_x: float
transverse wave vector
N: int
number of positive Fourier orders
Returns
-------
beta: 1d-array
propagation constants of the eigenmodes
phie: 2d-array
Fourier coefficients of the eigenmodes (each column
corresponds to one mode)
'''
# number of points in x direction
N_x = perm.size
perm = perm.astype(dtype)
# Fourier coefficients of the permittivity
perm_fc = (fft(perm) / (N_x - 1)).astype(dtype)
# take the first 2 * N positive and 0 frequency
perm_fc_pos = perm_fc[:2 * N + 1]
# take the first 2 * N negative and 0 frequency
perm_fc_neg = np.concatenate((np.array(perm_fc[:1]),
perm_fc[-(2 * N):][::-1]), axis=0)
# calculate grating
Gm = np.arange(-N, N + 1, 1, dtype=dtype) * 2 * np.pi / period
# create the Toeplitz matrix containing the Fourier coefficients of perm
eps_hat = toeplitz(perm_fc_pos, perm_fc_neg).astype(dtype)
# create \hat K Matrix
K_hat_square = diags((Gm + k_x) ** 2, offsets=0).todense().astype(dtype)
# create final matrix
M_hat = (k_0 ** 2 * eps_hat - K_hat_square).astype(dtype)
# calculate the eigenvalues and eigenvectors of M_hat
eig_values, eig_vectors = eig(M_hat)
# take sqrt to get the propagation constant
beta = np.sqrt(eig_values).astype(dtype)
# invert eigenvalue if it corresponds to a backward propagating direction
beta[np.real(beta) + np.imag(beta) < 0] *= -1
return beta, eig_vectors
def fmm1d_te(lam, theta, period, perm_in, perm_out,
layer_perm, layer_thicknesses, N, dtype=np.complex128):
'''Calculates the TE diffraction efficiencies for a one-dimensional
layered grating structure using the T-matrix method.
Arguments
---------
lam: float
vacuum wavelength
theta: float
angle of incidence in rad
period: float
grating period
perm_in: float
permittivity on the incidence side
perm_out: float
permittivity on the exit side
layer_perm: 2d-array
permittivity distribution within the grating
layers (matrix, each row corresponds to one layer)
layer_thicknesses: 1d-array
thicknesses of the grating layers
N: int
number of positive Fourier orders
Returns
-------
eta_r: 1d-array
diffraction efficiencies of the reflected diffraction orders
eta_t: 1d-array
diffraction efficiencies of the transmitted diffraction orders
r: 1d-array
amplitude reflection coefficients of the reflected
diffraction orders
t: 1d-array
amplitude transmission coefficients of the transmitted
diffraction orders
'''
# vacuum wave vector
k_0 = 2 * np.pi / lam + 0j
# x component of k
k_x = k_0 * np.sqrt(perm_in) * np.sin(theta)
# create grating
G = (2 * np.pi / period) * np.arange(-N, N + 1, dtype=dtype)
# create K_hat matrix
K_hat_square = np.diag((k_x + G) ** 2).astype(dtype)
# initial phi electrical
ident = np.identity(2 * N + 1, dtype=dtype)
# initial beta
beta_0_hat = np.sqrt(k_0 ** 2 * perm_in * ident - K_hat_square, dtype=dtype)
beta_0_hat[np.real(beta_0_hat) + np.imag(beta_0_hat) < 0.0] *= -1
# initial block matrix
T_matrix = np.block([[ident, ident],
[beta_0_hat, -beta_0_hat]])
# iterate over all z layers
for lt, perm in zip(layer_thicknesses, layer_perm):
# get the betas and phi_e in this layer
beta, phi_e = fmm1d_te_layer_modes(perm, period, k_0, k_x, N,
dtype=dtype)
# convert beta to beta_hat containing the entries on the diagonal
beta_hat = np.diag(beta).astype(dtype)
# matrices for forward and backward propagation
p_pos = np.diag(np.exp(1j * beta * lt)).astype(dtype)
p_neg = np.diag(np.exp(-1j * beta * lt)).astype(dtype)
# create A matrix which is needed to get the new transfer matrix
A = np.block([[phi_e, phi_e],
[np.dot(phi_e, beta_hat),
np.dot(-phi_e, beta_hat)]])
# put the propagation matrices in a block matrix
p_mat = np.block([[p_pos, np.zeros(p_pos.shape)],
[np.zeros(p_pos.shape), p_neg]])
T = A @ solve(A.T, p_mat.T).T
T_matrix = T @ T_matrix
# beta_out_hat matrix
beta_out_hat = np.sqrt(k_0 ** 2 * perm_out * ident
- K_hat_square, dtype=dtype)
beta_out_hat[np.real(beta_out_hat) + np.imag(beta_out_hat) < 0.0] *= -1
# last missing matrix which inverse is left multiplied
T_final = np.block([[ident, ident],
[beta_out_hat, - beta_out_hat]]).astype(dtype)
# create the final transfer matrix
T_matrix = solve(T_final, T_matrix)
# initial amplitudes
a_in = np.zeros(2 * N + 1, dtype=dtype)
# set only this input coefficient to 1
a_in[N] = 1 + 0j
# extract the four block matrices from the T_matrix
index_1 = slice(None, 2 * N + 1)
index_2 = slice(2 * N + 1, None)
t11 = T_matrix[index_1, index_1]
t12 = T_matrix[index_1, index_2]
t21 = T_matrix[index_2, index_1]
t22 = T_matrix[index_2, index_2]
# calculate R and T matrices
r = np.dot(-solve(t22, t21), a_in[:, np.newaxis])
t = np.dot((t11 - t12 @ solve(t22, t21)), a_in[:, np.newaxis])
# extract the diagonal elements
beta_in = np.diag(beta_0_hat).astype(dtype)
# calculate transmission and reflection efficiencies
eta_r = np.real(1 / np.real(beta_in[N]) *
np.dot(np.real(beta_0_hat),
np.multiply(r, np.conj(r))))
eta_t = np.real(1 / np.real(beta_in[N]) *
np.dot(np.real(beta_out_hat),
np.multiply(t, np.conj(t))))
    # return plain 1D arrays (works whether numpy matrices or ndarrays were produced above)
    eta_r = np.asarray(eta_r).ravel()
    eta_t = np.asarray(eta_t).ravel()
    r = np.asarray(r).ravel()
    t = np.asarray(t).ravel()
return eta_r, eta_t, r, t
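# Hedged usage sketch (illustrative values only, not taken from the original homework):
#
#     period = 1.0                          # grating period
#     perm = np.ones(1000)
#     perm[:500] = 2.25                     # half the period filled with n = 1.5 material
#     eta_r, eta_t, r, t = fmm1d_te(lam=0.6328, theta=0.0, period=period,
#                                   perm_in=1.0, perm_out=2.25,
#                                   layer_perm=np.array([perm]),
#                                   layer_thicknesses=np.array([0.5]), N=10)
#     print(eta_r.sum() + eta_t.sum())      # energy check: ideally close to 1 for lossless structures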
|
StarcoderdataPython
|
1805285
|
<reponame>CharlesZhong/Mobile-Celluar-Measure<gh_stars>0
# -*- coding: utf-8 -*-
"""
Copyright (c) 2011, <NAME> <<EMAIL>>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* The name of the contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
from ctypes import cdll as loader
# Generic constants
__VERSION__ = "0.2.2"
PIXEL_SZ = 3
PIXEL_ALPHA_SZ = 4
# Per-OS setup
if sys.platform == "win32":
_LIBRARY = "libwebp_a.dll"
elif sys.platform.startswith("linux"):  # "linux2" on Python 2, "linux" on Python 3
_LIBRARY = "libwebp.so"
elif sys.platform == "darwin":
_LIBRARY = "libwebp.dylib"
else:
raise NotImplementedError(
"Test non implemented under {0}".format(sys.platform))
# Load library
_LIBRARY = loader.LoadLibrary(_LIBRARY)
|
StarcoderdataPython
|
1733519
|
"""
@author: Heerozh (<NAME>)
@copyright: Copyright 2019, Heerozh. All rights reserved.
@license: Apache 2.0
@email: <EMAIL>
"""
from collections import defaultdict
from typing import Dict
import pandas as pd
class Calendar:
"""
Usage:
        call build() first to get the business-day calendar,
        then add holidays manually by calling set_as_holiday().
        If a half-day session is open, use remove_events() to clear all events
        for that day and re-add the remaining ones with add_event().
    US holiday calendar can be found at https://iextrading.com/trading/
"""
def __init__(self, csv_file=None) -> None:
# todo: read from file
if csv_file is not None:
pass
else:
self.events = defaultdict(list)
def to_csv(self):
# todo save to file
pass
def build(self, end, events: Dict[str, pd.Timestamp], tz='UTC', freq='B'):
""" build("2020", {'Open': pd.Timestamp(9:00), 'Close': pd.Timestamp(15:00)}) """
days = pd.date_range(pd.Timestamp.now(), end, tz=tz, freq=freq)
self.events = {name: days + time for name, time in events.items()}
def add_event(self, event: str, date_time: pd.Timestamp):
self.events[event].append(date_time)
self.events[event].sort()
def remove_events(self, date):
self.events = {
            event: [time for time in times if time.date() != date]
for event, times in self.events.items()
}
def set_as_holiday(self, date):
        # Consider the case where the calendar is set incorrectly: e.g. if an order has not
        # been filled by the close, orders are cancelled by default, and the next date is
        # simply recalculated. Add a test case for this.
return self.remove_events(date)
def next(self, event_name):
"""return the next time of this event"""
# todo, remove pasted times, and return next
pass
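# Hedged usage sketch (assumption: the event times passed to build() are offsets that can
# be added to a DatetimeIndex, e.g. pd.Timedelta values, which is what the `days + time`
# expression in build() implies):
#
#     cal = Calendar()
#     cal.build("2020", {"Open": pd.Timedelta(hours=9, minutes=30),
#                        "Close": pd.Timedelta(hours=16)})
#     cal.set_as_holiday(pd.Timestamp("2019-12-25").date())
#     cal.events["Open"][:3]   # first three market-open timestamps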
|
StarcoderdataPython
|
9796720
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Zun DB test base class."""
import fixtures
import zun.conf
from zun.db import api as db_api
from zun.db.sqlalchemy import api as sqla_api
from zun.db.sqlalchemy import migration
from zun.db.sqlalchemy import models
from zun.tests import base
CONF = zun.conf.CONF
_DB_CACHE = None
class Database(fixtures.Fixture):
def __init__(self, db_api, db_migrate, sql_connection):
self.sql_connection = sql_connection
self.engine = db_api.get_engine()
self.engine.dispose()
conn = self.engine.connect()
self.setup_sqlite(db_migrate)
self.post_migrations()
self._DB = "".join(line for line in conn.connection.iterdump())
self.engine.dispose()
def setup_sqlite(self, db_migrate):
if db_migrate.version():
return
models.Base.metadata.create_all(self.engine)
db_migrate.stamp('head')
def _setUp(self):
conn = self.engine.connect()
conn.connection.executescript(self._DB)
self.addCleanup(self.engine.dispose)
def post_migrations(self):
"""Any addition steps that are needed outside of the migrations."""
class DbTestCase(base.TestCase):
def setUp(self):
super(DbTestCase, self).setUp()
self.dbapi = db_api._get_dbdriver_instance()
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(sqla_api, migration,
sql_connection=CONF.database.connection)
self.useFixture(_DB_CACHE)
class ModelsObjectComparatorMixin(object):
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
return {k: v for k, v in obj.items()
if k not in ignored_keys}
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
obj2 = self._dict_from_object(obj2, ignored_keys)
self.assertEqual(len(obj1),
len(obj2),
"Keys mismatch: %s" %
str(set(obj1.keys()) ^ set(obj2.keys())))
for key, value in obj1.items():
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)]
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
def _assertEqualOrderedListOfObjects(self, objs1, objs2,
ignored_keys=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
conv = lambda objs: [obj_to_dict(obj) for obj in objs]
self.assertEqual(conv(objs1), conv(objs2))
def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
self.assertEqual(len(primitives1), len(primitives2))
for primitive in primitives1:
self.assertIn(primitive, primitives2)
for primitive in primitives2:
self.assertIn(primitive, primitives1)
|
StarcoderdataPython
|
4820462
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
def countingSort(arr):
    '''
    Counting sort: a special case of bucket sort. The range of data values is used as
    the index, and the sorted order is derived from the counts.
    Suited to data whose value range is limited and consists of positive integers;
    a practical use case is sorting exam scores.
    1. Not in-place: needs a counting array whose size is set by the value range
       (usually small) plus a sorted output array of the same length as the input,
       so the space complexity is O(n).
    2. Stable: the input is traversed from back to front, so equal elements keep
       their relative order.
    3. Time complexity: O(n). Once the counting buckets are built, traverse the input
       from back to front and place each element directly into the sorted array.
    Requirement: all values of the input array must be positive integers (zero excluded).
    Normalising the array before sorting:
        if values smaller than 1 exist, add abs(min) + 1 to every value so the minimum becomes 1;
        if decimals exist, scale everything up by 10 ** (number of decimal places).
    '''
absVal = 0
if min(arr) <= 0:
absVal = abs(min(arr)) + 1
arr = [i + absVal for i in arr]
arrLength = len(arr)
countArrLength = max(arr)
countArr = [0] * countArrLength
sortedArr = [0] * arrLength
# [2, 2, 3, 2, 0, 0, 0, 0, 1]
for i in range(arrLength):
countArr[arr[i] - 1] += 1
# [2, 4, 7, 9, 9, 9, 9, 9, 10]
for i in range(1, countArrLength):
countArr[i] += countArr[i - 1]
    # Sort: for stability, traverse the data from back to front.
    # The bucket entry for a value gives that value's position in the sorted output.
    # After using it, decrement the bucket entry by one: that is the position of the next
    # occurrence of this value, i.e. the sorted-array index for the current element.
for i in arr[::-1]:
countArr[i - 1] = countArr[i - 1] - 1
sortedArr[countArr[i - 1]] = i
if absVal > 0:
sortedArr = [i - absVal for i in sortedArr]
return sortedArr
def radixSort(arr):
    '''
    Radix sort: an extension of bucket sort.
    Treats each element as a sequence of digits; suited to sorting fairly long numbers,
    e.g. phone numbers.
    1. Not in-place: needs a counting array whose size is set by the digit range
       (usually 0, 1, 2, ..., 9) plus a sorted output array of the same length as the
       input, so the space complexity is O(n).
    2. Stable: it is built on counting sort, which is stable.
    3. Time complexity: O(len(arr) * n).
       Using counting sort, build the counting buckets from the digit at the current
       position, then traverse the input from back to front and place each element
       directly into the sorted array: O(n) per digit. Iterate from the lowest digit to
       the highest, once per digit of the longest element, and the sort is complete.
    Requirement: all elements must have the same number of digits; shorter ones can be
    padded with leading zeros.
    sample: [12341234, 25342534, 16781678, 67296729]
    '''
arrLen = len(arr)
arrSorted = [0] * arrLen
eleLenMax = max([len(e) for e in [str(i) for i in arr]])
    # From the last digit position to the first, sort the elements by the value at each digit.
for ieArr in range(eleLenMax - 1, -1, -1):
arrPos = []
        # Collect the digit at this position for every element.
for eArr in [str(i) for i in arr]:
arrPos.append(int(eArr[ieArr]))
        # Counting sort of the digit array.
arrEleCount = RadixCounting(arrPos)
        # Use the counting array to find each element's position from its digit value;
        # the digit array shares indices with the input array, so elements move with their digits.
index = arrLen - 1
for i in arrPos[::-1]:
arrEleCount[i] = arrEleCount[i] - 1
arrSorted[arrEleCount[i]] = arr[index]
index -= 1
        # Prepare the next iteration.
arr = arrSorted.copy()
return arrSorted
def RadixCounting(arrPos):
    '''
    Counting sort of the array of digit values at one position.
    Returns the counting (prefix-sum) array.
    '''
arrEleCount = [0] * 10
    # Count occurrences of each digit value.
for i in range(len(arrPos)):
arrEleCount[arrPos[i]] += 1
for i in range(1, 10):
arrEleCount[i] += arrEleCount[i - 1]
return arrEleCount
if __name__ == '__main__':
print(datetime.now().strftime('%H:%M:%S.%f'))
print('countingSort', countingSort([1, -2, 3, 0, 9, 3, 4, -1, 2, 3]))
print(datetime.now().strftime('%H:%M:%S.%f'))
print(
'radixSort',
radixSort([12341234, 25342534, 16781678, 67296729, 10781678,
67295729]))
print(datetime.now().strftime('%H:%M:%S.%f'))
|
StarcoderdataPython
|
9630290
|
<gh_stars>1-10
import numpy as np
import argparse
from .lib import get_attention
parser = argparse.ArgumentParser('parameters')
parser.add_argument('path', type=str)
parser.add_argument('--test-iter', type=int, default=0)
parser.add_argument('--layer', type=int, default=3)
parser.add_argument('--block', type=int, default=0)
parser.add_argument('--head', type=int, default=0)
parser.add_argument('--height', type=int, default=3)
parser.add_argument('--width', type=int, default=3)
parser.add_argument('--kernel', type=int, default=7)
args = parser.parse_args()
path = args.path
test_iter = args.test_iter
layer = args.layer
block = args.block
head = args.head
height = args.height
width = args.width
kernel = args.kernel
print(get_attention(path, test_iter, block, head, height, width, kernel))
|
StarcoderdataPython
|
134177
|
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import IECore
import Gaffer
import GafferUI
##########################################################################
# Public methods
##########################################################################
## May be called to connect the DotUI functionality to an application
# instance. This isn't done automatically because some applications
# may have graphs for which it doesn't make sense to use Dots. Typically
# this function would be called from an application startup file.
def connect( applicationRoot ) :
applicationRoot.__dotUIConnected = True
##########################################################################
# Metadata
##########################################################################
Gaffer.Metadata.registerNode(
Gaffer.Dot,
"description",
"""
A utility node which can be used for organising large graphs.
""",
"nodeGadget:minWidth", 0.0,
"nodeGadget:padding", 0.5,
"layout:activator:labelTypeIsCustom", lambda node : node["labelType"].getValue() == node.LabelType.Custom,
plugs = {
"in" : [
"plugValueWidget:type", ""
],
"out" : [
"plugValueWidget:type", ""
],
"labelType" : [
"description",
"""
The method used to apply an optional label
to the dot. Using a node name is recommended,
because it encourages the use of descriptive node
names, and updates automatically when nodes are
renamed or upstream connections change. The custom
label does however provide more flexibility, since
node names are restricted in the characters they
can use.
""",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
"nodule:type", "",
"preset:None", Gaffer.Dot.LabelType.None,
"preset:Node Name", Gaffer.Dot.LabelType.NodeName,
"preset:Upstream Node Name", Gaffer.Dot.LabelType.UpstreamNodeName,
"preset:Custom", Gaffer.Dot.LabelType.Custom,
],
"label" : [
"description",
"""
The label displayed when the type is set to custom.
""",
"nodule:type", "",
"layout:activator", "labelTypeIsCustom",
],
},
)
##########################################################################
# NodeGraph menus
##########################################################################
def __insertDot( menu, destinationPlug ) :
nodeGraph = menu.ancestor( GafferUI.NodeGraph )
gadgetWidget = nodeGraph.graphGadgetWidget()
graphGadget = nodeGraph.graphGadget()
with Gaffer.UndoContext( destinationPlug.ancestor( Gaffer.ScriptNode ) ) :
node = Gaffer.Dot()
graphGadget.getRoot().addChild( node )
node.setup( destinationPlug )
node["in"].setInput( destinationPlug.getInput() )
destinationPlug.setInput( node["out"] )
menuPosition = menu.popupPosition( relativeTo = gadgetWidget )
position = gadgetWidget.getViewportGadget().rasterToGadgetSpace(
IECore.V2f( menuPosition.x, menuPosition.y ),
gadget = graphGadget
).p0
graphGadget.setNodePosition( node, IECore.V2f( position.x, position.y ) )
def __connectionContextMenu( nodeGraph, destinationPlug, menuDefinition ) :
applicationRoot = nodeGraph.scriptNode().ancestor( Gaffer.ApplicationRoot )
connected = False
with IECore.IgnoredExceptions( AttributeError ) :
connected = applicationRoot.__dotUIConnected
if not connected :
return
if len( menuDefinition.items() ) :
menuDefinition.append( "/DotDivider", { "divider" : True } )
menuDefinition.append(
"/Insert Dot",
{
"command" : functools.partial( __insertDot, destinationPlug = destinationPlug ),
"active" : not destinationPlug.getFlags( Gaffer.Plug.Flags.ReadOnly ),
}
)
__connectionContextMenuConnection = GafferUI.NodeGraph.connectionContextMenuSignal().connect( __connectionContextMenu )
|
StarcoderdataPython
|
3303433
|
import re
texto = """Olá Mundo!
Meu nome é <NAME>.
Meu numero é (011) 988255673
Meu E-mail é <EMAIL>
"""
Re_Padrao_Telefone = re.compile(r"\(\d{3}\)\s\d{9}")  # expression that will match our phone number
Resultado = Re_Padrao_Telefone.search(texto)  # search finds the first number in the text; findall finds every available number
print(Resultado.group())  # group() shows what was matched; groups() shows the individual capture groups
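# For completeness, findall (mentioned in the comment above) returns every match
# instead of only the first one:
all_numbers = re.findall(r"\(\d{3}\)\s\d{9}", texto)
print(all_numbers)  # ['(011) 988255673']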
|
StarcoderdataPython
|
3234268
|
<reponame>leolani/emissor
import logging
from glob import glob
import os
from pathlib import Path
from types import MappingProxyType
from typing import Iterable, Optional, Any, Union, Mapping, Dict, Tuple
from emissor.representation.scenario import Scenario, Modality, Signal, AudioSignal, ImageSignal, TextSignal, ScenarioContext
from emissor.representation.util import unmarshal, marshal
logger = logging.getLogger(__name__)
ANNOTATION_TOOL_ID = "annotation_tool"
DEFAULT_SIGNAL_PATHS = MappingProxyType({
Modality.AUDIO.name.lower(): "./audio.json",
Modality.IMAGE.name.lower(): "./image.json",
Modality.TEXT.name.lower(): "./text.json"
})
def file_name(path):
return os.path.splitext(base_name(path))[0]
def base_name(path):
return os.path.basename(path)
class ScenarioController:
def __init__(self, scenario: Scenario, storage):
self._storage = storage
self._scenario = scenario
self._signals = dict()
def append_signal(self, signal: Signal[Any, Any]):
if signal.modality not in self._signals:
self.load_signals((signal.modality,))
self._signals[signal.modality].append(signal)
@property
def id(self) -> str:
return self.scenario.id
@property
def scenario(self) -> Scenario:
return self._scenario
@property
def signals(self) -> Mapping[Modality, Iterable[Signal[Any, Any]]]:
return dict(self._signals)
def load_signals(self, modalities: Tuple[Modality]):
for modality in modalities:
signals = self._storage.load_modality(self.scenario.id, modality)
signals = signals if signals else []
self._signals[modality] = signals
def get_signals(self, modality: Modality) -> Iterable[Signal[Any, Any]]:
if modality not in self._signals:
self.load_signals((modality,))
return list(self._signals[modality])
class ScenarioStorage:
EXTENSION = ".json"
def __init__(self, data_path):
self._data_path = data_path
self._create_path(data_path)
@property
def base_path(self):
return self._data_path
def list_scenarios(self) -> Iterable[str]:
return tuple(os.path.basename(path[:-1]) for path in glob(os.path.join(self.base_path, "*", "")))
def create_scenario(self, scenario_id: str, start: int, end: int, context: ScenarioContext,
signals: Dict[str, str] = DEFAULT_SIGNAL_PATHS) -> ScenarioController:
scenario = Scenario.new_instance(scenario_id, start, end, context, signals)
metadata_path = self._get_scenario_metadata_path(scenario_id)
self._create_path(self._get_scenario_path(scenario.id))
with open(metadata_path, 'w') as json_file:
json_file.write(marshal(scenario, cls=Scenario))
return ScenarioController(scenario, self)
def load_scenario(self, scenario_id: str) -> Optional[ScenarioController]:
scenario_path = self._get_scenario_metadata_path(scenario_id)
if not os.path.isfile(scenario_path):
raise ValueError(f"No scenario with id {scenario_id} at {scenario_path}")
with open(scenario_path) as json_file:
json_string = json_file.read()
scenario = unmarshal(json_string, cls=Scenario)
return ScenarioController(scenario, self)
def save_scenario(self, scenario: ScenarioController) -> None:
if not isinstance(scenario, ScenarioController):
raise ValueError("Can only save ScenarioController instances, got: " + type(scenario) + ". See the #create_scenario method.")
scenario_metadata_path = self._get_scenario_metadata_path(scenario.id)
plain_scenario = scenario.scenario
with open(scenario_metadata_path, 'w') as json_file:
json_file.write(marshal(plain_scenario, cls=Scenario))
for modality, signals in scenario.signals.items():
self._save_signals(self._get_metadata_path(plain_scenario, modality), signals, modality)
def _save_signals(self, path, signals, modality: Modality):
if modality == Modality.IMAGE:
cls = ImageSignal
elif modality == Modality.TEXT:
cls = TextSignal
elif modality == Modality.AUDIO:
cls = AudioSignal
else:
raise ValueError(f"Unsupported modality: {modality}")
with open(path, 'w') as json_file:
json_file.write(marshal(signals, cls=cls))
def load_modality(self, scenario_id: str, modality: Modality) -> Optional[Iterable[Signal[Any, Any]]]:
scenario = self.load_scenario(scenario_id)
modality_meta_path = self._get_metadata_path(scenario.scenario, modality)
if not modality_meta_path or not os.path.isfile(modality_meta_path):
return None
with open(modality_meta_path) as json_file:
if modality == Modality.IMAGE:
cls = ImageSignal
elif modality == Modality.TEXT:
cls = TextSignal
elif modality == Modality.AUDIO:
cls = AudioSignal
else:
raise ValueError(f"Unsupported modality: {modality}")
return unmarshal(json_file.read(), cls=cls)
def _get_scenario_path(self, scenario_id):
return os.path.join(self.base_path, scenario_id)
def _get_scenario_metadata_path(self, scenario_id):
return os.path.join(self._get_scenario_path(scenario_id), scenario_id + self.EXTENSION)
def _get_metadata_path(self, scenario: Scenario, modality: Union[Modality, str]):
scenario_path = self._get_scenario_path(scenario.id)
modality_key = modality if isinstance(modality, str) else modality.name.lower()
if modality_key not in scenario.signals:
return None
relative_path = scenario.signals[modality_key]
return os.path.join(scenario_path, relative_path)
def _create_path(self, data_path):
path = Path(data_path)
if not path.exists():
path.mkdir(parents=True, exist_ok=True)
logger.info("Created directory %s", path)
|
StarcoderdataPython
|
9654713
|
class Solution:
def solve(self, n):
ans = []
n = str(n).rjust(4,"0")
ans.append("M"*int(n[0]))
if n[1] == "9":
ans.append("CM")
elif n[1] == "4":
ans.append("CD")
elif n[1] >= "5":
ans.append("D")
ans.append("C"*(int(n[1])-5))
else:
ans.append("C"*int(n[1]))
if n[2] == "9":
ans.append("XC")
elif n[2] == "4":
ans.append("XL")
elif n[2] >= "5":
ans.append("L")
ans.append("X"*(int(n[2])-5))
else:
ans.append("X"*int(n[2]))
if n[3] == "9":
ans.append("IX")
elif n[3] == "4":
ans.append("IV")
elif n[3] >= "5":
ans.append("V")
ans.append("I"*(int(n[3])-5))
else:
ans.append("I"*int(n[3]))
return "".join(ans)
|
StarcoderdataPython
|
8188823
|
<reponame>apcarrik/kaggle<filename>duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/11_features/numtrees_30/rule_19.py<gh_stars>0
def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Age, obj[4]: Education, obj[5]: Occupation, obj[6]: Bar, obj[7]: Coffeehouse, obj[8]: Restaurant20to50, obj[9]: Direction_same, obj[10]: Distance
# {"feature": "Distance", "instances": 34, "metric_value": 0.9597, "depth": 1}
if obj[10]>1:
# {"feature": "Occupation", "instances": 20, "metric_value": 0.7219, "depth": 2}
if obj[5]>5:
# {"feature": "Coupon", "instances": 13, "metric_value": 0.8905, "depth": 3}
if obj[2]>0:
# {"feature": "Time", "instances": 10, "metric_value": 0.971, "depth": 4}
if obj[1]>1:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.65, "depth": 5}
if obj[8]>1.0:
return 'True'
elif obj[8]<=1.0:
# {"feature": "Education", "instances": 2, "metric_value": 1.0, "depth": 6}
if obj[4]>0:
return 'True'
elif obj[4]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]<=1:
# {"feature": "Age", "instances": 4, "metric_value": 0.8113, "depth": 5}
if obj[3]<=4:
return 'False'
elif obj[3]>4:
return 'True'
else: return 'True'
else: return 'False'
elif obj[2]<=0:
return 'True'
else: return 'True'
elif obj[5]<=5:
return 'True'
else: return 'True'
elif obj[10]<=1:
# {"feature": "Education", "instances": 14, "metric_value": 0.9403, "depth": 2}
if obj[4]>0:
return 'False'
elif obj[4]<=0:
# {"feature": "Bar", "instances": 6, "metric_value": 0.65, "depth": 3}
if obj[6]<=2.0:
return 'True'
elif obj[6]>2.0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
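# Hedged example call (the feature vector below is made up; the indices follow the
# comment at the top of findDecision):
#
#     sample = [1, 2, 1, 3, 0, 7, 1.0, 2.0, 1.0, 0, 2]
#     findDecision(sample)   # -> 'True' for this particular vector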
|
StarcoderdataPython
|
3346866
|
import json
import dash
import dash_html_components as html
from dash.dependencies import Input, Output
import imageio
from dash_slicer import VolumeSlicer
app = dash.Dash(__name__, update_title=None)
vol = imageio.volread("imageio:stent.npz")
slicer = VolumeSlicer(app, vol)
slicer.graph.config["scrollZoom"] = False
app.layout = html.Div([slicer.graph, slicer.slider, html.Div(id='info'), *slicer.stores])
@app.callback(
Output("info", "children"),
[Input(slicer.state.id, "data")]
)
def update_info(state):
return json.dumps(state, indent=4)
if __name__ == "__main__":
app.run_server(debug=True, dev_tools_props_check=False)
|
StarcoderdataPython
|
4847421
|
'''
Author: <NAME>
Lang: python3
Github: https://www.github.com/ajaymahar
YT: https://www.youtube.com/ajaymaharyt
'''
class Sort:
def __init__(self):
"""TODO: Docstring for __init__.
:returns: TODO
"""
pass
def partition(self, arr, start, end):
"""TODO: Docstring for partition.
:arr, start, end: TODO
:returns: TODO
"""
pivot = arr[end]
pIndex = start
for i in range(start, end):
if arr[i] <= pivot:
arr[pIndex], arr[i] = arr[i], arr[pIndex]
pIndex += 1
arr[end], arr[pIndex] = arr[pIndex], arr[end]
return pIndex
def quickSort(self, arr, start, end):
"""
: arr, start, end: TODO
:returns: TODO
Avg T: O(n log n)
Worst T: O(n^2)
S: O(1) -> in place
"""
if start < end:
pIndex = self.partition(arr, start, end)
self.quickSort(arr, start, pIndex - 1)
self.quickSort(arr, pIndex + 1, end)
if __name__ == "__main__":
qs = Sort()
arr = [3, 4, 6, 7, 8, 4, 5, 3, 6, 7, 9, 0, 2, 1, 3, 4, 5,
34, 234, 5, 2, 54, 34, 375, 3452, 46, 75, 35, 24, 30]
qs.quickSort(arr, 0, len(arr) - 1)
print(arr)
|
StarcoderdataPython
|
3259096
|
from pathlib import Path
p = Path('rootfiles')
print(p.is_dir()) # True
print(p.is_file()) # False
print(p.is_absolute()) # False
print(p.resolve()) # /Users/yuto/VS/root_lecture/macros/hamada/rootfiles
data1_path = p / 'data1.root'
print(data1_path.as_posix()) # rootfiles/data1.root
print(data1_path.name) # data1.root
print(data1_path.stem) # data1
print(data1_path.suffix) # .root
|
StarcoderdataPython
|
12845546
|
from distutils.core import setup
setup(
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/mephizzle/python-funkyfunc',
name='FunkyFunk',
version='0.0.2-dev',
packages=['funkyfunc'],
license='Apache 2.0',
long_description=open('README.txt').read(),
)
|
StarcoderdataPython
|
53414
|
"""
File: tools.py
"""
import random
import time
def getRandomList(n):
"""Returns a list of unique random numbers in the
range 0..n-1"""
items = list(range(n))
random.shuffle(items)
return items
def compare(titleList, functionList, sizeList,
dataSet=lambda x: x, counter=None, compareType="time"):
"""Runs a comparison test between the functions in functionList."""
print()
# Print a header indicating what value is being compared
print(compareType.title().center(25 + (12 * (len(titleList) - 1)) + 1, "-") + "\n")
# Print the header for the table of runtimes
headerString = "{:>25s}" + "{:>12s}" * (len(titleList) - 1) + "\n"
print(headerString.format(*titleList))
# Testing set
for size in sizeList:
# Print the lefthand label of the table
print(" Size: {:>5d} ".format(size), end="", flush=True)
# Test each function
for function in functionList:
# Create the data set
data = dataSet(size)
# When did we start the test
startTime = time.time()
# Detect a counter variable
if counter:
# Reset the counter
for key in counter.keys():
counter[key] = 0
function(data, counter)
else:
function(data)
# When did we end the test
endTime = time.time()
# Display in nice formatting the compare type
if compareType == "time":
value = endTime - startTime
print("{:>12.4f}".format(value), end="", flush=True)
elif compareType in counter.keys():
value = counter[compareType]
print("{:>12d}".format(value), end="", flush=True)
else:
print("ERROR: Unknown compare type " + compareType)
return
print()
print()
def show(n, function, dataSet=lambda x: x):
"""Shows the data returned by function."""
print()
data = dataSet(n)
print(function(data))
print()
|
StarcoderdataPython
|
67893
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# <NAME> <<EMAIL>>
#
# Copyright (C) 2012-2014 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Bob Database Driver entry-point for the NIST SRE 2012 database
"""
import os
import sys
from bob.db.base.driver import Interface as BaseInterface
# Driver API
# ==========
def dumplist(args):
"""Dumps lists of files based on your criteria"""
from .query import Database
db = Database()
r = db.objects(
protocol=args.protocol,
purposes=args.purpose,
model_ids=args.client,
groups=args.group
)
output = sys.stdout
if args.selftest:
from bob.db.base.utils import null
output = null()
for f in r:
output.write('%s\n' % (f.make_path(args.directory, args.extension),))
return 0
def checkfiles(args):
"""Checks existence of files based on your criteria"""
from .query import Database
db = Database()
r = db.objects(protocol=args.protocol)
# go through all files, check if they are available on the filesystem
good = []
bad = []
for f in r:
if os.path.exists(f.make_path(args.directory, args.extension, add_side=False)):
good.append(f)
else:
bad.append(f)
# report
output = sys.stdout
if args.selftest:
from bob.db.base.utils import null
output = null()
if bad:
for f in bad:
output.write('Cannot find file "%s"\n' % (f.make_path(args.directory, args.extension, add_side=False),))
output.write('%d files (out of %d) were not found at "%s"\n' % \
(len(bad), len(r), args.directory))
else:
output.write('all OK\n')
return 0
def reverse(args):
"""Returns a list of file database identifiers given the path stems"""
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.base.utils import null
output = null()
r = db.reverse(args.path)
for f in r: output.write('%d\n' % f.id)
if not r: return 1
return 0
def path(args):
"""Returns a list of fully formed paths or stems given some file id"""
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.base.utils import null
output = null()
r = db.paths(args.id, prefix=args.directory, suffix=args.extension)
for path in r: output.write('%s\n' % path)
if not r: return 1
return 0
class Interface(BaseInterface):
def name(self):
return 'nist_sre12'
def version(self):
import pkg_resources # part of setuptools
return pkg_resources.require('bob.db.%s' % self.name())[0].version
def files(self):
from pkg_resources import resource_filename
raw_files = ('db.sql3',)
return [resource_filename(__name__, k) for k in raw_files]
def type(self):
return 'sqlite'
def add_commands(self, parser):
from . import __doc__ as docs
subparsers = self.setup_parser(parser,
"NIST SRE 2012 database", docs)
# the "create" action from a submodule
from .create import add_command as create_command
create_command(subparsers)
from .query import Database
import argparse
db = Database()
# the "dumplist" action
parser = subparsers.add_parser('dumplist', help=dumplist.__doc__)
parser.add_argument('-d', '--directory', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('-p', '--protocol', help="if given, limits the dump to a particular subset of the data that corresponds to the given protocol", choices=db.protocol_names() if db.is_valid() else ())
parser.add_argument('-u', '--purpose', help="if given, this value will limit the output files to those designed for the given purposes.", choices=db.purposes() if db.is_valid() else ())
parser.add_argument('-g', '--group', help="if given, this value will limit the output files to those belonging to a particular protocolar group.", choices=db.groups() if db.is_valid() else ())
parser.add_argument('-C', '--client', type=int, help="if given, this value will limit the output files to those belonging to the given client.", choices=db.model_ids() if db.is_valid() else ())
parser.add_argument('-c', '--class', dest="sclass", help="if given, this value will limit the output files to those belonging to the given classes.", choices=('client', 'impostor'))
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=dumplist) #action
# the "checkfiles" action
parser = subparsers.add_parser('checkfiles', help=checkfiles.__doc__)
parser.add_argument('-p', '--protocol', help="Gives the data for the given protocol.", choices=db.protocol_names() if db.is_valid() else ())
parser.add_argument('-d', '--directory', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=checkfiles) #action
# adds the "reverse" command
parser = subparsers.add_parser('reverse', help=reverse.__doc__)
parser.add_argument('path', nargs='+', help="one or more path stems to look up. If you provide more than one, files which cannot be reversed will be omitted from the output.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=reverse) #action
# adds the "path" command
parser = subparsers.add_parser('path', help=path.__doc__)
parser.add_argument('-d', '--directory', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('id', nargs='+', type=int, help="one or more file ids to look up. If you provide more than one, files which cannot be found will be omitted from the output. If you provide a single id to lookup, an error message will be printed if the id does not exist in the database. The exit status will be non-zero in such case.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=path) #action
|
StarcoderdataPython
|
11322894
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Limited mock of google-cloud-sdk for tests
"""
from io import BytesIO
from google.cloud.logging.entries import StructEntry
from google.cloud.logging.resource import Resource
from google.oauth2.credentials import Credentials
from mrjob.fs.gcs import parse_gcs_uri
from .dataproc import MockGoogleDataprocClusterClient
from .dataproc import MockGoogleDataprocJobClient
from .logging import MockGoogleLoggingClient
from .storage import MockGoogleStorageClient
from tests.mr_two_step_job import MRTwoStepJob
from tests.py2 import Mock
from tests.py2 import patch
from tests.sandbox import SandboxedTestCase
_TEST_PROJECT = 'test-mrjob:test-project'
class MockGoogleTestCase(SandboxedTestCase):
def setUp(self):
super(MockGoogleTestCase, self).setUp()
# maps (project_id, region, cluster_name) to a
# google.cloud.dataproc_v1beta2.types.Cluster
self.mock_clusters = {}
# maps (project_id, region, job_name) to a
# google.cloud.dataproc_v1beta2.types.Job
self.mock_jobs = {}
# set this to False to make jobs ERROR
self.mock_jobs_succeed = True
# a list of StructEntry objects for mock logging client to return
self.mock_log_entries = []
# mock OAuth token, returned by mock google.auth.default()
self.mock_token = '<PASSWORD>'
# mock project ID, returned by mock google.auth.default()
self.mock_project_id = 'mock-project-12345'
# Maps bucket name to a dictionary with the key
# *blobs*. *blobs* maps object name to
# a dictionary with the key *data*, which is
# a bytestring.
self.mock_gcs_fs = {}
self.start(patch('google.api_core.grpc_helpers.create_channel',
self.create_channel))
self.start(patch('google.auth.default', self.auth_default))
self.start(patch(
'google.cloud.dataproc_v1beta2.ClusterControllerClient',
self.cluster_client))
self.start(patch('google.cloud.dataproc_v1beta2.JobControllerClient',
self.job_client))
self.start(patch('google.cloud.logging.Client',
self.logging_client))
self.start(patch('google.cloud.storage.client.Client',
self.storage_client))
self.start(patch('time.sleep'))
def auth_default(self, scopes=None):
credentials = Credentials(self.mock_token, scopes=scopes)
return (credentials, self.mock_project_id)
def create_channel(self, target, credentials=None):
channel = Mock()
channel._channel = Mock()
channel._channel.target = Mock(return_value=target)
return channel
def cluster_client(self, channel=None, credentials=None):
return MockGoogleDataprocClusterClient(
channel=channel,
credentials=credentials,
mock_clusters=self.mock_clusters,
mock_gcs_fs=self.mock_gcs_fs,
mock_jobs=self.mock_jobs,
mock_jobs_succeed=self.mock_jobs_succeed,
)
def job_client(self, channel=None, credentials=None):
return MockGoogleDataprocJobClient(
channel=channel,
credentials=credentials,
mock_clusters=self.mock_clusters,
mock_gcs_fs=self.mock_gcs_fs,
mock_jobs=self.mock_jobs,
mock_jobs_succeed=self.mock_jobs_succeed,
)
def logging_client(self, project=None, credentials=None):
return MockGoogleLoggingClient(
credentials=credentials,
mock_log_entries=self.mock_log_entries,
project=project,
)
def storage_client(self, project=None, credentials=None):
return MockGoogleStorageClient(mock_gcs_fs=self.mock_gcs_fs)
def add_mock_log_entry(
self, payload, logger, insert_id=None, timestamp=None,
labels=None, severity=None, http_request=None, resource=None):
if isinstance(resource, dict):
resource = Resource(**resource)
entry = StructEntry(
http_request=http_request,
insert_id=insert_id,
labels=labels,
logger=logger,
payload=payload,
resource=resource,
severity=severity,
timestamp=timestamp,
)
self.mock_log_entries.append(entry)
def make_runner(self, *args):
"""create a dummy job, and call make_runner() on it.
Use this in a with block:
with self.make_runner() as runner:
...
"""
stdin = BytesIO(b'foo\nbar\n')
mr_job = MRTwoStepJob(['-r', 'dataproc'] + list(args))
mr_job.sandbox(stdin=stdin)
return mr_job.make_runner()
def put_gcs_multi(self, gcs_uri_to_data_map):
client = self.storage_client()
for uri, data in gcs_uri_to_data_map.items():
bucket_name, blob_name = parse_gcs_uri(uri)
bucket = client.bucket(bucket_name)
if not bucket.exists():
bucket.create()
blob = bucket.blob(blob_name)
blob.upload_from_string(data)
def put_job_output_parts(self, dataproc_runner, raw_parts):
"""Generate fake output on GCS for the given Dataproc runner."""
assert type(raw_parts) is list
base_uri = dataproc_runner.get_output_dir()
gcs_multi_dict = dict()
for part_num, part_data in enumerate(raw_parts):
gcs_uri = base_uri + 'part-%05d' % part_num
gcs_multi_dict[gcs_uri] = part_data
self.put_gcs_multi(gcs_multi_dict)
|
StarcoderdataPython
|
8196080
|
import time
from timeloop import Timeloop
from datetime import timedelta
tl = Timeloop()
@tl.job(interval=timedelta(seconds=2))
def card_loop():
print ("checking...")
|
StarcoderdataPython
|
333973
|
from django.urls import path, include
urlpatterns = [
path('halo/', include('halo.urls')),
]
|
StarcoderdataPython
|
5095750
|
# import files
from mesh import MeshingAlg
from findNearest import findNearest
from redBalltracking import RedBall
from faceDetector import FaceDetector
from gazeBehaviour import GazeBehaviour
# import necessary libraries
from collections import deque
import numpy as np
import cv2
import csv
import os
import argparse
import imutils
import logging as log
dir = 'input'
directory = os.fsencode(dir)
for file in os.listdir(directory):
filename = os.fsdecode(file)
cap = cv2.VideoCapture(dir+'/'+filename+'/world_viz.mp4')
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64, help="max buffer size")
args = vars(ap.parse_args())
pts = deque(maxlen=args["buffer"])
mesh = MeshingAlg()
ballTracking = RedBall()
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
log.basicConfig(filename='faceDetected.log', level=log.INFO)
anterior = 0
face = FaceDetector()
print("Preparing Data...")
knownFaces, knownLabels = face.prepare_training_data("training-data", faceCascade)
print("Data prepared")
# create our LBPH face recognizer
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(knownFaces, np.array(knownLabels))
timestamps_gaze = list()
norm_pos_x = list()
norm_pos_y = list()
gaze = GazeBehaviour()
#print(filename)
f = gaze.open(filename)
with open(dir+'/'+filename+'/gaze_postions.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
timestamps_gaze.append(float(row['timestamp']))
norm_pos_x.append(row['norm_pos_x'])
norm_pos_y.append(row['norm_pos_y'])
# print(row['timestamp'], row['norm_pos_x'], row['norm_pos_y'])
# print(timestamps_gaze[2])
# print(norm_pos_y[2]) # dont forget it starts with 0
# print(norm_pos_x[2])
timestamps = np.load(dir+'/'+filename+'/world_viz_timestamps.npy')
i = 0
while i < length:
ret, frame = cap.read()
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if frame is not None:
frame = imutils.resize(frame, width=750)
height, width, channels = frame.shape
frame, markers = mesh.mesh(frame)
frame, pts, ball = ballTracking.tracking(frame, pts, args)
anterior, faces, facesTrained = face.detecting(frame, anterior, faceCascade)
labels = face.predict(frame, face_recognizer, faces, facesTrained)
# calculate the nearest timestamp for the current frame
time = timestamps[i]
time_close, ind = findNearest(timestamps_gaze, float(time))
# use the x, y position of the closest timestamp norm_pos_*
pos_x = norm_pos_x[ind]
pos_y = norm_pos_y[ind]
cv2.circle(frame, (int(float(pos_x)*width), int(height - int(float(pos_y)*height))), 10, (0, 255, 1),
thickness=5, lineType=8, shift=0) # draw circle
fixation = [(int(float(pos_x)*width)), int(height - int(float(pos_y)*height))]
# check the gaze behaviour
            if len(ball) != 0:
gaze.record(time_close, markers, ball, faces, fixation, labels, f)
cv2.imshow('frame', frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
i = i + 1
#cv2.waitKey(0)
gaze.close(f)
cap.release()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
1673603
|
# Generated by Django 2.2.4 on 2019-08-09 14:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movies', '0002_auto_20190809_1409'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='year',
field=models.IntegerField(),
),
]
|
StarcoderdataPython
|