max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
tools/telemetry/telemetry/value/value_backcompat.py | iplo/Chain | 231 | 12725603 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Backward compatibility for old results API.
This module helps convert the old PageMeasurementResults API into the new
style one. This exists as a bridging solution so we can change the underlying
implementation and update the PageMeasurementResults API once we know the
underlying implementation is solid.
"""
from telemetry import value as value_module
from telemetry.value import histogram
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
def ConvertOldCallingConventionToValue(page, trace_name, units,
value, chart_name, data_type):
value_name = value_module.ValueNameFromTraceAndChartName(
trace_name, chart_name)
if data_type == 'default':
if isinstance(value, list):
return list_of_scalar_values.ListOfScalarValues(
page, value_name, units, value, important=True)
else:
return scalar.ScalarValue(page, value_name, units,
value, important=True)
elif data_type == 'unimportant':
if isinstance(value, list):
return list_of_scalar_values.ListOfScalarValues(
page, value_name, units, value, important=False)
else:
return scalar.ScalarValue(page, value_name, units,
value, important=False)
elif data_type == 'histogram':
assert isinstance(value, basestring)
return histogram.HistogramValue(
page, value_name, units, raw_value_json=value, important=True)
elif data_type == 'unimportant-histogram':
assert isinstance(value, basestring)
return histogram.HistogramValue(
page, value_name, units, raw_value_json=value, important=False)
elif data_type == 'informational':
raise NotImplementedError()
else:
raise ValueError('Unrecognized data type %s' % data_type)
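# Illustrative usage sketch (not from the original module). Assuming `page` is a
# telemetry page object, converting an old-style ('trace', 'chart') result:
#
#   value = ConvertOldCallingConventionToValue(
#       page, 'warm', 'ms', 14, 'load_time', 'unimportant')
#   # -> scalar.ScalarValue(page, <combined value name>, 'ms', 14, important=False)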
|
contrib/conflict_resolution/resolve_interactively.py | telotortium/vdirsyncer | 888 | 12725623 | #!/usr/bin/env python3
"""Ask user to resolve a vdirsyncer sync conflict interactively.
Needs a way to ask the user.
The use of https://apps.kde.org/kdialog/ for GNU/Linux is hardcoded.
Depends on Python > 3.5 and KDialog.
Usage:
Ensure the file is executable and use it in the vdirsyncer.conf file, e.g.
conflict_resolution = ["command", "/home/bern/vdirsyncer/resolve_interactively.py"]
This file is Free Software under the following license:
SPDX-License-Identifier: BSD-3-Clause
SPDX-FileCopyrightText: 2021 Intevation GmbH <https://intevation.de>
Author: <<EMAIL>>
"""
import re
import subprocess
import sys
from pathlib import Path
KDIALOG = "/usr/bin/kdialog"
SUMMARY_PATTERN = re.compile("^(SUMMARY:.*)$", re.MULTILINE)
def get_summary(icalendar_text: str):
"""Get the first SUMMARY: line from an iCalendar text.
Folded (continued) lines are not handled; only the first physical line is returned.
"""
match = re.search(SUMMARY_PATTERN, icalendar_text)
return match[1]
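# Illustrative example (not from the original script): for a minimal fragment
#   sample = "BEGIN:VEVENT\nSUMMARY:Dentist\nEND:VEVENT\n"
# get_summary(sample) returns "SUMMARY:Dentist".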
def main(ical1_filename, ical2_filename):
ical1 = ical1_filename.read_text()
ical2 = ical2_filename.read_text()
additional_args = ["--yes-label", "take first"] # return code == 0
additional_args += ["--no-label", "take second"] # return code == 1
additional_args += ["--cancel-label", "do not resolve"] # return code == 2
r = subprocess.run(
args=[
KDIALOG,
"--warningyesnocancel",
"There was a sync conflict, do you prefer the first entry: \n"
f"{get_summary(ical1)}...\n(full contents: {ical1_filename})\n\n"
"or the second entry:\n"
f"{get_summary(ical2)}...\n(full contents: {ical2_filename})?",
]
+ additional_args
)
if r.returncode == 2:
# cancel was pressed
return # shall lead to items not changed, because not copied
if r.returncode == 0:
# we want to take the first item, so overwrite the second
ical2_filename.write_text(ical1)
else: # r.returncode == 1, we want the second item, so overwrite the first
ical1_filename.write_text(ical2)
if len(sys.argv) != 3:
sys.stdout.write(__doc__)
else:
main(Path(sys.argv[1]), Path(sys.argv[2]))
|
04_transformer_tutorial_2nd_part/BERT_tutorial/dataset/wiki_dataset.py | loveagri/a_journey_into_math_of_ml | 1,590 | 12725674 | from torch.utils.data import Dataset
import tqdm
import json
import torch
import random
import numpy as np
from sklearn.utils import shuffle
class BERTDataset(Dataset):
def __init__(self, corpus_path, word2idx_path, seq_len, hidden_dim=384, on_memory=True):
# hidden dimension for positional encoding
self.hidden_dim = hidden_dim
# define path of dicts
self.word2idx_path = word2idx_path
# define max length
self.seq_len = seq_len
# load whole corpus at once or not
self.on_memory = on_memory
# directory of corpus dataset
self.corpus_path = corpus_path
# define special symbols
self.pad_index = 0
self.unk_index = 1
self.cls_index = 2
self.sep_index = 3
self.mask_index = 4
self.num_index = 5
# load the word-to-index dictionary
with open(word2idx_path, "r", encoding="utf-8") as f:
self.word2idx = json.load(f)
# load the corpus
with open(corpus_path, "r", encoding="utf-8") as f:
if not on_memory:
# if the dataset is not loaded into memory, first count the number of corpus lines
self.corpus_lines = 0
for _ in tqdm.tqdm(f, desc="Loading Dataset"):
self.corpus_lines += 1
if on_memory:
# load the entire dataset into memory
self.lines = [eval(line) for line in tqdm.tqdm(f, desc="Loading Dataset")]
self.corpus_lines = len(self.lines)
if not on_memory:
# if not loading everything into memory, open the corpus file first
self.file = open(corpus_path, "r", encoding="utf-8")
# then open the same corpus again, used for drawing negative samples
self.random_file = open(corpus_path, "r", encoding="utf-8")
# skip a random number of lines so negative samples are drawn from an offset position
for _ in range(np.random.randint(self.corpus_lines if self.corpus_lines < 1000 else 1000)):
self.random_file.__next__()
def __len__(self):
return self.corpus_lines
def __getitem__(self, item):
t1, t2, is_next_label = self.random_sent(item)
t1_random, t1_label = self.random_char(t1)
t2_random, t2_label = self.random_char(t2)
t1 = [self.cls_index] + t1_random + [self.sep_index]
t2 = t2_random + [self.sep_index]
t1_label = [self.pad_index] + t1_label + [self.pad_index]
t2_label = t2_label + [self.pad_index]
segment_label = ([0 for _ in range(len(t1))] + [1 for _ in range(len(t2))])[:self.seq_len]
bert_input = (t1 + t2)[:self.seq_len]
bert_label = (t1_label + t2_label)[:self.seq_len]
output = {"bert_input": torch.tensor(bert_input),
"bert_label": torch.tensor(bert_label),
"segment_label": torch.tensor(segment_label),
"is_next": torch.tensor([is_next_label])}
return output
def tokenize_char(self, segments):
return [self.word2idx.get(char, self.unk_index) for char in segments]
def random_char(self, sentence):
char_tokens_ = list(sentence)
char_tokens = self.tokenize_char(char_tokens_)
output_label = []
for i, token in enumerate(char_tokens):
prob = random.random()
if prob < 0.30:
prob /= 0.30
output_label.append(char_tokens[i])
# 80% randomly change token to mask token
if prob < 0.8:
char_tokens[i] = self.mask_index
# 10% randomly change token to random token
elif prob < 0.9:
char_tokens[i] = random.randrange(len(self.word2idx))
else:
output_label.append(0)
return char_tokens, output_label
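# Illustrative note (not from the original file): the loop above follows a
# BERT-style masking recipe. About 30% of characters are selected for
# prediction; of those, ~80% are replaced with the mask id, ~10% with a random
# vocabulary id, and ~10% are left unchanged. output_label keeps the original
# id for selected positions and 0 elsewhere.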
def random_sent(self, index):
t1, t2 = self.get_corpus_line(index)
# output_text, label(isNotNext:0, isNext:1)
if random.random() > 0.5:
return t1, t2, 1
else:
return t1, self.get_random_line(), 0
def get_corpus_line(self, item):
if self.on_memory:
return self.lines[item]["text1"], self.lines[item]["text2"]
else:
line = self.file.__next__()
if line is None:
self.file.close()
self.file = open(self.corpus_path, "r", encoding="utf-8")
line = self.file.__next__()
line = eval(line)
t1, t2 = line["text1"], line["text2"]
return t1, t2
def get_random_line(self):
if self.on_memory:
return self.lines[random.randrange(len(self.lines))]["text2"]
line = self.random_file.__next__()
if line is None:
self.random_file.close()
self.random_file = open(self.corpus_path, "r", encoding="utf-8")
for _ in range(np.random.randint(self.corpus_lines if self.corpus_lines < 1000 else 1000)):
self.random_file.__next__()
line = self.random_file.__next__()
return eval(line)["text2"] |
IkaLog.py | fetus-hina/IkaLog | 285 | 12725693 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ikalog.utils import Localization, IkaUtils
Localization.print_language_settings()
import argparse
import signal
import sys
import time
from ikalog import inputs
from ikalog.engine import IkaEngine
from ikalog.utils import config_loader
def signal_handler(num, frame):
IkaUtils.dprint('IkaLog: got signal %d' % num)
if num == 2:
engine.stop()
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i', dest='input', type=str,
choices=['DirectShow', 'CVCapture', 'ScreenCapture',
'AVFoundationCapture', 'CVFile'])
parser.add_argument('--input_file', '-f', dest='input_file', type=str,
nargs='*', help='Input video file. '
'Other flags can refer this flag as __INPUT_FILE__')
parser.add_argument('--output_json', '--json',
dest='output_json', type=str)
parser.add_argument('--output_description', '--desc',
dest='output_description', type=str)
parser.add_argument('--statink_payload',
dest='statink_payload', type=str,
help='Payload file to stat.ink. '
'If this is specified, the data is not uploaded.')
parser.add_argument('--profile', dest='profile', action='store_true',
default=False)
parser.add_argument('--time', '-t', dest='time', type=str)
parser.add_argument('--time_msec', dest='time_msec', type=int)
parser.add_argument('--video_id', dest='video_id', type=str)
parser.add_argument('--keep_alive', action='store_true', default=False,
help='Do not exit on EOFError with no next inputs.')
parser.add_argument('--debug', dest='debug', action='store_true',
default=False)
return vars(parser.parse_args())
def get_pos_msec(args):
if args['time_msec']:
return args['time_msec']
elif args['time']:
minute, sec = args['time'].split(':')
return (int(minute) * 60 + int(sec)) * 1000
else:
return 0
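# Illustrative example (not from the original file): '--time 1:30' is parsed to
# (1 * 60 + 30) * 1000 = 90000 milliseconds; '--time_msec 90000' is equivalent.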
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
args = get_args()
capture, output_plugins = config_loader.config(args)
capture.set_pos_msec(get_pos_msec(args))
engine = IkaEngine(enable_profile=args.get('profile'),
keep_alive=args.get('keep_alive'))
engine.pause(False)
engine.set_capture(capture)
engine.set_plugins(output_plugins)
for op in output_plugins:
engine.enable_plugin(op)
engine.close_session_at_eof = True
IkaUtils.dprint('IkaLog: start.')
engine.run()
IkaUtils.dprint('bye!')
|
jill/utils/mount_utils.py | KronosTheLate/jill.py | 162 | 12725710 | from tempfile import mkdtemp
import os
import sys
import subprocess
import shutil
import time
class Mounter:
def __init__(self, src_path, mount_root="."):
self.src_path = src_path
self.mount_root = os.path.abspath(mount_root)
mount_name = os.path.splitext(os.path.split(self.src_path)[1])[0]
self.mount_point = os.path.join(self.mount_root, mount_name)
class TarMounter(Mounter):
def __enter__(self):
self.tempdir = mkdtemp()
# this only supports compressed tarball: *.tar.gz and *.tgz
args = ["tar", "-zxf", self.src_path, "-C", self.tempdir]
extra_args = ["--strip-components", "1"]
args.extend(extra_args)
is_success = subprocess.run(args).returncode == 0
if is_success:
return self.tempdir
raise IOError(f"could not untar {self.src_path}")
def __exit__(self, type, value, tb):
shutil.rmtree(self.tempdir)
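# Illustrative usage sketch (not from the original module); the tarball name is
# a hypothetical example:
#
#   with TarMounter("julia-1.6.2-linux-x86_64.tar.gz") as tmpdir:
#       print(os.listdir(tmpdir))  # extracted contents; the tempdir is removed on exit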
class DmgMounter(Mounter):
def __init__(self, src_path, mount_root=".", verbose=False, max_try=5):
super(DmgMounter, self).__init__(src_path, mount_root)
self.extra_args = ["-mount", "required"]
self.max_try = max_try
if not verbose:
self.extra_args.append("-quiet")
@staticmethod
def umount(mount_point):
if os.path.exists(mount_point):
rst = subprocess.run(["umount", mount_point])
return not rst.returncode
return True
def __enter__(self):
assert sys.platform == "darwin"
args = ["hdiutil", "attach", self.src_path,
"-mountpoint", self.mount_point]
args.extend(self.extra_args)
DmgMounter.umount(self.mount_point)
# the mount might fail for unknown reason,
# set a max_try here to work it around
cur_try = 1
while cur_try <= self.max_try:
is_success = subprocess.run(args).returncode == 0
if is_success:
return self.mount_point
time.sleep(0.5)
cur_try += 1
raise IOError(f"{self.src_path} is not mounted successfully")
def __exit__(self, type, value, tb):
DmgMounter.umount(self.mount_point)
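# Illustrative usage sketch (not from the original module); macOS only, and the
# .dmg name is a hypothetical example:
#
#   with DmgMounter("julia-1.6.2-mac64.dmg", mount_root="/tmp") as mount_point:
#       print(os.listdir(mount_point))  # image is unmounted again on exit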
|
tests/exhaustive/ncaab_tests.py | MArtinherz/sportsipy | 221 | 12725713 | import sys, os
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
from sportsipy.ncaab.conferences import Conferences
from sportsipy.ncaab.rankings import Rankings
from sportsipy.ncaab.teams import Teams
for team in Teams():
print(team.name)
for player in team.roster.players:
print(player.name.encode('utf-8'))
for game in team.schedule:
print(game.dataframe)
print(game.dataframe_extended)
conferences = Conferences()
print(conferences.conferences)
print(conferences.team_conference)
rankings = Rankings()
print(rankings.current)
print(rankings.current_extended)
print(rankings.complete)
|
tests/api/inbound_eligibility/test_inbound_eligibility.py | Camille-cmd/python-amazon-sp-api | 213 | 12725732 | <filename>tests/api/inbound_eligibility/test_inbound_eligibility.py
from sp_api.api import FbaInboundEligibility
def test_inbound_eligibility():
res = FbaInboundEligibility().get_item_eligibility_preview(asin='TEST_CASE_200', program="INBOUND")
assert res.payload is not None
|
components/isceobj/IsceProc/runResamp_image.py | vincentschut/isce2 | 1,133 | 12725746 | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2013 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Authors: <NAME>, <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Comment: Adapted from InsarProc/runResamp_image.py
import os
import logging
import isceobj
import stdproc
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
logger = logging.getLogger('isce.isceProc.runResamp_image')
def runResamp_image(self):
refPol = self._isce.refPol
stdWriter = self._stdWriter
dopplerCentroid = self._isce.dopplerCentroid
looks = self._isce.numberLooks
numFitCoeff = self._isce.numberFitCoefficients
offsetImageName = self._isce.offsetImageName
pixelSpacing = self._isce.slantRangePixelSpacing
lines = self._isce.numberResampLines
for sceneid1, sceneid2 in self._isce.pairsToCoreg:
pair = (sceneid1, sceneid2)
imageSlc1 = self._isce.slcImages[sceneid1][refPol]
frame1 = self._isce.frames[sceneid1][refPol]
instrument = frame1.getInstrument()
offsetField = self._isce.refinedOffsetFields[pair] #offsetField is the same for all pols
imageSlc2 = self._isce.slcImages[sceneid2][refPol]
catalog = isceobj.Catalog.createCatalog(self._isce.procDoc.name)
sid = self._isce.formatname(pair)
offsetFilename = os.path.join(self.getoutputdir(sceneid1, sceneid2), self._isce.formatname(pair, ext=offsetImageName))
imageAz, imageRn = run(imageSlc1, imageSlc2, offsetField, instrument, dopplerCentroid, looks, lines, numFitCoeff, pixelSpacing, offsetFilename, stdWriter, catalog=catalog, sceneid=sid)
self._isce.offsetAzimuthImages[pair] = imageAz
self._isce.offsetRangeImages[pair] = imageRn
def run(imageSlc1, imageSlc2, offsetField, instrument, dopplerCentroid, looks, lines, numFitCoeff, pixelSpacing, offsetFilename, stdWriter, catalog=None, sceneid='NO_ID'):
widthSlc = max(imageSlc1.getWidth(), imageSlc2.getWidth())
dopplerCoeff = dopplerCentroid.getDopplerCoefficients(inHz=False)
path, filename = os.path.split(offsetFilename)
offsetAz = os.path.join(path, 'azimuth_' + filename)
offsetRn = os.path.join(path, 'range_' + filename)
widthOffset = int(widthSlc / looks)
imageAz = isceobj.createOffsetImage()
imageAz.setFilename(offsetAz)
imageAz.setWidth(widthOffset)
imageRn = isceobj.createOffsetImage()
imageRn.setFilename(offsetRn)
imageRn.setWidth(widthOffset)
objAz = isceobj.createOffsetImage()
objRn = isceobj.createOffsetImage()
IU.copyAttributes(imageAz, objAz)
IU.copyAttributes(imageRn, objRn)
objAz.setAccessMode('write')
objAz.createImage()
objRn.setAccessMode('write')
objRn.createImage()
objResamp_image = stdproc.createResamp_image()
objResamp_image.wireInputPort(name='offsets', object=offsetField)
objResamp_image.wireInputPort(name='instrument', object=instrument)
objResamp_image.setSlantRangePixelSpacing(pixelSpacing)
objResamp_image.setDopplerCentroidCoefficients(dopplerCoeff)
objResamp_image.setNumberLooks(looks)
objResamp_image.setNumberLines(lines)
objResamp_image.setNumberRangeBin(widthSlc)
objResamp_image.setNumberFitCoefficients(numFitCoeff)
#set the tag used in the outfile. each message is preceded by this tag
#if the writer is not of "file" type the call has no effect
objResamp_image.stdWriter = stdWriter.set_file_tags("resamp_image",
"log",
"err",
"out")
objResamp_image.resamp_image(objRn, objAz)
if catalog is not None:
# Record the inputs and outputs
isceobj.Catalog.recordInputsAndOutputs(catalog, objResamp_image,
"runResamp_image.%s" % sceneid,
logger,
"runResamp_image.%s" % sceneid)
objRn.finalizeImage()
objAz.finalizeImage()
return imageAz, imageRn
|
tests/test_08_addons/test_815_dxf_browser.py | jkjt/ezdxf | 515 | 12725751 | # Copyright (c) 2021, <NAME>
# License: MIT License
import pytest
pytest.importorskip("PySide6")
from io import StringIO
import math
from ezdxf.lldxf.tags import Tags, DXFTag
from ezdxf.lldxf.loader import load_dxf_structure
from ezdxf.lldxf.tagger import ascii_tags_loader
from ezdxf.addons.browser import DXFTagsModel, DXFStructureModel, DXFDocument
from ezdxf.addons.browser.tags import compile_tags
from ezdxf.addons.browser.data import (
EntityIndex,
EntityHistory,
SearchIndex,
)
from ezdxf.addons.xqt import Qt, QModelIndex
def txt2tags(s: str) -> Tags:
return Tags(ascii_tags_loader(StringIO(s), skip_comments=False))
NAN = float("nan")
# noinspection PyMissingConstructor
class ModelIndex(QModelIndex):
"""Proxy"""
def __init__(self, row, col):
self._row = row
self._col = col
def row(self):
return self._row
def column(self):
return self._col
class TestDXFTagsModel:
def tags(self):
return txt2tags(POINT)
@pytest.fixture
def model(self):
return DXFTagsModel(self.tags())
def test_fixed_column_count(self, model):
assert model.columnCount() == 3
def test_row_count(self, model):
assert model.rowCount() == len(compile_tags(self.tags()))
def test_render_display_role(self, model):
assert model.data(ModelIndex(0, 0), role=Qt.DisplayRole) == "0"
assert model.data(ModelIndex(0, 1), role=Qt.DisplayRole) == "<ctrl>"
assert model.data(ModelIndex(0, 2), role=Qt.DisplayRole) == "POINT"
POINT = """0
POINT
5
0
330
0
100
AcDbEntity
8
0
100
AcDbPoint
10
0.0
20
0.0
30
0.0
"""
def test_setup_dxf_structure_model():
sections = load_dxf_structure(txt2tags(ENTITIES))
doc = DXFDocument(sections)
model = DXFStructureModel("ez.dxf", doc)
parent = model.item(0, 0)
assert parent.data(Qt.DisplayRole) == "ez.dxf"
assert "ENTITIES" in parent.child(0, 0).data(Qt.DisplayRole)
# one level down
parent = parent.child(0, 0)
assert "LINE" in parent.child(0, 0).data(Qt.DisplayRole)
assert "LINE" in parent.child(1, 0).data(Qt.DisplayRole)
class TestDXFDocument:
@pytest.fixture
def doc(self):
sections = load_dxf_structure(txt2tags(ENTITIES))
return DXFDocument(sections)
def test_get_entity_returns_entity_tags(self, doc):
entity = doc.get_entity("100")
assert entity[0] == (0, "LINE")
def test_get_entity_by_invalid_handle_returns_none(self, doc):
assert doc.get_entity("XXX") is None
def test_get_start_line_number_for_entity(self, doc):
entity = doc.get_entity("101")
assert doc.get_line_number(entity) == 9
def test_get_entity_by_line_number(self, doc):
entity = doc.get_entity("101")
assert doc.get_entity_at_line(9) is entity
assert doc.get_entity_at_line(10) is entity
assert (
doc.get_entity_at_line(99) is entity
), "should return the last entity"
class TestTagCompiler:
def test_compile_single_int(self):
tags = compile_tags(txt2tags("70\n3"))
assert tags[0] == (70, 3)
def test_compile_invalid_int_to_str(self):
tags = compile_tags(txt2tags("70\nx"))
assert tags[0] == (70, "x")
def test_compile_single_float(self):
tags = compile_tags(txt2tags("40\n3.14"))
assert tags[0] == (40, 3.14)
def test_compile_invalid_float_to_str(self):
tags = compile_tags(txt2tags("40\nx.14"))
assert tags[0] == (40, "x.14")
def test_compile_single_2d_point(self):
tags = compile_tags(txt2tags("10\n1.2\n20\n2.3"))
assert tags[0] == (10, (1.2, 2.3))
def test_compile_two_2d_points(self):
tags = compile_tags(txt2tags("10\n1.1\n20\n1.2\n10\n2.1\n20\n2.2"))
assert tags[0] == (10, (1.1, 1.2))
assert tags[1] == (10, (2.1, 2.2))
def test_compile_nan_coords_2d(self):
tags = compile_tags(txt2tags("10\nx.2\n20\n2.3"))
assert math.isnan(tags[0].value[0])
def test_compile_single_3d_point(self):
tags = compile_tags(txt2tags("10\n1.2\n20\n2.3\n30\n3.4"))
assert tags[0] == (10, (1.2, 2.3, 3.4))
def test_compile_nan_coords_3d(self):
tags = compile_tags(txt2tags("10\n1\n20\n2\n30\nx"))
assert math.isnan(tags[0].value[2])
def test_compile_single_group_code_10(self):
tags = compile_tags(txt2tags("10\n1.1"))
assert tags[0] == (10, 1.1)
def test_compile_two_group_code_10(self):
tags = compile_tags(txt2tags("10\n1.1\n10\n2.2"))
assert tags[0] == (10, 1.1)
assert tags[1] == (10, 2.2)
def test_compile_swapped_coords(self):
tags = compile_tags(txt2tags("20\n2.2\n10\n1.1"))
assert tags[0] == (20, 2.2), "expected coords as single tags"
assert tags[1] == (10, 1.1), "expected coords as single tags"
ENTITIES = """0
SECTION
2
ENTITIES
0
LINE
5
100
0
LINE
5
101
0
ENDSEC
0
EOF
"""
SECTIONS = """0
SECTION
2
HEADER
9
$ACADVER
1
AC1032
0
ENDSEC
0
SECTION
2
ENTITIES
0
LINE
5
100
0
ENDSEC
0
EOF
"""
class TestEntityIndex:
@pytest.fixture(scope="class")
def index(self):
data = {
"ENTITIES": [
Tags([DXFTag(0, "ENTITY1"), DXFTag(5, "F001")]),
Tags([DXFTag(0, "ENTITY2"), DXFTag(5, "F002")]),
Tags([DXFTag(0, "ENTITY3"), DXFTag(5, "F003")]),
Tags([DXFTag(0, "ENTITY4"), DXFTag(5, "F004")]),
# last entity without handle, has dummy handle "*1"
Tags([DXFTag(0, "ENTITY5"), DXFTag(1, "DATA")]),
]
}
return EntityIndex(data)
def test_contains_all_entities(self, index):
assert "F001" in index
assert "F002" in index
assert "F003" in index
assert "F004" in index
assert "*1" in index, "expected dummy handle"
def test_get_entity_by_handle(self, index):
tags = index.get("F001")
assert tags[0] == (0, "ENTITY1")
def test_get_entity_by_dummy_handle(self, index):
tags = index.get("*1")
assert tags[0] == (0, "ENTITY5")
def test_get_handle_from_casted_tags(self, index):
entity = Tags(index.get("F001"))
assert index.get_handle(entity) == "F001"
def test_get_dummy_handle_from_casted_tags(self, index):
entity = Tags(index.get("*1"))
assert index.get_handle(entity) == "*1"
def test_get_next_entity(self, index):
e1 = index.get("F001")
e2 = index.get("F002")
assert index.next_entity(e1) is e2
def test_next_entity_of_last_entity_is_last_entity(self, index):
e1 = index.get("*1")
assert index.next_entity(e1) is e1
def test_get_prev_entity(self, index):
e1 = index.get("F001")
e2 = index.get("F002")
assert index.previous_entity(e2) is e1
def test_prev_entity_of_first_entity_is_first_entity(self, index):
e1 = index.get("F001")
assert index.previous_entity(e1) is e1
def test_max_line_number(self, index):
assert index.max_line_number == 20
def test_get_start_line_number(self, index):
e = index.get("F003")
assert index.get_start_line_for_entity(e) == 9
def test_get_start_line_number_for_dummy_handle(self, index):
e = index.get("*1")
assert index.get_start_line_for_entity(e) == 17
def test_entity_at_line(self, index):
e3 = index.get("F003")
assert index.get_entity_at_line(9) is e3
assert index.get_entity_at_line(10) is e3
def test_entity_at_line_for_dummy_handle(self, index):
e = index.get("*1")
assert index.get_entity_at_line(19) is e
assert index.get_entity_at_line(20) is e
def test_entity_index_adds_missing_endsec_tag():
# The function load_dxf_structure() throws the ENDSEC tag away.
# The entity indexer must take this issue into account!
sections = load_dxf_structure(txt2tags(SECTIONS))
index = EntityIndex(sections)
entity = index.get_entity_at_line(15)
assert entity.get_handle() == "100"
assert index.get_start_line_for_entity(entity) == 15
class TestEntityHistory:
@pytest.fixture
def history2(self):
history = EntityHistory()
history.append(Tags([DXFTag(1, "first")]))
history.append(Tags([DXFTag(2, "second")]))
return history
def test_setup_history(self):
history = EntityHistory()
assert len(history) == 0
assert history.index == 0
def test_empty_history_returns_none(self):
history = EntityHistory()
assert history.back() is None
assert history.forward() is None
def test_append_one_entity(self):
history = EntityHistory()
history.append(Tags())
assert len(history) == 1
assert history.index == 0
def test_append_two_entities(self):
history = EntityHistory()
history.append(Tags())
history.append(Tags())
assert len(history) == 2
assert history.index == 1
def test_go_back_in_history(self, history2):
first, second = history2.content()
assert history2.index == 1
assert history2.back() is first
assert len(history2) == 2, "entity is still in history"
assert history2.index == 0
def test_go_back_and_forward_in_history(self, history2):
first, second = history2.content()
assert history2.back() is first
assert history2.forward() is second
def test_append_should_add_time_travel_history(self, history2):
first, second = history2.content()
assert history2.back() is first # 1st time travel
assert history2.index == 0
assert history2.forward() is second # 2nd time travel
assert history2.index == 1
third = Tags([DXFTag(3, "third")])
history2.append(third)
assert history2.index == 4
# complete travel history
content = history2.content()
assert len(content) == 5
# time wraps -> append
assert content == [first, second, first, second, third]
SEARCH_EXAMPLE1 = """0
SEARCH1
8
LayerName1
62
7
"""
SEARCH_EXAMPLE2 = """0
SEARCH2
8
LayerName2
62
6
"""
class TestSearchIndex:
@pytest.fixture(scope="class")
def entities(self):
return [txt2tags(SEARCH_EXAMPLE1), txt2tags(SEARCH_EXAMPLE2)]
@pytest.fixture
def search(self, entities):
return SearchIndex(entities)
@staticmethod
def move_cursor_forward(s: SearchIndex, count: int):
for _ in range(count):
s.move_cursor_forward()
@staticmethod
def move_cursor_backward(s: SearchIndex, count: int):
for _ in range(count):
s.move_cursor_backward()
def test_valid_setup_and_default_settings(self, search):
assert len(search.entities) == 2
assert search.is_end_of_index is False
assert (
search.case_insensitive is True
), "should be case insensitive by default"
assert (
search.numbers is False
), "should not search in number tags by default"
def test_empty_search_index(self):
search_index = SearchIndex([])
assert search_index.is_end_of_index is True
def test_reset_cursor_forward(self, search):
search.reset_cursor(backward=False)
assert search.cursor() == (
0,
0,
), "cursor should be the first tag of the first entity"
assert search.is_end_of_index is False
def test_move_cursor_forward(self, search):
search.reset_cursor()
search.move_cursor_forward()
assert search.cursor() == (0, 1)
def test_move_cursor_forward_beyond_entity_border(self, search):
search.reset_cursor()
self.move_cursor_forward(search, 3)
assert search.cursor() == (1, 0)
def test_move_cursor_forward_to_the_end_of_index(self, search):
search.reset_cursor()
self.move_cursor_forward(search, 10)
assert search.is_end_of_index is True
assert search.cursor() == (
1,
2,
), "index should stop at the last tag of the last entity"
def test_reset_cursor_backward(self, search):
search.reset_cursor(backward=True)
assert search.cursor() == (
1,
2,
), "cursor should be the last tag of the last entity"
assert search.is_end_of_index is False
def test_move_cursor_backward(self, search):
search.reset_cursor(backward=True)
search.move_cursor_backward()
assert search.cursor() == (1, 1)
def test_move_cursor_backward_beyond_entity_border(self, search):
search.reset_cursor(backward=True)
self.move_cursor_backward(search, 3)
assert search.cursor() == (0, 2)
def test_move_cursor_backward_to_the_end_of_index(self, search):
search.reset_cursor()
self.move_cursor_backward(search, 10)
assert search.is_end_of_index is True
assert search.cursor() == (
0,
0,
), "index should stop at the first tag of the first entity"
def test_failing_search(self, search):
entity, index = search.find("XDATA")
assert entity is None
assert index == -1
assert search.is_end_of_index is True
def test_find_entity_type(self, search):
entity, index = search.find("SEARCH1")
assert entity is search.entities[0]
assert index == 0
def test_find_forward_entity_type(self, search):
search.find("SEARCH")
entity, index = search.find_forward()
assert entity is search.entities[1]
assert index == 0
def test_find_content(self, search):
entity, index = search.find("LayerName1")
assert entity is search.entities[0]
assert index == 1
def test_find_forward_content(self, search):
search.find("LayerName")
entity, index = search.find_forward()
assert entity is search.entities[1]
assert index == 1
def test_failing_find_forward_returns_none(self, search):
search.find("LayerName")
search.find_forward()
entity, index = search.find_forward()
assert entity is None
assert index == -1
def test_not_initiated_find_forward_returns_none(self, search):
entity, index = search.find_forward()
assert entity is None
assert index == -1
def test_case_insensitive_search(self, search):
search.case_insensitive = True
entity, index = search.find("LAYERNAME1")
assert entity is search.entities[0]
assert index == 1
def test_case_sensitive_search(self, search):
search.case_insensitive = False
entity, index = search.find("LAYERNAME1")
assert entity is None
def test_ignore_number_tags(self, search):
search.numbers = False
entity, index = search.find("6")
assert entity is None
def test_search_in_number_tags(self, search):
search.numbers = True
entity, index = search.find("6")
assert entity is search.entities[1]
assert index == 2
def test_failing_find_forward_stops_at_the_end(self, search):
assert search.find("XXX") is search.NOT_FOUND
assert search.is_end_of_index is True
def test_failing_find_backwards_stops_at_the_beginning(self, search):
assert search.find("XXX", backward=True) is search.NOT_FOUND
assert search.is_end_of_index is True
if __name__ == "__main__":
pytest.main([__file__])
|
python/ql/test/experimental/query-tests/Security-new-dataflow/CWE-078-py2/command_injection.py | vadi2/codeql | 4,036 | 12725785 | import os
import platform
import popen2
from flask import Flask, request
app = Flask(__name__)
@app.route("/python2-specific")
def python2_specific():
"""
These tests are mostly included to check for extra paths that can be generated if
we can track flow into the implementation of a stdlib function, and then to another sink.
See comment in query for more details.
"""
files = request.args.get("files", "")
os.popen2("ls " + files)
os.popen3("ls " + files)
os.popen4("ls " + files)
platform.popen("ls " + files)
popen2.popen2("ls " + files)
popen2.popen3("ls " + files)
popen2.popen4("ls " + files)
popen2.Popen3("ls " + files)
popen2.Popen4("ls " + files)
|
docs/components_page/components/progress/progress.py | imrehg/dash-bootstrap-components | 776 | 12725814 | import dash_bootstrap_components as dbc
progress = dbc.Progress(value=50)
|
test/compatibility/gpsd_test.py | quiet-oceans/libais | 161 | 12725819 | #!/usr/bin/env python
"""Tests for ais.compatibility.gpsd."""
import ais
import ais.compatibility.gpsd
import ais.stream
import unittest
import json
import os
import re
import subprocess
import six
from .. import testutils
known_problems = {
2: set(('turn', 'status_text')),
9: set(['speed']),
15: set(['mmsi2']),
17: set(('lat', 'lon')),
20: set((
'increment3', 'number3', 'offset3', 'timeout3',
'increment4', 'number4', 'offset4', 'timeout4',
)),
27: set(['status']),
}
class SingleMessageTestsTest(unittest.TestCase):
def setUp(self):
self.mangle = ais.compatibility.gpsd.Mangler()
def testMsg1(self):
fields = '!AIVDM,1,1,,B,169A91O005KbT4gUoUl9d;5j0D0U,0*2D'.split(',')
decoded = ais.decode(fields[5], int(fields[6][0]))
mangled = self.mangle(decoded)
expected = {
'type': 1,
'repeat': 0,
'mmsi': 412371205,
'status': 15,
'status_text': 'Not defined',
'turn': 0,
'speed': 0.5,
'accuracy': False,
'course': 248.0,
'heading': 354,
'second': 57,
'maneuver': 0,
'raim': False}
self.assertDictContainsSubset(expected, mangled)
# Float values will not match, so just test existence.
for field in ('lat', 'lon'):
self.assertIn(field, mangled)
def testMsg5LargeTypeAndCargo(self):
# Test based on this AIS message:
# ais.decode(r'568rWSP000009@9D000hT4r0L4pN3;D000000<o<000004000'
# r'0000000000000000000000',2)
msg = {
'id': 5,
'type_and_cargo': 204}
mangled = self.mangle(msg)
expected = {
'shiptype': 204,
'shiptype_text': '204 - Unknown',
'type': 5}
self.assertDictContainsSubset(expected, mangled)
def testTimestamps(self):
msg = {
'id': 1,
'tagblock_timestamp': 1431682043,
'year': 2015,
'month': 5,
'day': 15,
'hour': 9,
'minute': 27,
'second': 23,
}
mangled = self.mangle(msg)
expected = {
'type': 1,
'timestamp': '2015-05-15T09:27:23Z',
'tagblock_timestamp': '2015-05-15T09:27:23.000000Z'
}
self.assertDictContainsSubset(expected, mangled)
class StreamingTest(unittest.TestCase):
def setUp(self):
self.dir = os.path.split(__file__)[0]
def validate_file(self, base):
nmea_name = os.path.join(self.dir, base + '.nmea')
json_name = os.path.join(self.dir, base + '.json')
def Json():
with open(json_name) as f:
for msg in f:
yield json.loads(msg)
def Libais():
with open(nmea_name) as f:
for msg in ais.stream.decode(f):
yield ais.compatibility.gpsd.mangle(msg)
g = iter(Json())
a = iter(Libais())
try:
while True:
gmsg = six.advance_iterator(g)
amsg = six.advance_iterator(a)
while amsg['type'] != gmsg['type']:
amsg = six.advance_iterator(a)
if gmsg['type'] in known_problems:
for key in known_problems[gmsg['type']]:
if key in gmsg: del gmsg[key]
if key in amsg: del amsg[key]
diff = testutils.DictDiff(gmsg, amsg)
self.assertFalse(diff['changed'])
self.assertFalse(
diff['removed'],
'Removed not empty: %s\n %s\n %s' % (
diff['removed'],
amsg,
gmsg))
except StopIteration:
pass
def testTypeExamples(self):
self.validate_file("../data/typeexamples")
def testTagblock(self):
self.validate_file("../data/tagblock")
def HaveGpsdecode():
"""Return true if the gpsdecode binary is on the path or false if not."""
try:
subprocess.check_call(['gpsdecode', '-V'])
return True
except OSError:
return False
class TestActualGPSDCompatibility(unittest.TestCase):
def setUp(self):
self.dir = os.path.split(__file__)[0]
def validate_file(self, base):
nmea_name = os.path.join(self.dir, base + '.nmea')
json_name = os.path.join(self.dir, base + '.gpsdecode.json')
subprocess.check_call('gpsdecode < %s > %s' % (nmea_name, json_name),
shell=True)
try:
def Gpsd():
with open(json_name) as f:
for msg in f:
yield json.loads(msg)
def Libais():
with open(nmea_name) as f:
for msg in ais.stream.decode(f):
yield ais.compatibility.gpsd.mangle(msg)
g = iter(Gpsd())
a = iter(Libais())
try:
while True:
gmsg = six.advance_iterator(g)
amsg = six.advance_iterator(a)
while amsg['type'] != gmsg['type']:
amsg = six.advance_iterator(a)
if gmsg['type'] in known_problems:
for key in known_problems[gmsg['type']]:
if key in gmsg: del gmsg[key]
if key in amsg: del amsg[key]
diff = testutils.DictDiff(gmsg, amsg)
self.assertFalse(diff['changed'])
self.assertFalse(
diff['removed'],
'Removed not empty: %s\n %s\n %s' % (
diff['removed'],
amsg,
gmsg))
except StopIteration:
pass
finally:
os.unlink(json_name)
@unittest.skipIf(not HaveGpsdecode(), 'gpsdecode not in the path')
def testTypeExamples(self):
self.validate_file("../data/typeexamples")
@unittest.skipIf(not HaveGpsdecode(), 'gpsdecode not in the path')
def testTagblock(self):
self.validate_file("../data/tagblock")
if __name__ == '__main__':
unittest.main()
|
Face-Mask-Detection/Face mask detection using YOLO V4/utility-scripts/count_objs.py | swapnilgarg7/Face-X | 175 | 12725868 | import os
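# Tally YOLO-format class ids ("0" and "1") across all annotation .txt files in
# ./Dataset (classes.txt is skipped); the first character of each label line is
# taken as the class id.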
counts={"0":0,"1":0}
for file in os.listdir("./Dataset"):
if file.endswith(".txt") and file!='classes.txt':
f = open("./Dataset/"+file)
lines=f.readlines()
for line in lines:
counts[''+line[0]]+=1
print(counts) |
softdelete/admin/__init__.py | RedMoon32/django-softdelete | 242 | 12725930 | from softdelete.admin.admin import *
from softdelete.admin.forms import *
__all__ = ['SoftDeleteObjectAdmin',
'SoftDeleteRecordAdmin',
'ChangeSetAdmin',
'SoftDeleteObjectInline',
'SoftDeleteObjectAdminForm',
]
|
data/MSCOCO/MSCOCO.py | Qin-Folks/I2L-MeshNet_RELEASE | 544 | 12725945 | import os
import os.path as osp
import numpy as np
from config import cfg
import copy
import json
import scipy.io as sio
import cv2
import random
import math
import torch
import transforms3d
from pycocotools.coco import COCO
from utils.smpl import SMPL
from utils.preprocessing import load_img, process_bbox, augmentation
from utils.vis import vis_keypoints, vis_mesh, save_obj
from utils.transforms import world2cam, cam2pixel, pixel2cam, transform_joint_to_other_db
class MSCOCO(torch.utils.data.Dataset):
def __init__(self, transform, data_split):
self.transform = transform
self.data_split = 'train' if data_split == 'train' else 'val'
self.img_path = osp.join('..', 'data', 'MSCOCO', 'images')
self.annot_path = osp.join('..', 'data', 'MSCOCO', 'annotations')
self.rootnet_output_path = osp.join('..', 'data', 'MSCOCO', 'rootnet_output', 'bbox_root_coco_output.json')
self.fitting_thr = 3.0 # pixel in cfg.output_hm_shape space
# mscoco skeleton
self.coco_joint_num = 18 # original: 17, manually added pelvis
self.coco_joints_name = ('Nose', 'L_Eye', 'R_Eye', 'L_Ear', 'R_Ear', 'L_Shoulder', 'R_Shoulder', 'L_Elbow', 'R_Elbow', 'L_Wrist', 'R_Wrist', 'L_Hip', 'R_Hip', 'L_Knee', 'R_Knee', 'L_Ankle', 'R_Ankle', 'Pelvis')
self.coco_skeleton = ( (1, 2), (0, 1), (0, 2), (2, 4), (1, 3), (6, 8), (8, 10), (5, 7), (7, 9), (12, 14), (14, 16), (11, 13), (13, 15), (5, 6), (11, 12) )
self.coco_flip_pairs = ( (1, 2), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12), (13, 14), (15, 16) )
self.coco_joint_regressor = np.load(osp.join('..', 'data', 'MSCOCO', 'J_regressor_coco_hip_smpl.npy'))
# smpl skeleton
self.smpl = SMPL()
self.face = self.smpl.face
self.joint_regressor = self.smpl.joint_regressor
self.vertex_num = self.smpl.vertex_num
self.joint_num = self.smpl.joint_num
self.joints_name = self.smpl.joints_name
self.flip_pairs = self.smpl.flip_pairs
self.skeleton = self.smpl.skeleton
self.root_joint_idx = self.smpl.root_joint_idx
self.face_kps_vertex = self.smpl.face_kps_vertex
self.datalist = self.load_data()
def add_pelvis(self, joint_coord):
lhip_idx = self.coco_joints_name.index('L_Hip')
rhip_idx = self.coco_joints_name.index('R_Hip')
pelvis = (joint_coord[lhip_idx, :] + joint_coord[rhip_idx, :]) * 0.5
pelvis[2] = joint_coord[lhip_idx,2] * joint_coord[rhip_idx,2] # joint_valid
pelvis = pelvis.reshape(1, 3)
joint_coord = np.concatenate((joint_coord, pelvis))
return joint_coord
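# Note (added for clarity, not from the original file): the pelvis is appended
# as the midpoint of the two hip joints, and its validity flag is the product
# of the hip flags, so it is valid only when both hips are annotated.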
def load_data(self):
db = COCO(osp.join(self.annot_path, 'person_keypoints_' + self.data_split + '2017.json'))
with open(osp.join(self.annot_path, 'coco_smplifyx_train.json')) as f:
smplify_results = json.load(f)
datalist = []
if self.data_split == 'train':
for aid in db.anns.keys():
ann = db.anns[aid]
img = db.loadImgs(ann['image_id'])[0]
imgname = osp.join('train2017', img['file_name'])
img_path = osp.join(self.img_path, imgname)
width, height = img['width'], img['height']
if ann['iscrowd'] or (ann['num_keypoints'] == 0):
continue
# bbox
bbox = process_bbox(ann['bbox'], width, height)
if bbox is None: continue
# joint coordinates
joint_img = np.array(ann['keypoints'], dtype=np.float32).reshape(-1,3)
joint_img = self.add_pelvis(joint_img)
joint_valid = (joint_img[:,2].copy().reshape(-1,1) > 0).astype(np.float32)
joint_img[:,2] = 0
if str(aid) in smplify_results:
smplify_result = smplify_results[str(aid)]
else:
smplify_result = None
datalist.append({
'img_path': img_path,
'img_shape': (height, width),
'bbox': bbox,
'joint_img': joint_img,
'joint_valid': joint_valid,
'smplify_result': smplify_result
})
else:
with open(self.rootnet_output_path) as f:
rootnet_output = json.load(f)
print('Load RootNet output from ' + self.rootnet_output_path)
for i in range(len(rootnet_output)):
image_id = rootnet_output[i]['image_id']
if image_id not in db.imgs:
continue
img = db.loadImgs(image_id)[0]
imgname = osp.join('val2017', img['file_name'])
img_path = osp.join(self.img_path, imgname)
height, width = img['height'], img['width']
fx, fy, cx, cy = 1500, 1500, img['width']/2, img['height']/2
focal = np.array([fx, fy], dtype=np.float32); princpt = np.array([cx, cy], dtype=np.float32);
root_joint_depth = np.array(rootnet_output[i]['root_cam'][2])
bbox = np.array(rootnet_output[i]['bbox']).reshape(4)
cam_param = {'focal': focal, 'princpt': princpt}
datalist.append({
'img_path': img_path,
'img_shape': (height, width),
'bbox': bbox,
'root_joint_depth': root_joint_depth,
'cam_param': cam_param
})
return datalist
def get_smpl_coord(self, smpl_param, cam_param, do_flip, img_shape):
pose, shape, trans = smpl_param['pose'], smpl_param['shape'], smpl_param['trans']
smpl_pose = torch.FloatTensor(pose).view(1,-1); smpl_shape = torch.FloatTensor(shape).view(1,-1); # smpl parameters (pose: 72 dimension, shape: 10 dimension)
smpl_trans = torch.FloatTensor(trans).view(1,-1) # translation vector
# flip smpl pose parameter (axis-angle)
if do_flip:
smpl_pose = smpl_pose.view(-1,3)
for pair in self.flip_pairs:
if pair[0] < len(smpl_pose) and pair[1] < len(smpl_pose): # face keypoints are already included in self.flip_pairs. However, they are not included in smpl_pose.
smpl_pose[pair[0], :], smpl_pose[pair[1], :] = smpl_pose[pair[1], :].clone(), smpl_pose[pair[0], :].clone()
smpl_pose[:,1:3] *= -1; # multiply -1 to y and z axis of axis-angle
smpl_pose = smpl_pose.view(1,-1)
# get mesh and joint coordinates
smpl_mesh_coord, smpl_joint_coord = self.smpl.layer['neutral'](smpl_pose, smpl_shape, smpl_trans)
# incorporate face keypoints
smpl_mesh_coord = smpl_mesh_coord.numpy().astype(np.float32).reshape(-1,3);
smpl_joint_coord = smpl_joint_coord.numpy().astype(np.float32).reshape(-1,3)
smpl_face_kps_coord = smpl_mesh_coord[self.face_kps_vertex,:].reshape(-1,3)
smpl_joint_coord = np.concatenate((smpl_joint_coord, smpl_face_kps_coord))
# flip translation
if do_flip: # avg of old and new root joint should be image center.
focal, princpt = cam_param['focal'], cam_param['princpt']
flip_trans_x = 2 * (((img_shape[1] - 1)/2. - princpt[0]) / focal[0] * (smpl_joint_coord[self.root_joint_idx,2])) - 2 * smpl_joint_coord[self.root_joint_idx][0]
smpl_mesh_coord[:,0] += flip_trans_x
smpl_joint_coord[:,0] += flip_trans_x
# change to mean shape if beta is too far from it
smpl_shape[(smpl_shape.abs() > 3).any(dim=1)] = 0.
return smpl_mesh_coord, smpl_joint_coord, smpl_pose[0].numpy(), smpl_shape[0].numpy()
def get_fitting_error(self, coco_joint, smpl_mesh, cam_param, img2bb_trans, coco_joint_valid):
# get coco joint from smpl mesh
coco_from_smpl = np.dot(self.coco_joint_regressor, smpl_mesh)
coco_from_smpl = self.add_pelvis(coco_from_smpl) # z-axis component will be removed
coco_from_smpl = cam2pixel(coco_from_smpl, cam_param['focal'], cam_param['princpt'])
coco_from_smpl_xy1 = np.concatenate((coco_from_smpl[:,:2], np.ones_like(coco_from_smpl[:,0:1])),1)
coco_from_smpl[:,:2] = np.dot(img2bb_trans, coco_from_smpl_xy1.transpose(1,0)).transpose(1,0)
coco_from_smpl[:,0] = coco_from_smpl[:,0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2]
coco_from_smpl[:,1] = coco_from_smpl[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1]
# mask joint coordinates
coco_joint = coco_joint[:,:2][np.tile(coco_joint_valid,(1,2))==1].reshape(-1,2)
coco_from_smpl = coco_from_smpl[:,:2][np.tile(coco_joint_valid,(1,2))==1].reshape(-1,2)
error = np.sqrt(np.sum((coco_joint - coco_from_smpl)**2,1)).mean()
return error
def __len__(self):
return len(self.datalist)
def __getitem__(self, idx):
data = copy.deepcopy(self.datalist[idx])
img_path, img_shape, bbox = data['img_path'], data['img_shape'], data['bbox']
# image load and affine transform
img = load_img(img_path)
img, img2bb_trans, bb2img_trans, rot, do_flip = augmentation(img, bbox, self.data_split)
img = self.transform(img.astype(np.float32))/255.
if self.data_split == 'train':
# coco gt
coco_joint_img = data['joint_img']
coco_joint_valid = data['joint_valid']
if do_flip:
coco_joint_img[:,0] = img_shape[1] - 1 - coco_joint_img[:,0]
for pair in self.coco_flip_pairs:
coco_joint_img[pair[0],:], coco_joint_img[pair[1],:] = coco_joint_img[pair[1],:].copy(), coco_joint_img[pair[0],:].copy()
coco_joint_valid[pair[0],:], coco_joint_valid[pair[1],:] = coco_joint_valid[pair[1],:].copy(), coco_joint_valid[pair[0],:].copy()
coco_joint_img_xy1 = np.concatenate((coco_joint_img[:,:2], np.ones_like(coco_joint_img[:,:1])),1)
coco_joint_img[:,:2] = np.dot(img2bb_trans, coco_joint_img_xy1.transpose(1,0)).transpose(1,0)
coco_joint_img[:,0] = coco_joint_img[:,0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2]
coco_joint_img[:,1] = coco_joint_img[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1]
# backup for calculating fitting error
_coco_joint_img = coco_joint_img.copy()
_coco_joint_valid = coco_joint_valid.copy()
# check truncation
coco_joint_trunc = coco_joint_valid * ((coco_joint_img[:,0] >= 0) * (coco_joint_img[:,0] < cfg.output_hm_shape[2]) * \
(coco_joint_img[:,1] >= 0) * (coco_joint_img[:,1] < cfg.output_hm_shape[1])).reshape(-1,1).astype(np.float32)
# transform coco joints to target db joints
coco_joint_img = transform_joint_to_other_db(coco_joint_img, self.coco_joints_name, self.joints_name)
coco_joint_cam = np.zeros((self.joint_num,3), dtype=np.float32) # dummy
coco_joint_valid = transform_joint_to_other_db(coco_joint_valid, self.coco_joints_name, self.joints_name)
coco_joint_trunc = transform_joint_to_other_db(coco_joint_trunc, self.coco_joints_name, self.joints_name)
smplify_result = data['smplify_result']
if smplify_result is not None:
# use fitted mesh
smpl_param, cam_param = smplify_result['smpl_param'], smplify_result['cam_param']
smpl_mesh_cam, smpl_joint_cam, smpl_pose, smpl_shape = self.get_smpl_coord(smpl_param, cam_param, do_flip, img_shape)
smpl_coord_cam = np.concatenate((smpl_mesh_cam, smpl_joint_cam))
smpl_coord_img = cam2pixel(smpl_coord_cam, cam_param['focal'], cam_param['princpt'])
# x,y affine transform, root-relative depth
smpl_coord_img_xy1 = np.concatenate((smpl_coord_img[:,:2], np.ones_like(smpl_coord_img[:,0:1])),1)
smpl_coord_img[:,:2] = np.dot(img2bb_trans, smpl_coord_img_xy1.transpose(1,0)).transpose(1,0)[:,:2]
smpl_coord_img[:,2] = smpl_coord_img[:,2] - smpl_coord_cam[self.vertex_num + self.root_joint_idx][2]
smpl_coord_img[:,0] = smpl_coord_img[:,0] / cfg.input_img_shape[1] * cfg.output_hm_shape[2]
smpl_coord_img[:,1] = smpl_coord_img[:,1] / cfg.input_img_shape[0] * cfg.output_hm_shape[1]
smpl_coord_img[:,2] = (smpl_coord_img[:,2] / (cfg.bbox_3d_size / 2) + 1)/2. * cfg.output_hm_shape[0]
# check truncation
smpl_trunc = ((smpl_coord_img[:,0] >= 0) * (smpl_coord_img[:,0] < cfg.output_hm_shape[2]) * \
(smpl_coord_img[:,1] >= 0) * (smpl_coord_img[:,1] < cfg.output_hm_shape[1]) * \
(smpl_coord_img[:,2] >= 0) * (smpl_coord_img[:,2] < cfg.output_hm_shape[0])).reshape(-1,1).astype(np.float32)
# split mesh and joint coordinates
smpl_mesh_img = smpl_coord_img[:self.vertex_num]; smpl_joint_img = smpl_coord_img[self.vertex_num:];
smpl_mesh_trunc = smpl_trunc[:self.vertex_num]; smpl_joint_trunc = smpl_trunc[self.vertex_num:];
# if fitted mesh is too far from h36m gt, discard it
is_valid_fit = True
error = self.get_fitting_error(_coco_joint_img, smpl_mesh_cam, cam_param, img2bb_trans, _coco_joint_valid)
if error > self.fitting_thr:
is_valid_fit = False
else:
smpl_joint_img = np.zeros((self.joint_num,3), dtype=np.float32) # dummy
smpl_joint_cam = np.zeros((self.joint_num,3), dtype=np.float32) # dummy
smpl_mesh_img = np.zeros((self.vertex_num,3), dtype=np.float32) # dummy
smpl_pose = np.zeros((72), dtype=np.float32) # dummy
smpl_shape = np.zeros((10), dtype=np.float32) # dummy
smpl_joint_trunc = np.zeros((self.joint_num,1), dtype=np.float32)
smpl_mesh_trunc = np.zeros((self.vertex_num,1), dtype=np.float32)
is_valid_fit = False
# 3D data rotation augmentation
rot_aug_mat = np.array([[np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot)), 0],
[np.sin(np.deg2rad(-rot)), np.cos(np.deg2rad(-rot)), 0],
[0, 0, 1]], dtype=np.float32)
# parameter
smpl_pose = smpl_pose.reshape(-1,3)
root_pose = smpl_pose[self.root_joint_idx,:]
root_pose, _ = cv2.Rodrigues(root_pose)
root_pose, _ = cv2.Rodrigues(np.dot(rot_aug_mat,root_pose))
smpl_pose[self.root_joint_idx] = root_pose.reshape(3)
smpl_pose = smpl_pose.reshape(-1)
# smpl coordinate
smpl_joint_cam = smpl_joint_cam - smpl_joint_cam[self.root_joint_idx,None] # root-relative
smpl_joint_cam = np.dot(rot_aug_mat, smpl_joint_cam.transpose(1,0)).transpose(1,0)
inputs = {'img': img}
targets = {'orig_joint_img': coco_joint_img, 'fit_joint_img': smpl_joint_img, 'fit_mesh_img': smpl_mesh_img, 'orig_joint_cam': coco_joint_cam, 'fit_joint_cam': smpl_joint_cam, 'pose_param': smpl_pose, 'shape_param': smpl_shape}
meta_info = {'orig_joint_valid': coco_joint_valid, 'orig_joint_trunc': coco_joint_trunc, 'fit_joint_trunc': smpl_joint_trunc, 'fit_mesh_trunc': smpl_mesh_trunc, 'is_valid_fit': float(is_valid_fit), 'is_3D': float(False)}
return inputs, targets, meta_info
else:
inputs = {'img': img}
targets = {}
meta_info = {'bb2img_trans': bb2img_trans}
return inputs, targets, meta_info
def evaluate(self, outs, cur_sample_idx):
annots = self.datalist
sample_num = len(outs)
eval_result = {}
for n in range(sample_num):
annot = annots[cur_sample_idx + n]
out = outs[n]
# x,y: resize to input image space and perform bbox to image affine transform
bb2img_trans = out['bb2img_trans']
mesh_out_img = out['mesh_coord_img']
mesh_out_img[:,0] = mesh_out_img[:,0] / cfg.output_hm_shape[2] * cfg.input_img_shape[1]
mesh_out_img[:,1] = mesh_out_img[:,1] / cfg.output_hm_shape[1] * cfg.input_img_shape[0]
mesh_out_img_xy1 = np.concatenate((mesh_out_img[:,:2], np.ones_like(mesh_out_img[:,:1])),1)
mesh_out_img[:,:2] = np.dot(bb2img_trans, mesh_out_img_xy1.transpose(1,0)).transpose(1,0)[:,:2]
# z: devoxelize and translate to absolute depth
root_joint_depth = annot['root_joint_depth']
mesh_out_img[:,2] = (mesh_out_img[:,2] / cfg.output_hm_shape[0] * 2. - 1) * (cfg.bbox_3d_size * 1000 / 2) # change cfg.bbox_3d_size from meter to milimeter
mesh_out_img[:,2] = mesh_out_img[:,2] + root_joint_depth
# camera back-projection
cam_param = annot['cam_param']
focal, princpt = cam_param['focal'], cam_param['princpt']
mesh_out_cam = pixel2cam(mesh_out_img, focal, princpt)
if cfg.stage == 'param':
mesh_out_cam = out['mesh_coord_cam']
vis = False
if vis:
filename = annot['img_path'].split('/')[-1][:-4] + '_' + str(n)
img = load_img(annot['img_path'])[:,:,::-1]
img = vis_mesh(img, mesh_out_img, 0.5)
cv2.imwrite(filename + '.jpg', img)
save_obj(mesh_out_cam, self.smpl.face, filename + '.obj')
return eval_result
def print_eval_result(self, eval_result):
pass
|
2015-10_Lecture/Lecture3/code/NER_Keras_with_Caseing.py | hlin117/deeplearning4nlp-tutorial | 593 | 12725973 | <filename>2015-10_Lecture/Lecture3/code/NER_Keras_with_Caseing.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
import numpy as np
import theano
import theano.tensor as T
import time
import gzip
import GermEvalReader
import GermEvalReader_with_casing
import BIOF1Validation
import keras
from keras.models import Sequential
from keras.layers.core import Dense, Flatten, Merge
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.layers.embeddings import Embedding
from KerasLayer.FixedEmbedding import FixedEmbedding
windowSize = 2 # 2 to the left, 2 to the right
numHiddenUnits = 100
trainFile = 'data/NER-de-train.tsv'
devFile = 'data/NER-de-dev.tsv'
testFile = 'data/NER-de-test.tsv'
print "NER with Keras with %s" % theano.config.floatX
#####################
#
# Read in the vocab
#
#####################
print "Read in the vocab"
vocabPath = 'embeddings/GermEval.vocab.gz'
word2Idx = {} #Maps a word to the index in the embeddings matrix
embeddings = [] #Embeddings matrix
with gzip.open(vocabPath, 'r') as fIn:
idx = 0
for line in fIn:
split = line.strip().split(' ')
embeddings.append(np.array([float(num) for num in split[1:]]))
word2Idx[split[0]] = idx
idx += 1
embeddings = np.asarray(embeddings, dtype=theano.config.floatX)
embedding_size = embeddings.shape[1]
# Create a mapping for our labels
label2Idx = {'O':0}
idx = 1
for bioTag in ['B-', 'I-']:
for nerClass in ['PER', 'LOC', 'ORG', 'OTH']:
for subtype in ['', 'deriv', 'part']:
label2Idx[bioTag+nerClass+subtype] = idx
idx += 1
#Inverse label mapping
idx2Label = {v: k for k, v in label2Idx.items()}
#Casing matrix
caseLookup = {'numeric': 0, 'allLower':1, 'allUpper':2, 'initialUpper':3, 'other':4, 'PADDING':5}
caseMatrix = np.identity(len(caseLookup), dtype=theano.config.floatX)
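# Illustrative note (not from the original file): the identity matrix yields a
# one-hot vector per casing class, e.g. 'allLower' (index 1) -> [0, 1, 0, 0, 0, 0].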
# Read in data
print "Read in data and create matrices"
train_sentences = GermEvalReader.readFile(trainFile)
dev_sentences = GermEvalReader.readFile(devFile)
test_sentences = GermEvalReader.readFile(testFile)
# Create numpy arrays
train_x, train_case_x, train_y = GermEvalReader_with_casing.createNumpyArrayWithCasing(train_sentences, windowSize, word2Idx, label2Idx, caseLookup)
dev_x, dev_case_x, dev_y = GermEvalReader_with_casing.createNumpyArrayWithCasing(dev_sentences, windowSize, word2Idx, label2Idx, caseLookup)
test_x, test_case_x, test_y = GermEvalReader_with_casing.createNumpyArrayWithCasing(test_sentences, windowSize, word2Idx, label2Idx, caseLookup)
#####################################
#
# Create the Lasagne Network
#
#####################################
# Create the train and predict_labels function
n_in = 2*windowSize+1
n_hidden = numHiddenUnits
n_out = len(label2Idx)
number_of_epochs = 10
minibatch_size = 35
embedding_size = embeddings.shape[1]
dim_case = 6
x = T.imatrix('x') # the data, one word+context per row
y = T.ivector('y') # the labels are presented as 1D vector of [int] labels
print "Embeddings shape",embeddings.shape
words = Sequential()
words.add(FixedEmbedding(output_dim=embeddings.shape[1], input_dim=embeddings.shape[0], input_length=n_in, weights=[embeddings])) #input_length=n_in,
words.add(Flatten())
casing = Sequential()
#casing.add(Embedding(output_dim=dim_case, input_dim=len(caseLookup), input_length=n_in))
casing.add(FixedEmbedding(output_dim=caseMatrix.shape[1], input_dim=caseMatrix.shape[0], input_length=n_in, weights=[caseMatrix]))
casing.add(Flatten())
model = Sequential()
model.add(Merge([words, casing], mode='concat'))
model.add(Dense(output_dim=n_hidden, input_dim=n_in*embedding_size, init='uniform', activation='tanh'))
model.add(Dense(output_dim=n_out, init='uniform', activation='softmax'))
#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.0, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
print(train_x.shape[0], 'train samples')
print(train_x.shape[1], 'train dimension')
print(test_x.shape[0], 'test samples')
train_y_cat = np_utils.to_categorical(train_y, n_out)
#Function that helps to iterate over our data in minibatches
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
number_of_epochs = 10
minibatch_size = 35
print "%d epochs" % number_of_epochs
print "%d mini batches" % (len(train_x)/minibatch_size)
for epoch in xrange(number_of_epochs):
start_time = time.time()
model.fit([train_x, train_case_x], train_y_cat, nb_epoch=1, batch_size=minibatch_size, verbose=0, shuffle=False)
#for batch in iterate_minibatches(train_x, train_y_cat, minibatch_size, shuffle=False):
# inputs, targets = batch
# model.train_on_batch(inputs, targets)
print "%.2f sec for training" % (time.time() - start_time)
pre_dev, rec_dev, f1_dev = BIOF1Validation.compute_f1(model.predict_classes([dev_x, dev_case_x], verbose=0), dev_y, idx2Label)
pre_test, rec_test, f1_test = BIOF1Validation.compute_f1(model.predict_classes([test_x, test_case_x], verbose=0), test_y, idx2Label)
print "%d epoch: F1 on dev: %f, F1 on test: %f" % (epoch+1, f1_dev, f1_test)
|
IndicNLP/indicnlp/common.py | Adityashar/Sanskrit-Machine-Translation | 432 | 12725979 | #
# Copyright (c) 2013-present, <NAME>
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import os
"""
Path to the Indic NLP Resources directory
"""
INDIC_RESOURCES_PATH=''
def init():
"""
Initialize the module. The following actions are performed:
    - Checks if the INDIC_RESOURCES_PATH variable is set. If not, checks if it can be initialized from the
      INDIC_RESOURCES_PATH environment variable. If that fails, an exception is raised
"""
global INDIC_RESOURCES_PATH
try:
if INDIC_RESOURCES_PATH=='':
INDIC_RESOURCES_PATH=os.environ['INDIC_RESOURCES_PATH']
except Exception as e:
raise IndicNlpException('INDIC_RESOURCES_PATH not set')
if INDIC_RESOURCES_PATH=='':
raise IndicNlpException('INDIC_RESOURCES_PATH not set')
def get_resources_path():
"""
Get the path to the Indic NLP Resources directory
"""
return INDIC_RESOURCES_PATH
def set_resources_path(resources_path):
"""
Set the path to the Indic NLP Resources directory
"""
global INDIC_RESOURCES_PATH
INDIC_RESOURCES_PATH=resources_path
class IndicNlpException(Exception):
"""
Exceptions thrown by Indic NLP Library components are instances of this class.
'msg' attribute contains exception details.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
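# Minimal usage sketch (illustrative; the resources directory below is a placeholder, not
# something shipped with this module). Either set the path explicitly as shown, or export
# INDIC_RESOURCES_PATH in the environment before calling init().
if __name__ == '__main__':
    set_resources_path('/path/to/indic_nlp_resources')  # placeholder path
    init()
    print(get_resources_path())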
|
old_py2/controllers/main_controller.py | ofekashery/the-blue-alliance | 266 | 12726062 |
from collections import defaultdict
import datetime
import logging
import webapp2
from google.appengine.api import memcache
from google.appengine.ext import ndb
import tba_config
from base_controller import CacheableHandler
from consts.award_type import AwardType
from consts.event_type import EventType
from consts.landing_type import LandingType
from consts.media_tag import MediaTag
from consts.media_type import MediaType
from database import media_query
from helpers.event_helper import EventHelper
from helpers.season_helper import SeasonHelper
from helpers.team_helper import TeamHelper
from helpers.firebase.firebase_pusher import FirebasePusher
from models.award import Award
from models.event import Event
from models.insight import Insight
from models.media import Media
from models.team import Team
from models.sitevar import Sitevar
from template_engine import jinja2_engine
def render_static(page):
memcache_key = "main_%s" % page
html = memcache.get(memcache_key)
if html is None:
html = jinja2_engine.render('%s.html' % page, {})
if tba_config.CONFIG["memcache"]:
memcache.set(memcache_key, html, 86400)
return html
def handle_404(request, response, exception):
response.write(render_static("404"))
response.set_status(404)
def handle_500(request, response, exception):
logging.exception(exception)
response.write(render_static("500"))
response.set_status(500)
class AvatarsHandler(CacheableHandler):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = "avatars_{}"
def __init__(self, *args, **kw):
super(AvatarsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
def get(self, year):
year = int(year)
if year not in {2018, 2019, 2020}:
self.abort(404)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(year)
super(AvatarsHandler, self).get(year)
def _render(self, year):
year = int(year)
avatars = []
shards = memcache.get_multi(['{}avatars_{}'.format(year, i) for i in xrange(10)])
if len(shards) == 10: # If missing a shard, must refetch all
for _, shard in sorted(shards.items(), key=lambda kv: kv[0]):
avatars += shard
if not avatars:
avatars_future = Media.query(Media.media_type_enum == MediaType.AVATAR, Media.year == year).fetch_async()
avatars = sorted(avatars_future.get_result(), key=lambda a: int(a.references[0].id()[3:]))
shards = {}
size = len(avatars) / 10 + 1
for i in xrange(10):
start = i * size
end = start + size
shards['{}avatars_{}'.format(year, i)] = avatars[start:end]
memcache.set_multi(shards, 60*60*24)
self.template_values.update({
'year': year,
'avatars': avatars,
})
return jinja2_engine.render('avatars.html', self.template_values)
class TwoChampsHandler(CacheableHandler):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = "two_champs_{}_{}"
def __init__(self, *args, **kw):
super(TwoChampsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24
self._team_key_a = self.request.get('team_a', None)
self._team_key_b = self.request.get('team_b', None)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self._team_key_a, self._team_key_b)
def _render(self, *args, **kw):
team_a = Team.get_by_id(self._team_key_a) if self._team_key_a else None
team_b = Team.get_by_id(self._team_key_b) if self._team_key_b else None
self.template_values.update({
'team_a': team_a,
'team_b': team_b,
})
return jinja2_engine.render('2champs.html', self.template_values)
class ContactHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_contact"
def __init__(self, *args, **kw):
super(ContactHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
return jinja2_engine.render('contact.html', self.template_values)
class PrivacyHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_privacy"
def __init__(self, *args, **kw):
super(PrivacyHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
return jinja2_engine.render('privacy.html', self.template_values)
class HashtagsHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_hashtags"
def __init__(self, *args, **kw):
super(HashtagsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
return jinja2_engine.render('hashtags.html', self.template_values)
class FIRSTHOFHandler(CacheableHandler):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = "main_first_hof"
def __init__(self, *args, **kw):
super(FIRSTHOFHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
awards_future = Award.query(
Award.award_type_enum==AwardType.CHAIRMANS,
Award.event_type_enum==EventType.CMP_FINALS).fetch_async()
teams_by_year = defaultdict(list)
for award in awards_future.get_result():
for team_key in award.team_list:
teams_by_year[award.year].append((
team_key.get_async(),
award.event.get_async(),
award,
media_query.TeamTagMediasQuery(team_key.id(), MediaTag.CHAIRMANS_VIDEO).fetch_async(),
media_query.TeamTagMediasQuery(team_key.id(), MediaTag.CHAIRMANS_PRESENTATION).fetch_async(),
media_query.TeamTagMediasQuery(team_key.id(), MediaTag.CHAIRMANS_ESSAY).fetch_async(),
))
teams_by_year = sorted(teams_by_year.items(), key=lambda (k, v): -k)
for _, tea in teams_by_year:
tea.sort(key=lambda x: x[1].get_result().start_date)
self.template_values.update({
'teams_by_year': teams_by_year,
})
return jinja2_engine.render('hof.html', self.template_values)
class ThanksHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_thanks"
def __init__(self, *args, **kw):
super(ThanksHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
return jinja2_engine.render('thanks.html', self.template_values)
class OprHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_opr"
def __init__(self, *args, **kw):
super(OprHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
return jinja2_engine.render('opr.html', self.template_values)
class PredictionsHandler(CacheableHandler):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = "main_predictions"
def __init__(self, *args, **kw):
super(PredictionsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
return jinja2_engine.render('predictions.html', self.template_values)
class SearchHandler(webapp2.RequestHandler):
def get(self):
try:
q = self.request.get("q")
logging.info("search query: %s" % q)
if q.isdigit():
team_id = "frc%s" % int(q)
team = Team.get_by_id(team_id)
if team:
self.redirect(team.details_url)
return None
elif q[:4].isdigit(): # Check for event key
event = Event.get_by_id(q)
if event:
self.redirect(event.details_url)
return None
else: # Check for event short
year = datetime.datetime.now().year # default to current year
event = Event.get_by_id('{}{}'.format(year, q))
if event:
self.redirect(event.details_url)
return None
except Exception, e:
logging.warning("warning: %s" % e)
finally:
self.response.out.write(render_static("search"))
class WebcastsHandler(CacheableHandler):
CACHE_VERSION = 2
CACHE_KEY_FORMAT = "main_webcasts"
def __init__(self, *args, **kw):
super(WebcastsHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
year = datetime.datetime.now().year
event_keys = Event.query(Event.year == year).order(Event.start_date).fetch(500, keys_only=True)
events = ndb.get_multi(event_keys)
self.template_values.update({
'events': events,
'year': year,
})
return jinja2_engine.render('webcasts.html', self.template_values)
class ApiWriteHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "api_write"
def __init__(self, *args, **kw):
super(ApiWriteHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
return jinja2_engine.render('apiwrite.html', self.template_values)
class BrandHandler(CacheableHandler):
CACHE_VERSION = 1
CACHE_KEY_FORMAT = "main_brand"
def __init__(self, *args, **kw):
super(BrandHandler, self).__init__(*args, **kw)
self._cache_expiration = 60 * 60 * 24 * 7
def _render(self, *args, **kw):
return jinja2_engine.render('brand.html', self.template_values)
|
manga_py/base_classes/arc_name.py | sonvt1710/manga-py | 337 | 12726073 | from abc import abstractmethod, ABCMeta
class ArchiveName(metaclass=ABCMeta):
_vol_fill = False
@abstractmethod
def get_chapter_index(self):
pass
def get_archive_name(self) -> str:
idx = self.get_chapter_index()
self._vol_fill = True
return self.normal_arc_name({'vol': idx.split('-')})
def normal_arc_name(self, idx) -> str:
if isinstance(idx, (str, int)):
idx = [idx]
if isinstance(idx, list):
self._vol_fill = True
return self.__normal_name_list(idx)
if isinstance(idx, dict):
return self.__normal_name_dict(idx)
raise DeprecationWarning('Wrong arc name type: %s' % type(idx))
def __normal_name_dict(self, idx: dict) -> str:
vol = idx.get('vol', None)
ch = idx.get('ch', None)
result = ''
if vol:
if isinstance(vol, str):
vol = [vol]
result = self.__normal_name_list(vol)
if ch:
if vol:
result += '-'
result += 'ch_' + self.__fill(ch)
if self._with_manga_name:
name = self._params.get('name', '')
if not len(name):
name = self.manga_name
result = '%s-%s' % (name, result)
return result
def __normal_name_list(self, idx: list) -> str:
fmt = 'vol_{:0>3}'
if len(idx) > 1:
fmt += '-{}' * (len(idx) - 1)
elif self._vol_fill and self._zero_fill:
idx.append('0')
fmt += '-{}'
return fmt.format(*idx)
@staticmethod
def __fill(var, fmt: str = '-{}') -> str:
if isinstance(var, str):
var = [var]
return (fmt * len(var)).format(*var).lstrip('-')
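# Minimal usage sketch (illustrative only). The attributes stubbed below normally come from
# the provider base classes elsewhere in manga-py; they are set by hand here just to exercise
# the naming logic in isolation.
if __name__ == '__main__':
    class _DemoArchiveName(ArchiveName):
        _zero_fill = True
        _with_manga_name = False
        _params = {}
        manga_name = 'demo-manga'

        def get_chapter_index(self):
            return '3'  # stubbed chapter index

    print(_DemoArchiveName().get_archive_name())  # -> vol_003-0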
|
pydis_site/apps/home/views/__init__.py | Robin5605/site | 700 | 12726082 | from .home import HomeView, timeline
__all__ = ["HomeView", "timeline"]
|
memorious/logic/check.py | Rosencrantz/memorious | 265 | 12726118 | import numbers
import re
from normality import stringify
from dateutil.parser import parse
class ContextCheck(object):
def __init__(self, context):
self.context = context
def shout(self, msg, strict=False, *args):
if strict:
raise ValueError(msg % args)
else:
self.context.log.info(msg, *args)
def is_not_empty(self, value, strict=False):
"""if value is not empty"""
value = stringify(value)
if value is not None:
return
self.shout("Value %r is empty", strict, value)
def is_numeric(self, value, strict=False):
"""if value is numeric"""
value = stringify(value)
if value is not None:
if value.isnumeric():
return
self.shout("value %r is not numeric", strict, value)
def is_integer(self, value, strict=False):
"""if value is an integer"""
if value is not None:
if isinstance(value, numbers.Number):
return
value = stringify(value)
if value is not None and value.isnumeric():
return
self.shout("value %r is not an integer", strict, value)
def match_date(self, value, strict=False):
"""if value is a date"""
value = stringify(value)
try:
parse(value)
except Exception:
self.shout("Value %r is not a valid date", strict, value)
def match_regexp(self, value, q, strict=False):
"""if value matches a regexp q"""
value = stringify(value)
mr = re.compile(q)
if value is not None:
if mr.match(value):
return
self.shout("%r not matching the regexp %r", strict, value, q)
def has_length(self, value, q, strict=False):
"""if value has a length of q"""
value = stringify(value)
if value is not None:
if len(value) == q:
return
self.shout("Value %r not matching length %r", strict, value, q)
def must_contain(self, value, q, strict=False):
"""if value must contain q"""
if value is not None:
if value.find(q) != -1:
return
self.shout("Value %r does not contain %r", strict, value, q)
|
scripts/popgen/operatorfactory.py | gglin001/poptorch | 128 | 12726131 | # Copyright (c) 2020 Graphcore Ltd. All rights reserved
from popgen import NonTensorValue, Value, onnx, poptorch
from popgen.helpers import empty_initializer
# no_tensor_braces(v):
#
# Modifier for values that take tensors without initializer list braces
# Parameters:
# v - the input value
def no_tensor_braces(v):
v.tensor_braces = False
return v
# def check_operator_signature(value, signatures)
#
# Verify an operator has correct signature
# Parameters:
# value - the operator
# signatures - signatures' dictionary
def check_operator_signature(value, signatures):
assert value.op in signatures, \
str(value.op) + " is not a supported operator"
actual_args = value.args
expected_args = signatures[value.op]
# check non-tensor arguments
first_non_tensor = -1
if expected_args[0] == 'Args':
for i, arg in enumerate(actual_args):
if arg.op == 'empty_initializer':
continue
if isinstance(arg, NonTensorValue):
first_non_tensor = i
break
assert first_non_tensor != 0, 'Expecting at least 1 tensor ' + \
'argument for ' + value.op
# no non-tensor arguments
if first_non_tensor == -1:
return value
# check non-tensor arguments
expected_args = expected_args[1:]
actual_args = actual_args[first_non_tensor:]
# assume any missing arguments are optional
for i in range(1, len(expected_args) - len(actual_args)):
actual_args.append('None')
for i, arg in enumerate(actual_args):
if isinstance(arg, Value):
arg = arg.op
assert arg in expected_args[i], 'Incorrect operand ' + str(i) + \
'for ' + value.op + '. Got ' + arg + ' , expecting ' + \
'one of: ' + str(expected_args[i])
return value
# Factory class for creating popArt ops. Operators are created
# on the fly based on spelling of attributes.
class OperatorFactory:
def __getattr__(self, name):
if name in onnx.signatures:
return lambda *args: \
check_operator_signature(Value(name, list(args)), \
onnx.signatures)
if name in poptorch.signatures:
return lambda *args: \
check_operator_signature(Value(name, list(args)), \
poptorch.signatures)
raise ValueError(name + " is not a supported operator")
def cast(self, t, ty):
value = no_tensor_braces(Value('cast', [t, ty]))
check_operator_signature(value, poptorch.signatures)
return value
def internalCast(self, t, ty):
value = no_tensor_braces(Value('internalCast', [t, ty]))
check_operator_signature(value, poptorch.signatures)
return value
def constantPad(self, x, l, c):
value = no_tensor_braces(Value('constantPad', [x, l, c]))
check_operator_signature(value, poptorch.signatures)
return value
def edgePad(self, t, l):
value = no_tensor_braces(Value('edgePad', [t, l]))
check_operator_signature(value, poptorch.signatures)
return value
def printIpuTensor(self, t, s):
value = no_tensor_braces(Value('printIpuTensor', [t, s]))
check_operator_signature(value, poptorch.signatures)
return value
def callCpuOp(self, t, s, n):
value = no_tensor_braces(Value('callCpuOp', [t, s, n]))
check_operator_signature(value, poptorch.signatures)
return value
def transpose(self, t):
value = Value('transpose', [t, empty_initializer()])
check_operator_signature(value, onnx.signatures)
return value
def randomNormal(self, x, shape, high, low, scalar_type=None):
args = [x, shape, high, low]
if scalar_type is not None:
args += [scalar_type]
value = Value('randomNormal', args)
check_operator_signature(value, poptorch.signatures)
return value
def randomUniform(self, x, shape, high, low, scalar_type=None):
args = [x, shape, high, low]
if scalar_type is not None:
args += [scalar_type]
value = no_tensor_braces(Value('randomUniform', args))
check_operator_signature(value, poptorch.signatures)
return value
def recomputationCheckpoint(self, x):
value = no_tensor_braces(Value('recomputationCheckpoint', [x]))
check_operator_signature(value, poptorch.signatures)
return value
def reflectionPad(self, t, l):
value = no_tensor_braces(Value('reflectionPad', [t, l]))
check_operator_signature(value, poptorch.signatures)
return value
def setAvailableMemory(self, x, y):
value = no_tensor_braces(Value('setAvailableMemory', [x, y]))
check_operator_signature(value, poptorch.signatures)
return value
def setMatMulSerialization(self, x, s, a, b):
value = no_tensor_braces(Value('setMatMulSerialization', [x, s, a, b]))
check_operator_signature(value, poptorch.signatures)
return value
def endForLoop(self, output, inputs, trip_count):
value = no_tensor_braces(
Value('endForLoop', [output, inputs, trip_count]))
check_operator_signature(value, poptorch.signatures)
return value
op = OperatorFactory()
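# Minimal usage sketch (illustrative, not executable as-is). Operator names are resolved
# dynamically in __getattr__, so any name present in onnx.signatures or poptorch.signatures
# becomes a callable. The operator name 'relu', the value 'some_tensor_value', and the
# argument 'FLOAT' below are hypothetical placeholders, not taken from the real signature
# tables or helpers module:
#
#   t = some_tensor_value          # hypothetical tensor-like Value
#   v = op.relu(t)                 # builds Value('relu', [t]) and checks it against the signatures
#   c = op.cast(t, 'FLOAT')        # explicit wrapper: no tensor braces, checked against poptorch signatures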
|
boto3_type_annotations/boto3_type_annotations/xray/paginator.py | cowboygneox/boto3_type_annotations | 119 | 12726133 | from typing import List
from typing import Dict
from datetime import datetime
from botocore.paginate import Paginator
class BatchGetTraces(Paginator):
def paginate(self, TraceIds: List, PaginationConfig: Dict = None) -> Dict:
pass
class GetGroups(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
pass
class GetSamplingRules(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
pass
class GetSamplingStatisticSummaries(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
pass
class GetServiceGraph(Paginator):
def paginate(self, StartTime: datetime, EndTime: datetime, GroupName: str = None, GroupARN: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class GetTimeSeriesServiceStatistics(Paginator):
def paginate(self, StartTime: datetime, EndTime: datetime, GroupName: str = None, GroupARN: str = None, EntitySelectorExpression: str = None, Period: int = None, PaginationConfig: Dict = None) -> Dict:
pass
class GetTraceGraph(Paginator):
def paginate(self, TraceIds: List, PaginationConfig: Dict = None) -> Dict:
pass
class GetTraceSummaries(Paginator):
def paginate(self, StartTime: datetime, EndTime: datetime, TimeRangeType: str = None, Sampling: bool = None, SamplingStrategy: Dict = None, FilterExpression: str = None, PaginationConfig: Dict = None) -> Dict:
pass
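# Minimal usage sketch (illustrative; these classes are type-annotation stubs, and the real
# paginator objects are created by botocore at runtime, so AWS credentials are required):
if __name__ == '__main__':
    from datetime import timedelta
    import boto3

    client = boto3.client('xray')
    paginator = client.get_paginator('get_trace_summaries')
    end = datetime.utcnow()
    for page in paginator.paginate(StartTime=end - timedelta(minutes=10), EndTime=end):
        print(page.get('TraceSummaries', []))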
|
veriloggen/dataflow/mul.py | akmaru/veriloggen | 232 | 12726141 |
from __future__ import absolute_import
from __future__ import print_function
import veriloggen.core.vtypes as vtypes
import veriloggen.core.module as module
def mkMultiplierCore(index, lwidth=32, rwidth=32, lsigned=True, rsigned=True, depth=6):
if lwidth <= 0:
raise ValueError("data width must be greater than 0.")
if rwidth <= 0:
raise ValueError("data width must be greater than 0.")
if depth < 2:
raise ValueError("depth must be greater than 1.")
retwidth = lwidth + rwidth
m = module.Module('multiplier_core_%d' % index)
clk = m.Input('CLK')
update = m.Input('update')
a = m.Input('a', lwidth)
b = m.Input('b', rwidth)
c = m.Output('c', retwidth)
_a = m.Reg('_a', lwidth, signed=lsigned)
_b = m.Reg('_b', rwidth, signed=rsigned)
_mul = m.Wire('_mul', retwidth, signed=True)
_pipe_mul = [m.Reg('_pipe_mul%d' % i, retwidth, signed=True)
for i in range(depth - 1)]
__a = _a
__b = _b
if not lsigned:
__a = vtypes.SystemTask(
'signed', vtypes.Cat(vtypes.Int(0, width=1), _a))
if not rsigned:
__b = vtypes.SystemTask(
'signed', vtypes.Cat(vtypes.Int(0, width=1), _b))
m.Assign(_mul(__a * __b))
m.Assign(c(_pipe_mul[depth - 2]))
m.Always(vtypes.Posedge(clk))(
vtypes.If(update)(
_a(a),
_b(b),
_pipe_mul[0](_mul),
[_pipe_mul[i](_pipe_mul[i - 1]) for i in range(1, depth - 1)]
))
return m
def mkMultiplier(index, lwidth=32, rwidth=32, lsigned=True, rsigned=True, depth=6):
if lwidth <= 0:
raise ValueError("data width must be greater than 0.")
if rwidth <= 0:
raise ValueError("data width must be greater than 0.")
if depth < 2:
raise ValueError("depth must be greater than 1.")
retwidth = lwidth + rwidth
mult = mkMultiplierCore(index, lwidth, rwidth, lsigned, rsigned, depth)
m = module.Module('multiplier_%d' % index)
clk = m.Input('CLK')
rst = m.Input('RST')
update = m.Input('update')
enable = m.Input('enable')
valid = m.Output('valid')
a = m.Input('a', lwidth)
b = m.Input('b', rwidth)
c = m.Output('c', retwidth)
valid_reg = [m.Reg('valid_reg%d' % i) for i in range(depth)]
m.Assign(valid(valid_reg[depth - 1]))
m.Always(vtypes.Posedge(clk))(
vtypes.If(rst)(
[valid_reg[i](0) for i in range(depth)]
).Else(
vtypes.If(update)(
valid_reg[0](enable),
[valid_reg[i](valid_reg[i - 1]) for i in range(1, depth)]
)
))
ports = [('CLK', clk), ('update', update), ('a', a), ('b', b), ('c', c)]
m.Instance(mult, 'mult', ports=ports)
return m
# global multiplier count
index_count = 0
def get_mul(lwidth=32, rwidth=32, lsigned=True, rsigned=True, depth=6):
global index_count
mul = mkMultiplier(index_count, lwidth, rwidth, lsigned, rsigned, depth)
index_count += 1
return mul
def reset():
global index_count
index_count = 0
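# Minimal usage sketch (illustrative): build one pipelined multiplier and emit its Verilog.
# The bit widths are arbitrary; to_verilog() is assumed here as the usual veriloggen method
# for dumping a Module to Verilog source.
if __name__ == '__main__':
    mul = get_mul(lwidth=16, rwidth=16, lsigned=True, rsigned=True, depth=6)
    print(mul.to_verilog())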
|
fedot/core/dag/graph_node.py | rozlana-g/FEDOT | 358 | 12726158 | from typing import List, Optional, Union
from fedot.core.dag.node_operator import NodeOperator
class GraphNode:
"""
Class for node definition in the DAG-based structure
:param nodes_from: parent nodes which information comes from
:param content: dict for the content in node
The possible parameters are:
'name' - name (str) or object that performs actions in this node
'params' - dictionary with additional information that is used by
the object in the 'name' field (e.g. hyperparameters values).
"""
def __init__(self, content: Union[dict, str],
nodes_from: Optional[List['GraphNode']] = None):
self.nodes_from = nodes_from
# Wrap string into dict if it is necessary
if isinstance(content, str):
content = {'name': content}
self.content = content
self._operator = NodeOperator(self)
def __str__(self):
return str(self.content['name'])
def __repr__(self):
return self.__str__()
@property
def descriptive_id(self):
return self._operator.descriptive_id()
def ordered_subnodes_hierarchy(self, visited=None) -> List['GraphNode']:
return self._operator.ordered_subnodes_hierarchy(visited)
@property
def distance_to_primary_level(self):
return self._operator.distance_to_primary_level()
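# Minimal usage sketch (illustrative): two primary nodes feeding a root node. The operation
# names below are placeholders and not a statement about FEDOT's operation repository.
if __name__ == '__main__':
    first = GraphNode(content='scaling')
    second = GraphNode(content={'name': 'ridge', 'params': {'alpha': 1.0}})
    root = GraphNode(content='rf', nodes_from=[first, second])
    print(root)                                     # -> rf
    print(root.distance_to_primary_level)           # depth of the node within the DAG
    print(len(root.ordered_subnodes_hierarchy()))   # root plus its parent nodes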
|
windowgram/windowgram.py | lion24/tmuxomatic | 846 | 12726171 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2013-2016, Oxidane
All rights reserved
This source has NOT yet been licensed for redistribution, modification, or inclusion in other projects.
An exception has been granted to the official tmuxomatic project, originating from the following addresses:
https://github.com/oxidane/tmuxomatic
https://pypi.python.org/pypi/tmuxomatic
A proper open source license is expected to be applied sometime after the release of this windowgram module as a
separate project. Please check this source at a later date for these changes.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
##----------------------------------------------------------------------------------------------------------------------
##
## Name ....... windowgram
## Synopsis ... Module for windowgram flex processing 1.x
## Author ..... Oxidane
## License .... (To Be Determined)
## Source ..... (To Be Announced)
##
##---------------+------------------------------------------------------------------------------------------------------
## About |
##---------------+
##
## The windowgram originated as the central concept in the tmuxomatic project. It has since been expanded to include
## flex, a functional windowgram modification language using natural syntax and spatially oriented objects.
##
## Not ready to reveal the future plans for this project. Check back for updates.
##
##--------------+-------------------------------------------------------------------------------------------------------
## TODO |
##--------------+
##
## Implement the planned flex modifiers
##
##----------------------------------------------------------------------------------------------------------------------
import sys, argparse, re, math, copy, inspect, operator
##----------------------------------------------------------------------------------------------------------------------
##
## Definitions
##
## TODO: Move these into a class
##
##----------------------------------------------------------------------------------------------------------------------
# Panes Primary
PANE_CHARACTERS = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" # Official order "[0-9a-zA-Z]"
MAXIMUM_PANES = len(PANE_CHARACTERS) # 62 maximum panes (not windows)
# Panes Extended (these characters are never saved to file)
MASKPANE_X = "." # Transparency pane id
MASKPANE_1 = "@" # Mask character: One
MASKPANE_0 = ":" # Mask character: Zero
PANE_CHAR_ALL = "*" # Used as a windowgram reference in some flex commands
PANE_CHAR_COM = "#" # Cannot be used: Session file stripped
PANE_CHAR_SPA = " " # Cannot be used: Session file stripped
# Reserved panes
PANE_RESERVED = MASKPANE_X + MASKPANE_1 + MASKPANE_0 # Valid ephemeral characters
PANE_RESERVED_X = PANE_CHAR_SPA + PANE_CHAR_COM + PANE_CHAR_ALL # Invalid or used as wildcard
# Defaults
NEW_WINDOWGRAM = "1"
##----------------------------------------------------------------------------------------------------------------------
##
## Window splitter logic
##
## This converts a windowgram to a layout with split mechanics (tmux).
##
##----------------------------------------------------------------------------------------------------------------------
def SplitProcessor_SplitWindow( sw, dim, at_linkid, linkid, list_split, list_links, how, of_this ):
"""
Splits the window 'at_linkid' along axis 'how'
Variable 'how': 'v' = Vertical (new = below), 'h' = Horizontal (new = right)
"""
def translate( pane, window, screen ):
# Returns scaled pane according to windowgram and screen dimensions
return int( float(pane) / float(window) * float(screen) )
# Initialize
at_tmux = ""
for llit in list_links:
if llit[0] == at_linkid:
at_tmux = llit[1]
break
if at_tmux == "": return
for llx, llit in enumerate(list_links):
if llit[1] > at_tmux:
list_links[llx] = ( llit[0], llit[1]+1 ) # Shift the index to accommodate new pane
linkid[0] += 1
this_ent = {}
# The dimensions for the newly created window are based on the parent (accounts for the one character divider)
for ent in list_split:
if ent['linkid'] == at_linkid:
this_ent = ent
break
if this_ent:
if how == 'v':
of_this = translate( of_this, dim['win'][1], dim['scr'][1] ) # From size-in-definition to size-on-screen
w = this_ent['inst_w']
h = of_this - 1
per = str( float(of_this) / float(this_ent['inst_h']) * 100.0 )
if sw['relative']:
this_ent['inst_h'] = int(this_ent['inst_h']) - of_this # Subtract split from root pane
else: # elif how == 'h':
of_this = translate( of_this, dim['win'][0], dim['scr'][0] ) # From size-in-definition to size-on-screen
w = of_this - 1
h = this_ent['inst_h']
per = str( float(of_this) / float(this_ent['inst_w']) * 100.0 )
if sw['relative']:
this_ent['inst_w'] = int(this_ent['inst_w']) - of_this # Subtract split from root pane
# Split list tracks tmux pane number at the time of split (for building the split commands)
list_split.append( { 'linkid':linkid[0], 'tmux':at_tmux, 'split':how, 'inst_w':w, 'inst_h':h, 'per':per } )
# Now add the new window's pane id, this is shifted up as insertions below it occur (see above)
at_tmux += 1
list_links.append( (linkid[0], at_tmux) )
def SplitProcessor_FindCleanBreak( sw, vertical, pos, list_panes, bx, by, bw, bh ):
"""
Finds a split on an axis within the specified bounds, if found returns True, otherwise False.
This shares an edge case with tmux that is an inherent limitation in the way that tmux works.
For more information on this edge case, look over the example file "session_unsupported".
Important note about the clean break algorithm used. The caller scans all qualifying panes,
then it uses each qualifying side as a base from which it calls this function. Here we scan
all qualifying panes to complete a match (see scanline). If the result is a clean break,
this function returns True, and the caller has the location of the break. While there's room
for optimization (probably best ported to C++, where the scanline technique will be really
fast), it probably isn't needed since it's likely to be adequate even on embedded systems.
"""
#-----------------------------------------------------------------------------------------------
#
# Outline: Clean Break Algorithm (1.0.1)
# ~ Establish pointers
# ~ Initialize scanline, used for detecting a clean break spanning multiple panes
# ~ For each qualifying pane that has a shared edge
# ~ If shared edge overlaps, add it to the scanline
# ~ If scanline has no spaces, then a clean break has been found, return True
# ~ Nothing found, return False
#
#-----------------------------------------------------------------------------------------------
# Notify user
if sw['scanline'] and sw['verbose'] >= 3:
sw['print']("(3) Scanline: Find clean " + [ "horizontal", "vertical" ][vertical] + \
" break at position " + str(pos))
# ~ Establish pointers
if vertical: sl_bgn, sl_siz = bx, bw # Vertical split is a horizontal line
else: sl_bgn, sl_siz = by, bh # Horizontal split is a vertical line
# ~ Initialize scanline, used for detecting a clean break spanning multiple panes
scanline = list(' ' * sl_siz) # Sets the scanline to spaces (used as a disqualifier)
# ~ For each qualifying pane that has a shared edge
for pane in list_panes:
# Disqualifiers
if 's' in pane: continue # Processed panes are out of bounds, all its edges are taken
if pane['y'] >= by+bh or pane['y']+pane['h'] <= by: continue # Fully out of bounds
if pane['x'] >= bx+bw or pane['x']+pane['w'] <= bx: continue # Fully out of bounds
if vertical and pane['y'] != pos and pane['y']+pane['h'] != pos: continue # No alignment
if not vertical and pane['x'] != pos and pane['x']+pane['w'] != pos: continue # No alignment
# ~ If shared edge found, add it to the scanline
if vertical: sl_pos, sl_len = pane['x'], pane['w'] # Vertical split is a horizontal line
else: sl_pos, sl_len = pane['y'], pane['h'] # Horizontal split is a vertical line
if sl_pos < sl_bgn: sl_len -= sl_bgn - sl_pos ; sl_pos = sl_bgn # Clip before
if sl_pos + sl_len > sl_bgn + sl_siz: sl_len = sl_bgn + sl_siz - sl_pos # Clip after
for n in range( sl_pos - sl_bgn, sl_pos - sl_bgn + sl_len ): scanline[n] = 'X'
# Show the scanline in action
if sw['scanline'] and sw['verbose'] >= 3:
sw['print']("(3) Scanline: [" + "".join(scanline) + "]: modified by pane " + pane['n'])
# ~ If scanline has no spaces, then a clean break has been found, return True
if not ' ' in scanline: return True
# ~ Nothing found, return False
return False
def SplitProcessor_FillerRecursive( sw, dim, linkid, l_split, l_links, l_panes, this_linkid, bx, by, bw, bh ):
"""
Once the panes have been loaded, this recursive function begins with the xterm dimensions.
Note that at this point, all sizes are still in characters, as they will be scaled later.
linkid[] Single entry list with last assigned linkid number (basically a reference)
l_split[{}] List of splits and from which pane at the time of split for recreation
l_links[()] List of linkid:tmux_pane associations, updated when split occurs
l_panes[{}] List of fully parsed user-defined panes as one dict per pane
this_linkid The linkid of the current window
bx, by, bw, bh The bounds of the current window
This algorithm supports all layouts supported by tmux.
Possible improvement for more accurate positioning: Scan for the best possible split, as
defined by its closest proximity to the top or left edges (alternatively: bottom or right).
This has yet to be checked for the intended effect of producing more consistent sizing.
"""
#-----------------------------------------------------------------------------------------------
#
# Outline: Filler Algorithm (1.0.1)
# ~ If any available pane is a perfect fit, link to linkid, mark as processed, return
# ~ Search panes for clean break, if found then split, reenter 1, reenter 2, return
# ~ If reached, user specified an unsupported layout that will be detected by caller, return
#
#-----------------------------------------------------------------------------------------------
def idstr( bx, by, bw, bh ):
# Print the rectangle for debugging purposes. Maybe change to use a rectangle class.
return "Rectangle( x=" + str(bx) + ", y=" + str(by) + ", w=" + str(bw) + ", h=" + str(bh) + " )"
v = True if sw['verbose'] >= 3 else False
if v: sw['print']("(3) " + idstr(bx, by, bw, bh) + ": Enter")
# ~ If any available pane is a perfect fit, link to linkid, mark as processed, return
for pane in l_panes:
# Disqualifiers
if 's' in pane: continue # Skip processed panes
# Perfect fit?
if pane['x'] == bx and pane['y'] == by and pane['w'] == bw and pane['h'] == bh:
if v: sw['print']("(3) " + idstr(bx, by, bw, bh) + \
": Linking pane " + str(pane['n']) + " to " + str(this_linkid))
pane['l'] = this_linkid
pane['s'] = True # Linked to tmux[] / Disqualified from further consideration
if v: sw['print']("(3) " + idstr(bx, by, bw, bh) + ": Exit")
return
# ~ Search panes for clean break, if found then split, reenter 1, reenter 2, return
# This could be optimized (e.g., skip find_clean_break if axis line has already been checked)
for pane in l_panes:
# Disqualifiers
if 's' in pane: continue # Processed panes are going to be out of bounds
if pane['y'] >= by+bh or pane['y']+pane['h'] <= by: continue # Fully out of bounds
if pane['x'] >= bx+bw or pane['x']+pane['w'] <= bx: continue # Fully out of bounds
at = ""
# Split at top edge?
if pane['y'] > by:
if SplitProcessor_FindCleanBreak( sw, True, pane['y'], l_panes, bx, by, bw, bh ):
if v: sw['print']("(3) " + idstr(bx, by, bw, bh) + ": Split vert at top of pane " + str(pane['n']))
at = pane['y']
# Split at bottom edge?
if pane['y']+pane['h'] < by+bh:
if SplitProcessor_FindCleanBreak( sw, True, pane['y']+pane['h'], l_panes, bx, by, bw, bh ):
if v: sw['print']("(3) " + idstr(bx, by, bw, bh) + ": Split vert at bottom of pane " + str(pane['n']))
at = pane['y']+pane['h']
# Perform vertical split
if at:
linkid_1 = this_linkid
SplitProcessor_SplitWindow( sw, dim, this_linkid, linkid, l_split, l_links, 'v', bh-(at-by) )
linkid_2 = linkid[0]
SplitProcessor_FillerRecursive(sw, dim, linkid, l_split, l_links, l_panes, linkid_1, bx, by, bw, at-by)
SplitProcessor_FillerRecursive(sw, dim, linkid, l_split, l_links, l_panes, linkid_2, bx, at, bw, bh-(at-by))
if v: sw['print']("(3) " + idstr(bx, by, bw, bh) + ": Exit")
return
# Split at left edge?
if pane['x'] < bx:
if SplitProcessor_FindCleanBreak( sw, False, pane['x'], l_panes, bx, by, bw, bh ):
if v: sw['print']("(3) " + idstr(bx, by, bw, bh) + ": Split horz at left of pane " + str(pane['n']))
at = pane['x']
# Split at right edge?
if pane['x']+pane['w'] < bx+bw:
if SplitProcessor_FindCleanBreak( sw, False, pane['x']+pane['w'], l_panes, bx, by, bw, bh ):
if v: sw['print']("(3) " + idstr(bx, by, bw, bh) + ": Split horz at right of pane " + str(pane['n']))
at = pane['x']+pane['w']
# Perform horizontal split
if at:
linkid_1 = this_linkid
SplitProcessor_SplitWindow( sw, dim, this_linkid, linkid, l_split, l_links, 'h', bw-(at-bx) )
linkid_2 = linkid[0]
SplitProcessor_FillerRecursive(sw, dim, linkid, l_split, l_links, l_panes, linkid_1, bx, by, at-bx, bh)
SplitProcessor_FillerRecursive(sw, dim, linkid, l_split, l_links, l_panes, linkid_2, at, by, bw-(at-bx), bh)
if v: sw['print']("(3) " + idstr(bx, by, bw, bh) + ": Exit")
return
# ~ If reached, user specified an unsupported layout that will be detected by caller, return
if v: sw['print']("(3) " + idstr(bx, by, bw, bh) + ": No match found, unsupported layout")
return
def SplitProcessor( sw, wg, iw, ih, list_panes ): # list_split, list_links
#
# Split window into panes
#
linkid = [ 1001 ] # Incrementing number for cross-referencing (0 is reserved)
# The linkid number is a unique identifier used to track the tmux panes and cross-reference them when the
# window is fully divided to get the final pane index for a particular pane. This is an essential link
# because panes are renumbered as splits occur, and before they're assigned to tmuxomatic pane ids.
# Note: 'inst_w' and 'inst_h' are the dimensions when split, the first pane uses full dimensions.
# Note: The first pane does not use the entires 'split' or 'tmux'.
list_split = [ { 'linkid': linkid[0], 'split': "", 'tmux': 65536, 'inst_w': iw, 'inst_h': ih, 'per': "100.0" } ]
list_links = [ ( linkid[0], 0 ) ] # List of cross-references (linkid, pane_tmux)
# Run the recursive splitter
windowgram_w, windowgram_h = wg.Analyze_WidthHeight() # TODO: Clean up remaining wg inlines
dim = {}
dim['win'] = [ windowgram_w, windowgram_h ]
dim['scr'] = [ iw, ih ]
SplitProcessor_FillerRecursive( \
sw, dim, linkid, list_split, list_links, list_panes, linkid[0], 1, 1, windowgram_w, windowgram_h )
# Return useful elements
return list_split, list_links
##----------------------------------------------------------------------------------------------------------------------
##
## Windowgram miscellaneous
##
##----------------------------------------------------------------------------------------------------------------------
class Windowgram_Miscellaneous():
@staticmethod
def SortPanes(layout): # list_panes, layout
# Sort top to bottom, left to right, move into list (layout[] -> list_panes[])
list_panes = [] # List of user defined panes (derived from windowgram)
while len(layout):
pane = ""
for it in layout:
if not pane: pane = it
elif layout[it]['y'] < layout[pane]['y']: pane = it
elif layout[it]['y'] == layout[pane]['y'] and layout[it]['x'] < layout[pane]['x']: pane = it
list_panes.append(layout[pane].copy()) # Add to list
del layout[pane] # Remove from dict
return list_panes, layout
@staticmethod
def PaneOverlap(list_panes): # overlap_pane1, overlap_pane2
# Finds the first overlap and returns it
for pane1 in list_panes:
for pane2 in list_panes:
if pane1 != pane2:
# Readability
p1x1 = pane1['x']
p1x2 = p1x1 + pane1['w']
p1y1 = pane1['y']
p1y2 = p1y1 + pane1['h']
p2x1 = pane2['x']
p2x2 = p2x1 + pane2['w']
p2y1 = pane2['y']
p2y2 = p2y1 + pane2['h']
# Overlap detection
if p1x1 < p2x2 and p1x2 > p2x1 and p1y1 < p2y2 and p1y2 > p2y1:
return pane1['n'], pane2['n']
return None, None
##----------------------------------------------------------------------------------------------------------------------
##
## Windowgram class
##
## Interface for the general-purpose use of windowgram data. Instances of this class should use the name wg.
##
## Note: Most algorithms do not account for windowgrams of varying widths (e.g., "12\n123\n"). Such windowgrams are
## considered invalid in most cases, and where affective they should be fixed or rejected prior to use.
##
## TODO:
##
## Update all uses of windowgram to use a wg instance, instead of instantiating to use a method
## Move splitter code into this library, it's used for windowgram compatibility detection
## Move flex commands into this class, or an accompanying class, free of shell interface concerns
## Move this class into a library for use in other applications
##
##----------------------------------------------------------------------------------------------------------------------
##
## Windowgram Formats:
##
## The user deals with the raw format, all other formats are used internally for processing.
##
## ------- -------------------------- ------------------ --------------------------------------------------------
## Data Example Value Variable Description
## ------- -------------------------- ------------------ --------------------------------------------------------
## Raw "12\n34 # etc\n" windowgram_raw The file input and output, may have spaces or comments
## String "12\n34\n" windowgram_string Stripped lines delimited by "\n", no spaces or comments
## Lines [ "12", "34" ] windowgram_lines List of lines, pure windowgram data (no delimiters)
## Chars [['1', '2'], ['3', '4']] windowgram_chars List of chars, pure windowgram data (no delimiters)
## Parsed {'a': {'x': 1, ...}, ...} windowgram_parsed Processed dictionary of panes with keys: n, x, y, w, h
## Mosaic (base, [[ w, m ], ...]) windowgram_mosaic Pairs of windowgram and mask, ordered bottom to top
## ------- -------------------------- ------------------ --------------------------------------------------------
##
##----------------------------------------------------------------------------------------------------------------------
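##----------------------------------------------------------------------------------------------------------------------
##
## Conversion example (sketch): round-tripping the 2x2 windowgram "12\n34\n" through the helpers defined below
##
##      lines               = Windowgram_Convert.String_To_Lines("12\n34\n")     # [ "12", "34" ]
##      chars               = Windowgram_Convert.String_To_Chars("12\n34\n")     # [ ['1', '2'], ['3', '4'] ]
##      parsed, error, line = Windowgram_Convert.String_To_Parsed("12\n34\n")    # error is None on success
##      string              = Windowgram_Convert.Parsed_To_String(parsed)        # "12\n34\n"
##
##----------------------------------------------------------------------------------------------------------------------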
##
## Windowgram Groups:
##
## 1 SPLIT ... 1 windowgram, 1 pane
##
## 112... TILED ... 1 windowgram, 5 panes, transparency
## 452...
## 433...
##
## 1222 444. .... LAYERED ... 3 windowgrams, 5 panes, implicit overlaps, explicit overlaps, transparency
## 3333 444. .555
## 3333 .... .555
##
##----------------------------------------------------------------------------------------------------------------------
##
## Pane arrangement types:
##
## Name Example Description
## ----------------------- ----------- ------------------------------------------------------------------------
## Implicit Overlaps 12 Pane 1 overlaps pane 2, this is an implicit overlap
## 22
##
## Explicit Overlaps 11 22 Pane 2 overlaps pane 1, explicit implies multiple layers are used
## 11 22
##
## Positioned 112 These panes cannot be defined using nested splits, however these are
## 452 valid in other environments where explicit positioning is possible
## 433
## ----------------------- ----------- ------------------------------------------------------------------------
##
##----------------------------------------------------------------------------------------------------------------------
##
## Support analysis types:
##
## Name Support Description
## -------- -------------- -------------------------------------------------------------------------------------
## split tmux, ???, os Fully split compatible, no overlaps of either kind, no positioned panes
## tiled ???, os No overlaps, supports positioned panes, not bound to a split mechanism for layout
## layered os Has one or more layers with implicit overlaps and/or explicit overlaps
## -------- -------------- -------------------------------------------------------------------------------------
##
## The "???" represents a hypothetical console-based tmux-like system with more flexible positioning. Not necessarily
## with overlap like a typical graphical user interface, though if it did it would then by extension support layered
## windowgrams. Does dvtm support positioning?
##
##----------------------------------------------------------------------------------------------------------------------
linestrip = lambda line: (line[:line.find("#")] if line.find("#") >= 0 else line).strip()
##
## To support masking, an extended set of pseudo-panes must be recognized as valid within windowgram class methods
##
def ValidPane(ch, extend=False): return True if (ch in PANE_CHARACTERS or (extend and ch in PANE_RESERVED)) else False
def ValidPanes(extend=False): return (PANE_CHARACTERS + PANE_RESERVED) if extend else PANE_CHARACTERS
def AllPanes(this, used, extend=False): return "".join(list(set([ch for ch in (this+used) if ValidPane(ch, True)])))
##
## Windowgram Format Conversions
##
class Windowgram_Convert():
## String <-> Lines ... windowgram_lines == [ "12", "34", ... ]
@staticmethod
def String_To_Lines(windowgram):
return [ linestrip(line) for line in list(filter(None, (windowgram+"\n").split("\n"))) ] # No blank lines
@staticmethod
def Lines_To_String(windowgram_lines):
return "\n".join([ line for line in windowgram_lines ]) + "\n" # Each line has one \n
## String <-> Chars ... windowgram_chars == [ ['1', '2'], ['3', '4'], ... ]
@staticmethod
def String_To_Chars(windowgram):
# A list of lists, each containing one or more single characters representing a line, strips empty runs
return [ r for r in [ [ ch for ch in list(ln) ] for ix, ln in enumerate(windowgram.split("\n")[:-1]) ] if r ]
@staticmethod
def Chars_To_String(windowgram_chars):
return Windowgram_Convert.Lines_To_String( [ "".join(line_chars) for line_chars in windowgram_chars ] )
## String <-> Parsed ... windowgram_parsed == { 'Q': { 'n': 'Q', 'x': 1, 'y': 1, 'w': 1, 'h': 1 }, ... }
@staticmethod
def String_To_Parsed(windowgram, extend=False): # windowgram_parsed, error_string, error_line
windowgram_lines = Windowgram_Convert.String_To_Lines(windowgram)
windowgram_parsed = {}
panes_y = 0 # Line number
try:
panes_x = panes_y = width = 0
for ix, line in enumerate(windowgram_lines):
if not line: continue
panes_y += 1
panes_x = 0
for ch in line:
if not ValidPane(ch, extend):
raise Exception("Windowgram must contain valid identifiers: [0-9a-zA-Z]")
if panes_y > 1 and len(line) != width:
raise Exception("Windowgram width does not match previous lines")
else:
if width == 0: width = len(line)
for ch in line:
panes_x += 1
if not ValidPane(ch, extend):
raise Exception("Windowgram must contain valid identifiers: [0-9a-zA-Z]")
# Builds "bounding box" around pane for easy error detection through overlap algorithm
if not ch in windowgram_parsed:
# New pane
windowgram_parsed[ch] = { 'n': ch, 'x': panes_x, 'y': panes_y, 'w': 1, 'h': 1 }
else:
# Expand width
x2 = panes_x - windowgram_parsed[ch]['x'] + 1
if x2 > windowgram_parsed[ch]['w']:
windowgram_parsed[ch]['w'] = x2
# Expand height
y2 = panes_y - windowgram_parsed[ch]['y'] + 1
if y2 > windowgram_parsed[ch]['h']:
windowgram_parsed[ch]['h'] = y2
# Update x
if windowgram_parsed[ch]['x'] > panes_x:
windowgram_parsed[ch]['x'] = panes_x
if not windowgram_parsed: raise Exception("Windowgram not specified")
except Exception as error:
return None, str(error), panes_y
return windowgram_parsed, None, None
@staticmethod
def Parsed_To_String(windowgram_parsed): # windowgram_string
# TODO: Probably should do error handling
windowgram_list = []
for paneid in windowgram_parsed.keys():
pane = windowgram_parsed[paneid]
for y in range( pane['y'], pane['y'] + pane['h'] ):
for x in range( pane['x'], pane['x'] + pane['w'] ):
ix = int(x) - 1
iy = int(y) - 1
while len(windowgram_list) <= iy: windowgram_list.append([])
while len(windowgram_list[iy]) <= ix: windowgram_list[iy].append([])
windowgram_list[iy][ix] = pane['n']
windowgram_string = ""
for line in windowgram_list:
windowgram_string += "".join(line) + "\n"
return windowgram_string
## String <-> Mosaic ... windowgram_mosaic == ( wg_base, [ [ wg_data, wg_mask ], [ wg_data, wg_mask ], ... ] )
@staticmethod
def String_To_Mosaic(windowgram_string, mask_string_list): # windowgram_mosaic
# Implemented for reference only; actual production cases are custom implementations of flex commands
windowgram_pairs = []
wg = Windowgram(windowgram_string)
for mask_string in mask_string_list:
pair_w = Windowgram( wg.Export_String() )
panes = pair_w.Panes_FromMask( mask_string )
used, unused = pair_w.Panes_GetUsedUnused()
strip = "".join( [ pane for pane in list(used) if pane not in list(panes) ] )
used, unused = PaneList_MovePanes( used, unused, strip )
pair_w.Panes_Renamer( unused, "." )
windowgram_pairs.append( [ pair_w, Windowgram(mask_string) ] )
windowgram_mosaic = [ Windowgram(windowgram_string), windowgram_pairs ]
return windowgram_mosaic
@staticmethod
def Mosaic_To_String(windowgram_mosaic): # windowgram_string
# Merges pairs of [ wg_data, wg_mask ] onto wg_base, ordered bottom to top
wg_base, pairs = windowgram_mosaic
s_l = wg_base.Export_Lines() # Source
for w, m in pairs:
t_l = w.Export_Lines() # Target
m_l = m.Export_Lines() # Mask
w_l, s_l = s_l, [] # Work
for iy in range(len(w_l)):
line = ""
for ix in range(len(w_l[iy])): line += w_l[iy][ix] if m_l[iy][ix] != MASKPANE_1 else t_l[iy][ix]
s_l.append(line)
return Windowgram_Convert.Lines_To_String( s_l )
## String -> Lines -> String ... Full cycle purification of the windowgram by stripping comments and whitespace
@staticmethod
def PurifyString(windowgram):
return Windowgram_Convert.Lines_To_String( Windowgram_Convert.String_To_Lines( windowgram ) )
## Transpose Chars ... Swaps columns and rows, essentially mirror ccw90
@staticmethod
def Transpose_Chars(windowgram_chars):
windowgram_chars_transposed = []
for x in range( len(windowgram_chars[0]) ):
windowgram_chars_transposed += [ [ windowgram_chars[y][x] for y in range( len(windowgram_chars) ) ] ]
return windowgram_chars_transposed
## Transpose Pane ... Swaps [x with y] and [w with h] in a parsed pane (dict entry of windowgram_parsed)
@staticmethod
def Transpose_ParsedPane(windowgram_parsedpane):
windowgram_parsedpane_transposed = copy.deepcopy( windowgram_parsedpane )
windowgram_parsedpane_transposed['x'], windowgram_parsedpane_transposed['y'] = \
windowgram_parsedpane_transposed['y'], windowgram_parsedpane_transposed['x']
windowgram_parsedpane_transposed['w'], windowgram_parsedpane_transposed['h'] = \
windowgram_parsedpane_transposed['h'], windowgram_parsedpane_transposed['w']
return windowgram_parsedpane_transposed
## Transpose Windowgram
@staticmethod
def Transpose_Windowgram(wg):
wg_transposed = copy.deepcopy(wg)
wg_transposed.Import_Chars( Windowgram_Convert.Transpose_Chars( wg.Export_Chars() ) )
return wg_transposed
## Transpose Multiple Windowgrams
@staticmethod
def Transpose_Windowgrams(*batch_wg):
batch_transposed = []
for wg in batch_wg:
batch_transposed.append( Windowgram_Convert.Transpose_Windowgram( wg ) )
return batch_transposed
##
## Mosaics Equal ... Used for comparison purposes in testing, would not be needed if windowgram_mosaic used strings
##
def Mosaics_Equal(windowgram_mosaic_1, windowgram_mosaic_2): # True if equal else False
if windowgram_mosaic_1[0] != windowgram_mosaic_2[0]: return False
for m1, m2 in zip( windowgram_mosaic_1[1], windowgram_mosaic_2[1] ):
if m1 != m2: return False # Necessitates Windowgram.__eq__()
return True
##
## Windowgram Group Conversions
##
## WindowgramGroupPattern Single string of 1 or more packed windowgrams with arbitrary padding
## This pattern must be top aligned to accommodate differently sized windowgrams
## See flex unit testing for examples of properly constructed objects
##
## WindowgramGroupList List of strings, where each string is a windowgram with optional padding
## Basically just a list of windowgram_string objects
##
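##
## Example (illustrative): one pattern containing two top-aligned windowgrams
##
##      Pattern                  List
##
##      111 22                   [ "111\n111\n", "22\n22\n" ]
##      111 22
##
##      Pattern_To_List() and List_To_Pattern() below translate between these two forms
##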
class WindowgramGroup_Convert():
## Pattern <-> List
@staticmethod
def Pattern_To_List(windowgramgroup_pattern):
windowgramgroup_list = []
first_linewithcol = []
for line in windowgramgroup_pattern.split("\n"):
if not line.strip(): first_linewithcol = []
else:
# Build list of lines according to starting column of character run
# * Discard any out-of-bounds character runs (as defined by first line)
# * Insert blank lines where no runs were found
def colsplit(line): # linewithcol
linewithcol = [] # [ (line, col), ... ]
for col, ch in enumerate(list(line)): # Never strip the line or this will fail: "1 2\n 2\n"
if ch == " " or ch == "\t" or not linewithcol:
if not linewithcol or linewithcol[-1][0]: linewithcol.append(["", None])
if ch != " " and ch != "\t":
if linewithcol[-1][1] is None: linewithcol[-1][1] = col
linewithcol[-1][0] += ch
return linewithcol
linewithcol = colsplit(line)
# Refine list using first line as a guide. This will insert columns that are missing and remove
# columns that do not match the title. Each windowgram line must match the one before it, or it's
# dropped, so a user must take care in editing windowgramgroup_pattern objects or data disappears.
# TODO: Slightly more sophisticated matching that will compensate for unaligned windowgrams by
                # snapping to the nearest column; this would just be an estimate, so some loss from user error is still possible.
if first_linewithcol:
# Strip columns with unexpected positions
drop = []
for ix1, (_, col1) in enumerate(linewithcol):
ix2 = [ ix2 for ix2, (_, col2) in enumerate(first_linewithcol) if col2 == col1 ]
if not ix2: drop.append(ix1)
for ix in reversed(drop): linewithcol.pop(ix)
# Insert missing columns
for ix1, (_, col1) in enumerate(first_linewithcol):
if not [ (ix2, col2) for ix2, (_, col2) in enumerate(linewithcol) if col2 == col1 ]:
linewithcol.insert( ix1, ["", col1] )
# First line expands the collation list
if not first_linewithcol:
first_linewithcol = linewithcol
for n in range(len(first_linewithcol)): windowgramgroup_list.append([])
# Insert lines into the collation list
linewithcol = linewithcol[:len(first_linewithcol)] # Assure truncation
for n in range(len(first_linewithcol)):
windowgramgroup_list[-(len(first_linewithcol)-n)].append(linewithcol[n][0])
# Return as list of windowgrams with blank lines removed
windowgramgroup_list = [ "\n".join([ l2 for l2 in l if l2 ])+"\n" for ix, l in enumerate(windowgramgroup_list) ]
return [ _ for _ in windowgramgroup_list if _ != "\n" ]
@staticmethod
def List_To_Pattern(windowgramgroup_list, maxwidth, lpad=0, mpad=1, testmode=False):
windowgramgroup_pattern = ""
windowgram_line_arr = []
windowgram_width_arr = []
# Build arrays of lines and widths
for windowgram_string in windowgramgroup_list:
windowgram_lines = Windowgram_Convert.String_To_Lines( windowgram_string )
windowgram_line_arr.append( windowgram_lines )
windowgram_width_arr.append( list( reversed( sorted( [ len(line) for line in windowgram_lines ] ) ) )[0] )
spent = 0
while spent < len(windowgram_width_arr):
# Determine how many of the remaining windowgrams will fit on this windowgramgroup row
tmplen = lpad + windowgram_width_arr[spent]
spending = spent
spent += 1
while spent < len(windowgram_width_arr):
tmplen += mpad + windowgram_width_arr[spent]
if tmplen > maxwidth: break
spent += 1
# Skip line between windowgram runs
if windowgramgroup_pattern: windowgramgroup_pattern += "\n"
# Vertically pad the windowgrams for zip iteration
batch = windowgram_line_arr[spending:spent]
height = list( reversed( sorted( [ len(lines) for lines in batch ] ) ) )[0]
batch = [ lines if len(lines) >= height else lines + ([" "]*(height-len(lines))) for lines in batch ]
# Print this windowgramgroup row
for ix in range( height ):
row = [ lines[ix] if len(lines[ix]) >= windowgram_width_arr[spending+ix2] \
else lines[ix] + (" "*(windowgram_width_arr[spending+ix2]-len(lines[ix]))) \
for ix2, lines in enumerate( batch ) ]
windowgramgroup_pattern = windowgramgroup_pattern + (" "*lpad) + ((" "*mpad).join(row)) + "\n"
# Strip blank spaces from end of line
windowgramgroup_pattern = "\n".join( [ line.rstrip() for line in windowgramgroup_pattern.split("\n") ] )
# For ease of testing, add newline prefix and padding suffix of specified length
if testmode is not False:
windowgramgroup_pattern = "\n" + windowgramgroup_pattern + (" "*testmode)
return windowgramgroup_pattern
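# Editor's sketch of the inverse direction (assumed spacing): List_To_Pattern joins windowgrams side by side up
# to maxwidth, left-padding each row by lpad and separating neighbors by mpad, so a call along the lines of
# List_To_Pattern( [ "11\n11\n", "22\n22\n" ], 80, lpad=4, mpad=1 ) should produce "    11 22" on each of the
# two rows, with trailing spaces stripped as implemented above.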
##
## Windowgram
##
## Error handling is done by polling GetErrorPair() after calling an error-generating method
##
## Optimization Notes:
##
## Tried a simple windowgram object cache for this class (to avoid repetitious conversions) as a quick attempt to
## optimize class usage. It reduced overall unit testing time to about half. However, because objects like lists
## are copied as references, deep copies were necessary when moving objects in and out of the cache. The resulting
## cache was significantly slower, so it was removed, but the code has been saved. Additions may make the cache
## viable, including a hash for comparison, and converting windowgram objects to classes that automatically detach
## from the cache upon modification; both require a significant amount of work.
##
class Windowgram():
def __init__(self, windowgram_raw, extend=False):
# Mask mode (extend parameter) should only be enabled here to avoid type uncertainty
self.extend = extend # For masking
self.change_count = 0
self.change_query = 0
self.Import_Raw(windowgram_raw)
self.NoChange()
def __eq__(self, other):
# Needed by Mosaics_Equal()
return True if self.Export_String() == other.Export_String() else False
def Reset(self):
self.windowgram_string = None
self.error_string = None
self.error_line = 0
self.change_count += 1
def GetErrorPair(self): # Resets error when polled. Returns: error_string, error_line
error_string = self.error_string
error_line = self.error_line
self.error_string = None
self.error_line = 0
return error_string, error_line
def Is_Extended(self):
return self.extend
def Disable_Extended(self):
self.extend = False
def Copy(self):
return Windowgram( self.Export_String(), self.Is_Extended() )
##
## Loaders
##
def Load_Chars(self, windowgram_chars):
self.Import_Chars(windowgram_chars)
return self
def Load_Parsed(self, windowgram_parsed):
self.Import_Parsed(windowgram_parsed)
return self
##
## Imports
##
def Import_Raw(self, windowgram_raw):
self.Reset()
self.windowgram_string = Windowgram_Convert.PurifyString( windowgram_raw ) # Strip comments and whitespace
self.Changed()
def Import_String(self, windowgram_string):
self.Import_Raw( windowgram_string )
def Import_Lines(self, windowgram_lines):
self.Import_Raw( Windowgram_Convert.Lines_To_String(windowgram_lines) )
def Import_Chars(self, windowgram_chars):
self.Import_Raw( Windowgram_Convert.Chars_To_String(windowgram_chars) )
def Import_Parsed(self, windowgram_parsed):
self.Import_Raw( Windowgram_Convert.Parsed_To_String(windowgram_parsed) )
def Import_Mosaic(self, windowgram_mosaic):
self.Import_Raw( Windowgram_Convert.Mosaic_To_String(windowgram_mosaic) )
def Import_Wg(self, wg):
self.Import_Raw( wg.Export_String() )
##
## Exports ... The windowgram is only converted upon request
##
def Export_String(self):
return self.windowgram_string
def Export_Lines(self):
windowgram_lines = Windowgram_Convert.String_To_Lines( self.windowgram_string )
return windowgram_lines
def Export_Chars(self):
windowgram_chars = Windowgram_Convert.String_To_Chars( self.windowgram_string )
return windowgram_chars
def Export_Parsed(self): # Generates error
windowgram_parsed, error_string, error_line = \
Windowgram_Convert.String_To_Parsed( self.windowgram_string, self.extend )
if error_string:
windowgram_parsed = {}
self.error_string = error_string
self.error_line = error_line
return windowgram_parsed
def Export_Mosaic(self): # Generates error
windowgram_mosaic = []
self.error_string = "Not implemented"
self.error_line = 0
return windowgram_mosaic
##
## Analyze windowgram for metrics and supportability, performed on demand
##
@staticmethod
def Analyze_WidthHeight_Static(windowgram_lines):
return [ (max([ len(line) for line in windowgram_lines ]) if windowgram_lines else 0), len( windowgram_lines ) ]
def Analyze_WidthHeight(self):
windowgram_lines = self.Export_Lines()
return Windowgram.Analyze_WidthHeight_Static( windowgram_lines )
def Analyze_IsBlank(self):
return True if not max(self.Analyze_WidthHeight()) else False
def Analyze_Layers(self):
return 1 # Fixed for now
def Analyze_Type(self, relative):
# Determine compatibility (split, tiled, layered)
analysis_type = ""
while True:
# Detect layered
self.error_string = None
windowgram_parsed = self.Export_Parsed()
if self.error_string:
analysis_type = "ERROR"
break
list_panes, windowgram_parsed = Windowgram_Miscellaneous.SortPanes( windowgram_parsed )
overlap_pane1, overlap_pane2 = Windowgram_Miscellaneous.PaneOverlap( list_panes )
if overlap_pane1 or overlap_pane2:
analysis_type = "layered" # Implicit
break
# Detect split
sw = { 'print': None, 'verbose': 0, 'relative': relative, 'scanline': False } # No print
list_split, list_links = SplitProcessor( sw, self, 1024, 1024, list_panes ) # Avoid layout errors
splityes = True
for split in list_split:
# Readability
ent_panes = ''
for i in list_panes:
if 'l' in i and i['l'] == split['linkid']:
ent_panes = i
break
if not ent_panes:
splityes = False
break
if splityes:
analysis_type = "split"
break
# Assume tiled
analysis_type = "tiled"
break
return analysis_type
##
## Change Detection (has change_count been incremented since last query)
##
def Changed(self): self.change_count += 1
def NoChange(self): self.change_query = self.change_count
def HasChanged_SenseOnly(self): return True if self.change_count != self.change_query else False # True if changed
def HasChanged(self): flag = self.HasChanged_SenseOnly() ; self.NoChange() ; return flag
##
## Pane / Panes
##
def Panes_GetUsedUnused(self): # used, unused
# Mutually exclusive list of pane ids for given windowgram
windowgram_lines = self.Export_Lines()
used = "".join( sorted( list(set(list("".join(windowgram_lines)))),
key=lambda x: ValidPanes(self.extend).find(x) ) )
unused = "".join( [ paneid for paneid in ValidPanes(self.extend) if paneid not in used ] )
return used, unused
def Panes_GetNewPaneId(self, preferred=None): # newpaneid, error
# Input preferred: None == First available pane / paneid == Specified if valid
used, unused = self.Panes_GetUsedUnused()
if not unused: return None, "All pane identifiers have been used"
if preferred is None: return unused[0], None
if preferred not in ValidPanes(self.extend): return None, "Invalid pane identifier"
if preferred not in unused: return None, "Pane id `" + preferred + "` is in use"
return preferred, None
def Panes_PanesNotUsed(self, panes):
panes = list(panes)
for line in self.Export_Lines():
for ch in line:
if ch in panes:
panes = [ pane for pane in panes if pane != ch ]
if not panes:
return None
return "".join(panes)
def Panes_PanesNotUsed_Message(self, panes):
undef = self.Panes_PanesNotUsed(panes)
if not undef: return None
return "pane" + ["","s"][len(undef)!=1] + " '" + undef + "' " + ["is","are"][len(undef)!=1] + " not in use"
def Panes_HasPane(self, pane):
return True if self.Panes_PanesNotUsed(pane) is None else False
def Panes_PaneXYXY(self, pane): # x1, y1, x2, y2
if not self.Panes_HasPane( pane ): return 0, 0, 0, 0
windowgram_lines = self.Export_Lines()
x2 = y2 = -1
x1 = len(windowgram_lines[0])
y1 = len(windowgram_lines)
for y, line in enumerate(windowgram_lines):
for x, char in enumerate(line):
if char == pane:
if x < x1: x1 = x
if x > x2: x2 = x
if y < y1: y1 = y
if y > y2: y2 = y
return x1+1, y1+1, x2+1, y2+1
def Panes_PaneXYWH(self, pane): # x, y, w, h
if not self.Panes_HasPane( pane ): return 0, 0, 0, 0
x1, y1, x2, y2 = self.Panes_PaneXYXY( pane )
return x1, y1, x2-x1+1, y2-y1+1
def Panes_Renamer(self, panes, pane):
# Supports multiple panes renaming, use only when you know the results will be valid
new_lines = []
for line in self.Export_Lines():
new_lines.append( "".join( [ (ch if ch not in panes else pane) for ch in line ] ) )
self.Import_Lines( new_lines )
def Panes_FromMask(self, mask_string):
# Returns unique panes covered by specified mask
lines_w = self.Export_Lines()
lines_m = Windowgram_Convert.String_To_Lines( mask_string )
panes = "".join( set( \
[ ch_w for ch_w, ch_m in zip( "".join(lines_w), "".join(lines_m) ) if ch_m == MASKPANE_1 ] ) )
return panes
def Panes_Exist(self):
# True if any panes exist, including transparency
return True if self.Export_Lines() != [] else False
##
## Edge (full) ... Edge format is the position of a full windowgram edge: xy
##
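# Editor's note for orientation (derived from the methods below): a full edge is addressed by a single number
# counted in characters from the left or top, so in the windowgram "112\n332\n" the vertical fulledge 2 is the
# boundary between columns 2 and 3, and Edge_PanesAlong("v", 2) is expected to return the panes "1", "2", "3"
# in some unspecified order.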
def Edge_PanesAlong(self, axis, fulledge):
# Returns a unique unsorted set of panes that touch the fulledge on either side
windowgram_chars = self.Export_Chars()
if axis == "v": windowgram_chars = Windowgram_Convert.Transpose_Chars( windowgram_chars )
return "".join( set( (windowgram_chars[fulledge-1] if fulledge > 0 else []) +
(windowgram_chars[fulledge] if fulledge < len(windowgram_chars) else []) ) )
@staticmethod
def Edge_Extract_Static(windowgram_lines, axis, fulledge, direction):
# Returns a string of characters that border the one side of the fulledge that is opposite the direction
wgw, wgh = Windowgram.Analyze_WidthHeight_Static( windowgram_lines )
# Note special cases (effectively "drag r * l 1" and "drag l * r 1") return transparency since those edges
# are completely out of range. The smudgecore caller translates this transparency into a subtraction.
if axis == "v":
if (direction == "" and fulledge == wgw) or (direction == "-" and fulledge == 0): return MASKPANE_X * wgh
return "".join( [ line[fulledge-1 if direction == "-" else fulledge] for line in windowgram_lines ] )
else: # if axis == "h":
if (direction == "" and fulledge == wgh) or (direction == "-" and fulledge == 0): return MASKPANE_X * wgw
return windowgram_lines[fulledge-1 if direction == "-" else fulledge]
def Edge_Extract(self, axis, fulledge, direction):
# Returns a string of characters that border the one side of the fulledge that is opposite the direction
windowgram_lines = self.Export_Lines()
return Windowgram.Edge_Extract_Static( windowgram_lines, axis, fulledge, direction )
def Edge_ClipOuterTransparents(self):
# Any fully transparent lines on any outer edge are clipped entirely
wgl = self.Export_Lines()
wgx1, wgy1, (wgx2, wgy2) = 1, 1, self.Analyze_WidthHeight()
wgx2 -= 1 ; wgy2 -= 1 # Aligns edges so they may be extracted directionally
while wgx1-1 <= wgx2 and set(Windowgram.Edge_Extract_Static( wgl, "v", wgx1, "-" )) == {MASKPANE_X}:
wgx1 = wgx1 + 1
while wgx2 >= wgx1-1 and set(Windowgram.Edge_Extract_Static( wgl, "v", wgx2, "" )) == {MASKPANE_X}:
wgx2 = wgx2 - 1
while wgy1-1 <= wgy2 and set(Windowgram.Edge_Extract_Static( wgl, "h", wgy1, "-" )) == {MASKPANE_X}:
wgy1 = wgy1 + 1
while wgy2 >= wgy1-1 and set(Windowgram.Edge_Extract_Static( wgl, "h", wgy2, "" )) == {MASKPANE_X}:
wgy2 = wgy2 - 1
wgc = self.Export_Chars()
self.Import_Chars( [ [ wgc[iy][ix] for ix in range(wgx1-1, wgx2+1) ] for iy in range(wgy1-1, wgy2+1) ] )
##
## Edge (sub) ... Edge format is the description of a sub edge within the windowgram: [ xy, from, to ]
##
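# Editor's note (illustrative): a sub edge [ xy, from, to ] carries the axis position plus the character range
# it spans, so in "112\n332\n" the vertical sub edge [2, 0, 2] covers both rows of the boundary between columns
# 2 and 3, and Edge_PanesAlongSub("v", [2, 0, 2]) should again yield panes "1", "2", "3".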
def Edge_PanesAlongSub(self, axis, edge):
# Returns a unique unsorted set of panes that touch the edge on either side
windowgram_chars = self.Export_Chars()
w = len(windowgram_chars[0])
h = len(windowgram_chars)
panes = []
if axis == "v":
for y in range(edge[1], edge[2]):
if edge[0] == 0: panes.append( windowgram_chars[y][0] )
elif edge[0] == w: panes.append( windowgram_chars[y][w-1] )
else: panes += [ windowgram_chars[y][edge[0]-1], windowgram_chars[y][edge[0]] ]
else: # if axis == "h":
if edge[0] != 0: panes += [ windowgram_chars[edge[0]-1][x] for x in range(edge[1], edge[2]) ]
if edge[0] != h: panes += [ windowgram_chars[edge[0] ][x] for x in range(edge[1], edge[2]) ]
return "".join( set( panes ) )
##
## CopyMasked
##
## Minimal enclosing windowgram of the specified mask
## Supports non-rectangular masks
## MASKPANE_1: Window data
## MASKPANE_0: Pane transparency
## Mask dimensions must match windowgram dimensions
##
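# Editor's sketch of the mask convention (the literal characters below are placeholders for MASKPANE_1 and
# MASKPANE_0, which are defined elsewhere in this file): given the windowgram "12\n34\n" and a mask selecting
# only the left column, CopyMasked_Out returns the minimal enclosing windowgram of the selected region, i.e.
# "1\n3\n", and CopyMasked_In pastes window data back through the same mask.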
def CopyMasked_Out(self, wg_mask):
px, py, pw, ph = wg_mask.Panes_PaneXYWH(MASKPANE_1)
wgc_self = self.Export_Chars()
wgc_mask = wg_mask.Export_Chars()
wgc_new = [ [ "" for _ in range(pw) ] for _ in range(ph) ]
for ix in range( px, px+pw ):
for iy in range( py, py+ph ):
if wgc_mask[iy-1][ix-1] == MASKPANE_1: wgc_new[iy-py][ix-px] = wgc_self[iy-1][ix-1]
else: wgc_new[iy-py][ix-px] = MASKPANE_X
return Windowgram("", True).Load_Chars(wgc_new) # May contain extended characters
def CopyMasked_In(self, wg_mask, wg_data):
windowgram_chars = self.Export_Chars()
frame_chars = wg_mask.Export_Chars()
image_chars = wg_data.Export_Chars()
fx, fy, fw, fh = wg_mask.Panes_PaneXYWH(MASKPANE_1)
iw, ih = wg_data.Analyze_WidthHeight()
if iw and ih:
for iy in range(ih):
for ix in range(iw):
if frame_chars[fy-1+iy][fx-1+ix] == MASKPANE_1:
windowgram_chars[fy-1+iy][fx-1+ix] = image_chars[iy][ix]
self.Load_Chars(windowgram_chars)
##
## Windowgram Masking Functions
##
def Windowgram_Mask_Generate(wg, panes): # wg_mask
# Returns a windowgram with non-standard panes for use with masking: MASKPANE_0 for zero, MASKPANE_1 for one
windowgram_parsed = wg.Export_Parsed()
width, height = wg.Analyze_WidthHeight()
# Produce mask
mask_windowgram_chars = []
while len(mask_windowgram_chars) < height: mask_windowgram_chars.append( list(MASKPANE_0 * width) )
for key in list(panes):
pane = windowgram_parsed[key]
for y in range( pane['y'], pane['y'] + pane['h'] ):
for x in range( pane['x'], pane['x'] + pane['w'] ):
mask_windowgram_chars[y-1][x-1] = MASKPANE_1
# Return mask as wg instance
wg_mask = Windowgram("", True) # Create a windowgram for masking
wg_mask.Import_Chars( mask_windowgram_chars )
return wg_mask
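# Editor's example (assumed MASKPANE values): Windowgram_Mask_Generate( Windowgram("12\n34\n"), "12" ) produces
# a mask whose top row is entirely MASKPANE_1 and whose bottom row is entirely MASKPANE_0, selecting the first
# row of panes.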
def Windowgram_Mask_Boolean(wg_mask1, wg_mask2, op):
# Assumes identical size
wgc1 = wg_mask1.Export_Chars()
wgc2 = wg_mask2.Export_Chars()
wgc3 = []
for iy in range(len(wgc1)):
for ix in range(len(wgc1[iy])):
while len(wgc3) <= iy: wgc3.append([])
while len(wgc3[iy]) <= ix: wgc3[iy].append("")
if op == "and":
wgc3[iy][ix] = MASKPANE_1 if (MASKPANE_1 == wgc1[iy][ix] == wgc2[iy][ix]) else MASKPANE_0
else: # Unsupported boolean operation
wgc3[iy][ix] = MASKPANE_0
return Windowgram("", True).Load_Chars(wgc3)
def Windowgram_Mask_Macro_BuildSplitMasks(wg, res_hint, axis_location):
# Produce mutually-exclusive side masks, these are based on the defined edge and the windowgram dimensions
# This macro is shared by: drag, insert, clone
wgw, wgh = wg.Analyze_WidthHeight()
aa, bb = (wgw, wgh) if res_hint == "v" else (wgh, wgw)
mask_0 = dict(x=1, w=axis_location, y=1, h=bb)
mask_1 = dict(x=axis_location+1, w=aa-axis_location, y=1, h=bb)
if res_hint != "v":
mask_0 = Windowgram_Convert.Transpose_ParsedPane(mask_0)
mask_1 = Windowgram_Convert.Transpose_ParsedPane(mask_1)
wgm0 = Windowgram("", True).Load_Parsed(ParsedPanes_Add(MASKPANE_1, mask_0, ParsedPanes_Add(MASKPANE_0, mask_1)))
wgm1 = Windowgram("", True).Load_Parsed(ParsedPanes_Add(MASKPANE_1, mask_1, ParsedPanes_Add(MASKPANE_0, mask_0)))
return wgm0, wgm1
def Windowgram_Mask_Macro_GenerateAndSplitMasks(wg, wgm0, wgm1, panes):
# Generates new masks based on a windowgram, base masks, and a set of panes
# This macro is shared by: drag, insert, clone
wgm0x = Windowgram_Mask_Boolean(wgm0, Windowgram_Mask_Generate(wg, panes), "and")
wgm1x = Windowgram_Mask_Boolean(wgm1, Windowgram_Mask_Generate(wg, panes), "and")
return wgm0x, wgm1x
def Windowgram_Mask_Macro_ValidateRegularity(scalegroup, wgm1, wgm2, res_hint, axis_location):
# Validates masks according to regularity rules (until irregular scalegroups are supported), returns valid or None
# This macro is shared by: drag, insert, clone
def Validate(wgm):
# (1) Any MASKPANE_1 character touching the specified axis edge
if wgm.Panes_HasPane(MASKPANE_1) and MASKPANE_1 not in wgm.Edge_PanesAlong(res_hint, axis_location):
return "The scalegroup '" + scalegroup + "' does not touch the specified edge"
# (2) That the mask is a rectangular shape (for now; see above notes on irregular parallelism)
result, suggestions = groupcore(wgm, MASKPANE_1) # GroupStatus.Invalid_Panes means MASKPANE_1 was not found
if result == GroupStatus.Invalid_Panes or result == GroupStatus.Success: return None
return "The scalegroup '" + scalegroup + "' is an unsupported irregular shape, try making it rectangular"
r1 = Validate(wgm1)
r2 = Validate(wgm2)
return r1 if r1 else (r2 if r2 else None)
##
## Pane List Functions
##
def PaneList_DiffLost(this, that): # lostpanes
# Parameters are Windowgram instances, aka wg
used1, _ = this.Panes_GetUsedUnused()
used2, _ = that.Panes_GetUsedUnused()
lostpanes, _ = Windowgram( "".join( list(set(used1) - set(used2)) ) ).Panes_GetUsedUnused()
return lostpanes
def PaneList_MovePanes(list1, list2, panes): # newlist1, newlist2
# Moves specified batch of panes (if present) from "list1" into "list2" ... Returns new lists in that order
for pane in list(panes):
if pane in ValidPanes() and (pane in list1 or pane not in list2):
# Assert ordering every pass, as in some situations the panes will be unsorted
list1 = "".join([ch for ch in ValidPanes() if ch in list1 and ch != pane])
list2 = "".join([ch for ch in ValidPanes() if ch in list2 or ch == pane])
return list1, list2
def PaneList_AssimilatedSorted(this, that): # this_plus_that_assimilated_and_sorted
return "".join( sorted( set( this + that ), key=lambda x: ValidPanes().find(x) ) )
##
## Other
##
def ParsedPanes_Add(paneid, parsedpane, windowgram_parsed={}):
windowgram_parsed = copy.deepcopy(windowgram_parsed) # Required so the default ({}) won't get shared and corrupted
windowgram_parsed[paneid] = dict( list(parsedpane.items()) + list(dict(n=paneid).items()) )
return windowgram_parsed
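# Editor's illustration of the parsed form (keys inferred from their use elsewhere in this file): each pane id
# maps to a dictionary of x, y, w, h in 1-based character units, plus its own name under 'n', so roughly:
#
#     parsed = ParsedPanes_Add( "1", dict(x=1, y=1, w=2, h=2) )
#     parsed = ParsedPanes_Add( "2", dict(x=3, y=1, w=2, h=2), parsed )
#     # parsed["2"] == dict(x=3, y=1, w=2, h=2, n="2")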
##
## Flex: Macros ... See actual usage for examples
##
is_long_axis_vert = lambda axis: True if axis in [ "v", "vertical", "vert" ] else False
is_long_axis_horz = lambda axis: True if axis in [ "h", "horizontal", "horz" ] else False
is_short_axis_vert_vhtblr = lambda axis: True if axis in [ "v", "t", "b" ] else False
is_short_axis_horz_vhtblr = lambda axis: True if axis in [ "h", "l", "r" ] else False
is_true = lambda word, alt=None: \
True if word.lower() in [ "1", "true", "yes" ] + ([ alt.lower() ] if alt else []) else False
valid_directions = [ # These directions are recognized, the list is ordered 0123 == TBRL || NSEW
[ "top", "t", "tp", "north", "n", "up", "u", "over", "above", ], # ix == 0 -> Vertical +
[ "bottom", "b", "bt", "south", "s", "down", "d", "under", "below", ], # ix == 1 -> Vertical -
[ "right", "r", "rt", "east", "e" ], # ix == 2 -> Horizontal -
[ "left", "l", "lt", "west", "w" ], # ix == 3 -> Horizontal +
]
def direction_to_axiswithflag(direction, inverse=False): # axis_as_vh, negate_flag | None, None
for ix, directions_ent in enumerate(valid_directions):
if True in [True if d.lower().strip() == direction.lower().strip() else False for d in directions_ent]:
if ix == 0: return "v", False ^ inverse # Top
if ix == 1: return "v", True ^ inverse # Bottom
if ix == 2: return "h", True ^ inverse # Right
if ix == 3: return "h", False ^ inverse # Left
return None, None
def axiswithflag_to_direction(axis, flag): # direction
if axis == "v" and flag == False: return "t"
if axis == "v" and flag == True : return "b"
if axis == "h" and flag == True : return "r"
if axis == "h" and flag == False: return "l"
return None
def resolve_vhtblr(hint): # Either one of list("vhtblr") or None
if is_long_axis_vert(hint): return "v"
elif is_long_axis_horz(hint): return "h"
return axiswithflag_to_direction( *direction_to_axiswithflag( hint ) ) # "t", "b", "l", "r"
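# Editor's examples, checkable against the tables above: direction_to_axiswithflag("right") == ("h", True),
# axiswithflag_to_direction("v", False) == "t", resolve_vhtblr("vertical") == "v", resolve_vhtblr("bottom") == "b"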
def thruvalid_panes(panes, ignore=""): # Return unmodified if valid else None
if len([ ch for ch in panes if ch not in PANE_CHARACTERS + ignore ]): return None
return panes
def classify_panes(used, unused, panes): # areused, areunused, invalids
areused = areunused = invalids = ""
for paneid in panes:
if paneid in used: areused += paneid
elif paneid in unused: areunused += paneid
else: invalids += paneid
return areused, areunused, invalids
def resolve_size(size, axis_length, inverse, showinv, restrict=True): # error_or_none, inverse_flag, size_as_characters
size_as_characters = 0
try:
original_size = size # Retain original for error messages
while size and size[0] == "-": size = size[1:] # Strip negation
size_type = size_GetType( size )
if size_type is None:
raise Exception( "Invalid size parameter: " + original_size )
if restrict and size_GreaterOrEqualToBaseCharacters( size, axis_length ):
if size_type == "characters": rep = showinv + str(axis_length)
elif size_type == "percentage": rep = showinv + "100%"
else: rep = showinv + "1x" # elif size_type == "multiplier"
raise Exception( "Specified size (" + original_size + \
") is greater or equal to the maximum range (" + rep + ") of this function" )
size_as_characters = size_ConvertToCharacters( size, axis_length )
if size_as_characters is None:
raise Exception( "Invalid size parameter: " + size )
if restrict and size_as_characters >= axis_length: # Shouldn't happen by now, but if it does
raise Exception( "Resulting size (" + showinv + str(size_as_characters) + \
" characters) is greater or equal to the axis length (" + str(axis_length) + ")" )
if size_as_characters < 1:
raise Exception( "Resulting size (" + showinv + str(size_as_characters) + \
" characters) is not valid" )
except Exception as error:
return str(error), None, None
return None, inverse, size_as_characters
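# Editor's sketch of typical resolution (hypothetical axis length): with axis_length == 20, the sizes "50%",
# "0.5x", and "10" all resolve to 10 characters; any leading "-" on the size is stripped before conversion,
# the caller supplies the inverse flag, and showinv is only used to echo the negation in error messages.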
##----------------------------------------------------------------------------------------------------------------------
##
## Flex cores
##
## These functions are shared by multiple flex commands.
##
## +----------------+--------------------------------------------------------------+
## | Cores | Used By |
## +----------------+--------------------------------------------------------------+
## | scalecore | scale break drag insert clone |
## | groupcore | join drag insert clone delete flip mirror rotate |
## | edgecore | drag insert clone delete |
## | smudgecore | drag insert clone |
## +----------------+--------------------------------------------------------------+
##
##----------------------------------------------------------------------------------------------------------------------
##
## Scale core ... Scales a windowgram
##
## Callers: scale, break, drag, insert
##
def scalecore_v1(windowgram_string, w_chars, h_chars):
##
## Based on the scale code used in tmuxomatic 1.x
## Used until 2.0, then reactivated in 2.3
##
def scale_one(element, multiplier):
# Scale element using integer rounding, multiplier must be float
q, r = math.modf( float(element - 1) * multiplier )
if q >= .5: r += 1
return int(r) + 1
def scale_windowgram(list_panes, ax, ay): # lost_count
# Scales the windowgram
for paneid in list_panes.keys():
pane = list_panes[paneid]
# The following were conditional prior to 2.4, removed to allow scale to 0 since it's handled by caller
pane['w'] = scale_one( pane['x'] + pane['w'], ax )
pane['h'] = scale_one( pane['y'] + pane['h'], ay )
pane['x'] = scale_one( pane['x'], ax )
pane['y'] = scale_one( pane['y'], ay )
pane['w'] -= pane['x']
pane['h'] -= pane['y']
# Get pane list
list_panes = Windowgram(windowgram_string).Export_Parsed()
# Set the multipliers
ww, wh = Windowgram(windowgram_string).Analyze_WidthHeight()
ax, ay = float(w_chars) / float(ww), float(h_chars) / float(wh)
# Perform the scale
list_panes_scaled = copy.deepcopy( list_panes )
scale_windowgram( list_panes_scaled, ax, ay )
windowgram_string_new = Windowgram_Convert.Parsed_To_String( list_panes_scaled )
return windowgram_string_new
def scalecore_v2(windowgram, w_chars, h_chars):
##
## Simpler but less accurate scale code added in tmuxomatic 2.0
## Used briefly from 2.0 to 2.2
##
from_w, from_h = Windowgram(windowgram).Analyze_WidthHeight()
x_mul = float(w_chars) / float(from_w)
y_mul = float(h_chars) / float(from_h)
windowgram_chars = Windowgram_Convert.String_To_Chars(windowgram)
windowgram_chars_scaled = []
for y in range(0, h_chars):
windowgram_chars_scaled.append( [ windowgram_chars[ int(y/y_mul) ][ int(x/x_mul) ] \
for x in range(0, w_chars) ] )
windowgram_new = Windowgram_Convert.Chars_To_String( windowgram_chars_scaled )
return windowgram_new
def scalecore(windowgram, w_chars, h_chars, retry=None): # TODO: Scale by wg to remove the Windowgram_Convert usage
##
## Main entry for all scale functions
##
windowgram_scaled = "" # Scope, and reset in case of error
# Retry with necessary increment and/or decrement until desired pane dimensions are reached. This is required for
# commands like "break", which need to scale to a specific pane size. There's likely a way to derive these metrics
# directly, but an iterative approach works fine. Verify two resizes are necessary with the following test case.
# Test case Test_FlexCores.test_ScaleCore_ScaleRetries: "new 1 ; break 1 11x1 ; scale 46 1 ; break 5 7x1"
tries = 0
tries_max = 16 # An infinite loop is unlikely, but this maximum will prevent such an occurrence
paneid = exp_w = exp_h = None
if retry and type(retry) is tuple and len(retry) == 3:
paneid, exp_w, exp_h = retry
if Windowgram( windowgram ).Panes_HasPane( paneid ): tries = tries_max
else: paneid = None
# Scale until satisfied; this loop is for pane measurement, since the windowgram should always scale on first try.
if tries < 1: tries = 1
try_w, try_h = w_chars, h_chars
while tries:
# See the scale core discrepancy in the following test case, where v2 loses 3 panes, and v1 does not.
# Test case Test_FlexCores.test_ScaleCore_VersionAssert: "new 1 ; break 1 2x2 ; scale 3x3 ; scale 2x2"
windowgram_scaled = scalecore_v1( windowgram, try_w, try_h ) # Must be v1, see tests
if paneid:
_, _, new_w, new_h = Windowgram( windowgram_scaled ).Panes_PaneXYWH( paneid )
if new_w == exp_w and new_h == exp_h: break
try_w += 1 if new_w < exp_w else -1 if new_w > exp_w else 0
try_h += 1 if new_h < exp_h else -1 if new_h > exp_h else 0
tries -= 1
return windowgram_scaled
##
## Group core ... Tests group of panes for contiguity, returns group capability, if panes are missing it suggests them
##
## Callers: join, drag, insert
##
class GroupStatus:
Success = 1 # Group is valid
Invalid_Panes = 2 # Panes specified are invalid (not in used, or are in unused)
Insufficient_Panes = 3 # Panes need to be added, see the accompanying suggestions
def groupcore(wg, panes): # flag_groupstatus, string_suggestions
##
## Groups the specified panes and returns the findings. If the panes are valid, but there are gaps in the group,
## it recursively detects which panes need to be added to complete the group. If a group is determined to be valid,
## the windowgram may be trivially updated by the caller using a simple search and replace.
##
used, unused = wg.Panes_GetUsedUnused()
# Pane validity
for pane in set(panes):
if pane not in used or pane in unused:
return GroupStatus.Invalid_Panes, None
# Function for assembly of panes detected within any gaps of the mask
def pane_deficit_detection(wg_win, x1, y1, x2, y2, panes):
# Parameters: windowgram, rectangular bounds of mask, valid panes
deficient_panes = ""
wgw_windowgram_chars = wg_win.Export_Chars()
wgm_windowgram_chars = wg_msk.Export_Chars()
for y in range( len(wgw_windowgram_chars) ):
for x in range( len(wgw_windowgram_chars[y]) ):
w, m = wgw_windowgram_chars[y][x], wgm_windowgram_chars[y][x]
if x >= x1-1 and x <= x2-1 and y >= y1-1 and y <= y2-1 and w not in set(panes):
deficient_panes += w
return deficient_panes
# Run deficit detection until none remain (e.g., mask == windowgram)
suggestions = ""
while True:
# Draw mask and yield rectangular bounds
wg_msk = Windowgram_Mask_Generate( wg, panes )
x1, y1, x2, y2 = wg_msk.Panes_PaneXYXY( MASKPANE_1 )
# Find pane content of any existing gaps
deficient_panes = pane_deficit_detection( wg, x1, y1, x2, y2, panes )
if not deficient_panes: break
panes = PaneList_AssimilatedSorted( panes, deficient_panes )
suggestions = PaneList_AssimilatedSorted( suggestions, deficient_panes )
# Result by now will be either of these
if not suggestions: return GroupStatus.Success, ""
return GroupStatus.Insufficient_Panes, suggestions
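# Editor's example (assumed, mirroring the description above): in the windowgram "12\n34\n", groupcore(wg, "14")
# spans a diagonal with gaps, so it returns GroupStatus.Insufficient_Panes with the suggestion string "23",
# whereas groupcore(wg, "12") forms a clean rectangle and returns ( GroupStatus.Success, "" ).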
##
## Edge core ... Tests group of panes for contiguous edge and if valid reduces it to a common form for processing
##
## Callers: drag, insert
##
class EdgeStatus:
Valid = 1
Irrational = 2 # No edge produced from data
Ambiguous = 3 # Edges were found on both axes, so a single edge axis could not be determined
Noncontiguous = 4 # The produced edge has one or more gaps
@staticmethod
def error2string(status):
if status == EdgeStatus.Valid: return None
elif status == EdgeStatus.Irrational: return "No edge found (EdgeStatus.Irrational)"
elif status == EdgeStatus.Ambiguous: return "Edges found on each axis (EdgeStatus.Ambiguous)"
elif status == EdgeStatus.Noncontiguous: return "Multiple edges found (EdgeStatus.Noncontiguous)"
else: return "Unknown error (EdgeStatus.Undefined)"
# Required for merging contiguous runs that are likely to be unordered ... Presently SwipeSide produces such runs
def edgecore_merger(runs):
# Not always needed, but always expected: strip duplicate runs, similar to how one would expect list(set(runs)) to work
runs, scanruns = [], runs
for run in scanruns:
match = False
for setrun in runs:
if run == setrun: match = True
if not match: runs += [ run ]
# Merges neighboring runs where possible
oruns = sorted( runs, key = operator.itemgetter( 0, 1 ) )
nruns = []
run = []
for (y, x1, x2) in oruns:
if run == []: run = [y, x1, x2]
elif run[0] == y and run[2] + 1 == x1: run[2] = x2
else: nruns += [run] ; run = [y, x1, x2]
if run != []: nruns += [run]
return nruns
# SideSwipe and SwipeSide ... Though syllable-swapped, the names are accurate descriptions of the respective algorithms
def edgecore_sideswipe(p1x1, p1x2, p2x1, p2x2, p1y1, p1y2, p2y1, p2y2): # [ axis_opposite, scan_a, scan_b ] or None
# SideSwipe == Edge bordering panes only if contiguous
decrement_run = lambda run: [ val-1 for val in run ] # How one might expect run-1 to work
if p2x1 == p1x2 + 1:
if p2y1 <= p1y1 and p2y2 <= p1y2 and p1y1 <= p2y2: return decrement_run( [ p2x1, p1y1, p2y2 ] )
if p1y1 <= p2y1 and p2y2 <= p1y2 : return decrement_run( [ p2x1, p2y1, p2y2 ] )
if p2y1 <= p1y1 and p1y2 <= p2y2 : return decrement_run( [ p2x1, p1y1, p1y2 ] )
if p1y1 <= p2y1 and p1y2 <= p2y2 and p2y1 <= p1y2: return decrement_run( [ p2x1, p2y1, p1y2 ] )
if p1x1 == p2x2 + 1:
if p1y1 <= p2y1 and p1y2 <= p2y2 and p2y1 <= p1y2: return decrement_run( [ p1x1, p2y1, p1y2 ] )
if p2y1 <= p1y1 and p1y2 <= p2y2 : return decrement_run( [ p1x1, p1y1, p1y2 ] )
if p1y1 <= p2y1 and p2y2 <= p1y2 : return decrement_run( [ p1x1, p2y1, p2y2 ] )
if p2y1 <= p1y1 and p2y2 <= p1y2 and p1y1 <= p2y2: return decrement_run( [ p1x1, p1y1, p2y2 ] )
return None
def edgecore_swipeside(top, panedict, group, windowgram_chars): # [ [ axis_opposite, scan_a, scan_b ], ... ]
# SwipeSide == Edge of specified pane where neighbor is not in group ... TB only, LR use transposition
p1x1, p1y1 = panedict['x'], panedict['y']
p1x2, p1y2 = panedict['x'] + panedict['w'] - 1, panedict['y'] + panedict['h'] - 1
# Runs as [ [ axis_opposite, scan_a, scan_b ], [ axis_opposite, scan_a, scan_b ], ... ]
runs, run = [], []
for ix in range( p1x1-1, p1x2 ):
pair = None
if top and ( p1y1 == 1 or windowgram_chars[p1y1-2][ix] not in group ):
pair = [ p1y1-1, ix ]
if not top and ( p1y2 == len(windowgram_chars) or windowgram_chars[p1y2][ix] not in group ):
pair = [ p1y2, ix ]
if pair is not None:
if run == []: run = [ pair[0], pair[1], pair[1] ]
elif pair[1] == run[2] + 1: run[2] = pair[1]
else: runs += [run] ; run = [ pair[0], pair[1], pair[1] ]
if run != []: runs += [run]
return runs
# Used in edgecore and elsewhere
def edgecore_buildoptimal(windowgram_parsed, axis, group, minimal): # optimal
#
# Derive an optimal run from the produced minimal run
#
# The following are examples of minimal ("m"), optimal ("o"), and identical ("=") runs where they overlap entirely.
# The "|" is just a reminder that this is an illustration of edges (as opposed to panes, as are typical elsewhere).
# Dots are for irrelevant areas of the windowgram. Vertical edges are shown, but of course the same applies to
# horizontal when the windowgram is transposed.
#
# ......| |...... ......| |...... ......| |......
# ......| |...... ...qqq|o|xxx... ...MMM| |NNN...
# ...aaa| |bbb... ...111|o|xxx... ...111|o|OOO...
# ...111|=|222... ...111|m|222... ...111|m|222...
# ...zzz| |yyy... ...111|o|rrr... ...PPP|o|222...
# ......| |...... ...www|o|rrr... ...QQQ| |RRR...
# ......| |...... ......| |...... ......| |......
#
# E.1 E.2 E.3
#
# E.1 : The minimal edge is also the optimal edge. Typically optimal will be used for mechanics in split and
# grid platforms. However for illustrating the edge, minimal will probably be used, or a combination of
# the two. In this particular case it doesn't matter since they're the same.
#
# E.2 : Example "drag vertical 12 right 1", if minimal is used then there will be shearing on pane "1", this
# results in an irregular pane, thus an invalid windowgram. However if optimal is used, then drag knows
# what panes are affected by the drag, and assures a valid windowgram is produced every time. There are
# other concerns with the "drag" function that are outside the scope of this core, see callers.
#
# E.3 : Similar to E.2, "drag vertical 12:MNOPQR". Note that the scalegroup does not apply to this algorithm,
# only the edge group factors into the optimal choice. Dragging the scalegroup is handled independently
# of the edges, by using masking and pasting.
#
optimal = copy.deepcopy(minimal)
affected = group
# Build a list of qualifying edges that are perpendicular to the run
qedges = [] # Contains only the full run for the qualifying edge, as [ [ paneid, scan_a, scan_b ], ... ]
for paneid in windowgram_parsed.keys():
parsedpane = windowgram_parsed[paneid]
on_axis = optimal[0][0]
x1, x2 = parsedpane['x'] - 1, parsedpane['x'] + parsedpane['w'] - 1
y1, y2 = parsedpane['y'] - 1, parsedpane['y'] + parsedpane['h'] - 1
if axis == "v" and ( x1 == on_axis or x2 == on_axis ): qedges.append( [ paneid, y1, y2 ] )
if axis == "h" and ( y1 == on_axis or y2 == on_axis ): qedges.append( [ paneid, x1, x2 ] )
# Function to merge first overlapping edge into the existing run and remove it from further consideration
def edgemagnet(qedges, panes, run_as_runs): # changed, qedges, panes, run_as_runs
# An edge is included if and only if it overlaps at least one character of the existing edge
run = run_as_runs[0]
for qedge in qedges:
# Not the most efficient way to test overlap
sq = list(range(qedge[1], qedge[2] + 1))
sr = list(range(run [1], run [2] + 1))
sqr = list(sorted(set(sq + sr)))
if len(sq) + len(sr) - len(sqr) > 1: # These counts are edge-to-edge, +1 is not an actual overlap
run = [ run[0], sqr[0], sqr[-1] ]
if not qedge[0] in panes: panes += qedge[0]
qedges = [ _ for _ in qedges if _[0] != qedge[0] ]
return True, qedges, panes, [run]
return False, qedges, panes, run_as_runs
# One dimensional overlap assimilation loop that halts when no more additions are found
while True:
changed, qedges, affected, optimal = edgemagnet( qedges, affected, optimal )
if not changed: break
return optimal
# The main entry for producing an edge from a specified group
def edgecore(wg, group, direction=None): # status, axis, minimal, optimal
##
## An edge may be specified in either form:
##
## group Group of panes defines the edge by fully enclosing it with panes, unambiguously
## group, direction Direction clarifies the edge by specifying either: 1) cardinal direction, 2) axis
##
## The return types:
##
## status EdgeStatus.Valid if successful, if unsuccessful other types will be None, see errors
## axis This is either "h" or "v" for the edge axis
## minimal A single run in form of [[ on_axis, from_edge, to_edge ]]
## optimal Same as minimal but edge is extended to be compatible with split/grid windowgrams
##
## For tmuxomatic, (group, direction) should be used and/or the optimal edge favored, otherwise the result could
## produce windowgrams that are incompatible with tmux.
##
## For difference between minimal and optimal, consider the unit tests, or see the "drag" implementation.
##
used, unused = wg.Panes_GetUsedUnused()
# Resolve direction to common form ("v", "h", "t", "b", "l", "r") for easier reference
if direction is not None:
direction = resolve_vhtblr( direction )
# Algorithm uses parsed and character formats for edge detection
windowgram_parsed = wg.Export_Parsed()
windowgram_chars_yx = wg.Export_Chars()
windowgram_chars_xy = Windowgram_Convert.Transpose_Chars( windowgram_chars_yx ) # Only used to simplify TBRL
# Unordered array of edge characters, to be sorted later; defined as right/bottom of N, where left/top origin is N=0
edgeruns_v = [] # Vertical edges as ..... [ (X, y1, y2), (X, y1, y2), ... ]
edgeruns_h = [] # Horizontal edges as ... [ (Y, x1, x2), (Y, x1, x2), ... ]
# Produce the minimal edge
for paneid1 in windowgram_parsed.keys():
pane1 = windowgram_parsed[paneid1]
if paneid1 in group:
# Side specification (TBLR) treats the group as a contiguous pattern, as if it were a single -- and possibly
# non-rectangular -- pane. In other words, when scanning an edge, if the neighbor is within the specified
# group, that edge is simply ignored. For example "12": "left 12" ignores the edge between "1" and "2",
# producing an edge that is identical to "left 1". Any grouping that is non-contiguous or non-rectangular
# is permitted here, but will be rejected later due to the ambiguity. There is no reason to allow such
# ambiguity for the anticipated callers. For example "12\n34", some ambiguity occurs: "left 124" produces
# edges that are the equivalent of two separate but combined calls, "left 1" and "left 4". Multiple edges are
# not valid for the expected use of edge definitions within the windowgram context, so any panes tangential
# to the edge definition panes should not be included, as they would interfere with edgecore processing.
if direction == "t" or direction == "b":
# Side TB (explicit)
topleft = True if direction == "t" else False
parsedpane = pane1
edgeruns_h += edgecore_swipeside( topleft, parsedpane, group, windowgram_chars_yx )
elif direction == "l" or direction == "r":
# Side LR (explicit)
topleft = True if direction == "l" else False
parsedpane = Windowgram_Convert.Transpose_ParsedPane(pane1)
edgeruns_v += edgecore_swipeside( topleft, parsedpane, group, windowgram_chars_xy )
else: # direction == "v" or direction == "h"
# Axis VH (implicit or explicit)
for paneid2 in windowgram_parsed.keys():
pane2 = windowgram_parsed[paneid2]
if paneid2 in group and paneid1 != paneid2:
run = None
p1x1, p1y1 = pane1['x'], pane1['y']
p1x2, p1y2 = pane1['x'] + pane1['w'] - 1, pane1['y'] + pane1['h'] - 1
p2x1, p2y1 = pane2['x'], pane2['y']
p2x2, p2y2 = pane2['x'] + pane2['w'] - 1, pane2['y'] + pane2['h'] - 1
if direction == "v" or direction is None:
run = edgecore_sideswipe( p1x1, p1x2, p2x1, p2x2, p1y1, p1y2, p2y1, p2y2 )
if run is not None: edgeruns_v += [ run ]
else:
if direction == "h" or direction is None:
run = edgecore_sideswipe( p1y1, p1y2, p2y1, p2y2, p1x1, p1x2, p2x1, p2x2 )
if run is not None: edgeruns_h += [ run ]
# Merge runs ... Neighboring runs are possible since panes are processed independently in edgecore_swipeside
edgeruns_v = edgecore_merger( edgeruns_v ) # Duplicate input runs are possible
edgeruns_h = edgecore_merger( edgeruns_h ) # Duplicate input runs are possible
# Select edge ... Resolves implicit axis
minimal, axis = (edgeruns_v, "v") if edgeruns_v else (edgeruns_h, "h") # Ambiguity is rejected below
# Drop if invalid, with reason
if not edgeruns_v and not edgeruns_h: return EdgeStatus.Irrational, None, None, None # Irrational == No results
if edgeruns_v and edgeruns_h: return EdgeStatus.Ambiguous, None, None, None # Ambiguous == Multiple axis
if len(minimal) > 1: return EdgeStatus.Noncontiguous, None, None, None # Noncontiguous == Multiple edge | Gaps
# Up to now the runs are defined as character-to-character, but they must be edge-to-edge from here on
minimal[0][2] += 1
# Build an optimal run from the minimal run
# The optimal run is most likely to be used, and it's convenient to produce at this point
optimal = edgecore_buildoptimal( windowgram_parsed, axis, group, minimal )
# Done
return EdgeStatus.Valid, axis, minimal, optimal
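# Editor's example of the return format (worked by hand, not captured from the program): for the windowgram
# "12\n12\n", edgecore(wg, "12") describes the vertical boundary between panes "1" and "2" and is expected to
# return ( EdgeStatus.Valid, "v", [[1, 0, 2]], [[1, 0, 2]] ), i.e. the edge at x=1 spanning both rows, with
# minimal and optimal coinciding because the edge already runs the full height.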
# Edge to windowgram edge alignment assessment
def edgecore_windowgramedgealignment(wg, hint, edge):
# Hint must be an axis ... Result: -1 for left/top, -2 for right/bottom, 0 for no alignment
wgw, wgh = wg.Analyze_WidthHeight()
hint = -1 if is_long_axis_vert(hint) else -2 if is_long_axis_horz(hint) else 0
return (-1 if edge[0] == 0 else -2 if edge[0] == (wgw if hint == -1 else wgh) else 0) if hint else 0
# Located with edgecore, but used in conjunction with smudgecore
def edgecore_edgetoedge(axis, edge, width, height):
# Returns -1 for top/left edge, +1 for bottom/right edge, 0 otherwise
if edge[0] == 0: return -1
if edge[0] == (width if axis == "v" else height): return 1
return 0
##
## Smudge core ... This copies the border of specified edge in the perpendicular direction of length
##
## Callers: drag, insert (inline)
##
def smudgecore(wg, edge, axis, length, direction, run=None): # wg
# Must handle smudging beyond windowgram dimensions by enlarging the windowgram to accommodate
wgw, wgh = wg.Analyze_WidthHeight()
span = wgw if axis == "v" else wgh
edge_characters = wg.Edge_Extract(axis, edge, "" if direction == "-" else "-") # Extracts the trailing side
if run is None or set(edge_characters) == {MASKPANE_X} \
or (edge == 0 and direction == "-") or (edge == span and direction == ""):
run = [ edge, 0, len(edge_characters) ] # Full edge for expansion, contraction, or default
fr, to = edge + (-1 if direction == "-" else 0), edge + (-length if direction == "-" else length-1)
fr, to = min(fr, to), max(fr, to)
if fr <= 0: ed, ec, fr = -1, -fr, 0
elif to >= span: ed, ec, to = 1, to-span+1, span-1
else: ed, ec = 0, 0
wgout = Windowgram("")
# Smudge the edge
windowgram_chars, windowgram_chars_from = [], wg.Export_Chars()
if axis == "h": windowgram_chars_from = Windowgram_Convert.Transpose_Chars( windowgram_chars_from )
for iy, line in enumerate(windowgram_chars_from): # Modifications
windowgram_chars += [[(edge_characters[iy] if ix >= fr and ix <= to and iy >= run[1] and iy < run[2] \
else ch) for ix, ch in enumerate(line)]]
for iy, _ in enumerate(windowgram_chars): # Additions
if ed < 0: windowgram_chars[iy] = list(edge_characters[iy] * ec) + windowgram_chars[iy]
else: windowgram_chars[iy] += list(edge_characters[iy] * ec)
if axis == "h": windowgram_chars = Windowgram_Convert.Transpose_Chars( windowgram_chars )
wgout.Import_Chars( windowgram_chars )
# Truncate transparency and return
wgout.Edge_ClipOuterTransparents()
return wgout
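# Editor's illustration (worked by hand from the code above): smudgecore( Windowgram("12\n12\n"), 1, "v", 1, "" )
# copies the trailing-side characters of the edge at x=1 over the next column, producing "11\n11\n"; smudging
# past the windowgram boundary grows it instead, which is why outer transparency is clipped before returning.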
##----------------------------------------------------------------------------------------------------------------------
##
## Flex (windowgram modification console)
##
##----------------------------------------------------------------------------------------------------------------------
##
## Possible improvements:
##
## Proportional scale, using the following aliases:
##
## @ Size of user window proportional to counter axis
## * Size of current windowgram (just an alias for 1x/100%)
##
## scale 20:@ Scale y proportionally according to 20 x (200:100 -> 20:10)
## scale @:* Scale x proportionally to current y (200:100, 100:25 -> 50:25)
## scale @ If @ is specified for both, it's 50% of the window (200:100 -> 100:50)
##
## Possible modifiers:
##
## breakout <pane> [shapes] break with axial concatenated shapes, "2x2; x 2x2 3x1; y 1 3x3 1"
## cycle <panegroup1> <panegroup2> ... cycle if all bounding boxes share full edge, or are of equal size
## swapname <panes1> <panes2> swap if both panes are defined otherwise rename (redundant?)
## blockswap <panes1> <panes2> swaps one block of panes for another, e.g. `BLDbld` with `1` in demo
## subscale <group> <xy> exactly like scale but on a group of panes within the windowgram
## truncate <edge> truncate windowgram at specified edge (loses panes)
## move <group> <edge> same as clone but delete then insert
## jump <group1> <group2> same as clone but delete then replace existing pane/group
##
## Required for another project:
##
## blank <panes> transparency support: convert specified panes "."
## fill transparency support: convert "." into neighboring panes
##
## Other features:
##
## Allow for direct windowgram edit mode, this effectively makes flex a modal editor. Needs ncurses, so it's a
## feature for 3.x.
##
## Expectations:
##
## The object (pane, group, edge) should always be the first argument to any command (with exception for qualifier)
##
## All ordering is in English order: front -> back, top -> bottom, left -> right
##
## TODO:
##
## For edge specifying parameters, consider switching from the current support of either a single-parameter deduced
## edge ("<edge>") or double-parameter specified edge ("<hint> <edge>"), over to a single-parameter variable option
## that may be used for both ("<edge>" or "<hint>@<edge>"). Maybe keep the double-parameter option support as a
## hidden feature unless it becomes a problem for future commands. Note that ":" cannot be used as a delimiter
## since it's used to designate additional panes in the scale group for the "drag" command. This will affect the
## function EdgeProcessing.argument_processor() and the commands that use it.
##
##----------------------------------------------------------------------------------------------------------------------
describe = lambda kwargs: True if 'menu' in kwargs and kwargs['menu'] is True else False
usage_triplets = lambda cmd_dict: [ cmd_dict['usage'][ix:ix+3] for ix in range( 0, len(cmd_dict['usage']), 3 ) ]
##
## Output controls ... Only flex helpers and selectors should use this directly, others should use warnings queue
##
class FlexNotice(object):
def __init__(self, level, message): self.level = level ; self.message = message
def GetLvl(self): return self.level
def GetMsg(self): return self.message
class FlexWarning(FlexNotice):
def __init__(self, message): super( FlexWarning, self ).__init__( 0, message )
class FlexError(FlexNotice):
def __init__(self, message): super( FlexError, self ).__init__( 1, message )
##
## Lists of commands ... Commands are ordered by appearance in source
##
flexmenu_top = [] # List of all commands and aliases (top: user commands)
flexmenu_bot = [] # List of all commands and aliases (bottom: modifiers + user commands)
flexmenu_aliases = [] # List of all aliases (recognized but not displayed)
flexmenu_grouped = {} # List of grouped commands (for the short menus)
##
## Other globals
##
flexmenu_session = None # Session object in global scope for modification by commands
flexmenu_index = [ 0 ] # Selected window, list is for reference purposes only
##
## Flex sense (TODO: Make this a class)
##
flexsense_reset = {
'finished': False, # User exit
'restore': False, # User exit: Restore original
'execute': False, # User exit: Run session
'output': [], # Command output
'notices': [], # Command notices: Print and continue (FlexWarning, FlexError)
'errors': [], # Command errors: Print and exit
}
##
## Flex: Conversion of windowgram metrics
## Supports floating point values (example: "2.5x")
##
def arg_is_multiplier(arg):
if type(arg) is str and len(arg) > 1:
if arg[:-1] == "".join([ch for ch in arg[:-1] if ch in "+-0123456789.,"]):
if arg[-1:] == "x" or arg[-1:] == "X" or arg[-1:] == "*": return True
return False
def arg_is_percentage(arg):
if type(arg) is str and len(arg) > 1:
if arg[:-1] == "".join([ch for ch in arg[:-1] if ch in "+-0123456789.,"]):
if arg[-1:] == "%": return True
return False
def arg_is_characters(arg):
try:
_ = int(arg)
return True
except ValueError:
return False
def size_GetType(arg):
# Return type or None if invalid
if arg_is_multiplier(arg): return "multiplier"
if arg_is_percentage(arg): return "percentage"
if arg_is_characters(arg): return "characters"
return None
def size_GreaterOrEqualToBaseCharacters(arg, base_characters):
# If the parameter is greater or equal to 100%, 1x, or base_characters
if size_GetType(arg) is not None:
if arg_is_multiplier(arg): return True if float(arg[:-1]) >= 1.0 else False
if arg_is_percentage(arg): return True if float(arg[:-1]) >= 100.0 else False
if arg_is_characters(arg): return True if int(arg) >= base_characters else False
return None
def size_ConvertToCharacters(arg, base_characters):
if size_GetType(arg) is not None:
if arg_is_multiplier(arg): return int(float(base_characters) * float(arg[:-1]))
if arg_is_percentage(arg): return int(float(base_characters) * (float(arg[:-1]) / 100.0))
if arg_is_characters(arg): return int(arg)
return None
def size_ValidUnit(arg):
# Return None if not percentage or multiplier, otherwise True if unit (0.0x-1.0x or 0.0%-100.0%) else False
if size_GetType(arg) is not None:
if arg_is_multiplier(arg): return True if float(arg[:-1]) >= 0.0 and float(arg[:-1]) <= 1.0 else False
if arg_is_percentage(arg): return True if float(arg[:-1]) >= 0.0 and float(arg[:-1]) <= 100.0 else False
return None
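# Editor's examples of the helpers above: size_GetType("2x") == "multiplier", size_GetType("150%") ==
# "percentage", size_GetType("40") == "characters"; size_ConvertToCharacters("150%", 80) == 120 and
# size_ConvertToCharacters("2x", 80) == 160, with size_ValidUnit returning False for multipliers or
# percentages outside the 0-1x / 0-100% unit range.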
##
## Flex: Handling of newpanes parameter
##
def newpanes_RebuildPaneListsInPreferentialOrder(used, unused, newpanes):
# Identify last valid pane in list while rebuilding unused pane list in a preferential order
work, unused = unused, ""
lastpaneid = ""
for paneid in list(newpanes):
if paneid in PANE_CHARACTERS: lastpaneid = paneid # Last valid paneid
if paneid in work and paneid not in used: unused += paneid # Ignore invalid panes
work, used = PaneList_MovePanes( work, used, unused )
# Combine by next highest match
ix = 0 # In case of empty set
for chkix, paneid in enumerate(list(work)):
if PANE_CHARACTERS.find(paneid) >= PANE_CHARACTERS.find(lastpaneid): ix = chkix ; break
unused += work[ix:] + work[:ix] # Ordered by assignment availability, rooted by lastpaneid
# Return both (note only unused is preferentially reordered)
return used, unused
##
## Flex: Other macros
##
def panes_in_use_message_generate(panes_in_use):
if not panes_in_use:
return None
print_panes = "pane" + ("s" if len(panes_in_use) > 1 else "")
print_isare = "are" if len(panes_in_use) > 1 else "is"
return "Specified " + print_panes + " (" + panes_in_use + ") " + print_isare + " already in use"
##
## Decorator for building flex commands
##
class flex(object):
def __init__(self, command="", examples=[], description=None, aliases=[], group="", insert=False):
self.command_only = command
self.description = description
self.examples = examples
self.aliases = aliases
self.group = group
self.insert = insert
def __call__(self, function):
# From function build usage
self.usage = self.command_only
self.arglens = [ 0, 0 ] # [ Required, Total ]
spec = inspect.getfullargspec(function) # getargspec was removed in Python 3.11; getfullargspec provides the same fields used here
la = len(spec.args) if spec.args else 0
ld = len(spec.defaults) if spec.defaults else 0
class NoDefault: pass # Placeholder since None is a valid default argument
args_with_defaults = [ ( spec.args[ix], (NoDefault if ix < la-ld else spec.defaults[ix-(la-ld)]) ) \
for ix in range(0, len(spec.args)) ]
brackets = lambda optional: "[]" if optional else "<>"
def tagged(arg, tag): return True if tag in arg else False
def clipped(arg, tags):
arg = arg
for tag in tags:
if arg.find(tag) >= 0: arg = arg[:arg.find(tag)] + arg[arg.find(tag)+len(tag):]
return arg
for arg, default in args_with_defaults:
# Regular argument types (normally required):
# _PRIVATE This argument is never shown to the user
# _OPTIONAL Listed as optional even if a default parameter was not specified
# =Default Listed as optional if a default parameter was specified
if not tagged(arg, "_PRIVATE"):
self.arglens[1] += 1
if default is NoDefault: self.arglens[0] += 1
optional = True if tagged(arg, "_OPTIONAL") else False
arg = clipped( arg, ["_OPTIONAL"] ) # Clip markers before printing
enclosed = brackets( default is not NoDefault or optional )
self.usage += " " + enclosed[0] + arg + enclosed[1]
if spec.varargs:
# Variable argument types (normally optional):
# _REQUIRED Makes the parameter required where it is normally optional
varargs = spec.varargs
required = optional = False
required = True if tagged(varargs, "_REQUIRED") else False
varargs = clipped( varargs, ["_REQUIRED"] ) # Clip markers before printing
enclosed = brackets( not required )
self.usage += " " + enclosed[0] + varargs + "..." + enclosed[1]
if required: self.arglens[0] += 1 # If required then varargs is [REQ+1, -1] instead of [REQ, -1]
self.arglens[1] = -1 # Represents use of *args
# Adds new menu item, or appends usage and examples if it already exists
# Description is only used on first occurrence of the command, successive commands append without description
append = False
for entdict in flexmenu_top + flexmenu_bot:
if entdict['about'][0] == self.command_only:
entdict['funcs'] += [ function ]
entdict['usage'] += [ self.usage, self.examples, self.arglens ]
entdict['group'] += [ self.group ]
append = True
if not append:
obj = {
'funcs': [ function ],
'about': [ self.command_only, self.description ],
'usage': [ self.usage, self.examples, self.arglens ],
'group': [ self.group ]
}
if self.insert: menu = flexmenu_top
else: menu = flexmenu_bot
menu.append( obj )
# Add aliases if any
for ix, alias_tup in enumerate(self.aliases):
if type(alias_tup) is not list:
print("Flex command indexing error: " + self.command_only + " alias #" + str(1+ix) + " is not a list")
exit()
if len(alias_tup) != 2:
print("Flex command indexing error: " + self.command_only + " alias #" + str(1+ix) + " is not a pair")
exit()
flexmenu_aliases.append( alias_tup )
# Grouped commands
if not self.group in flexmenu_grouped: flexmenu_grouped[self.group] = []
if not self.command_only in flexmenu_grouped[self.group]:
flexmenu_grouped[self.group].append(self.command_only)
# Function wrapper
def wrapper(*args):
return function(*args)
return wrapper
##
## Flex modifier pointers parameter ... Because modifiers are in a separate module, this pointers parameter is required
##
class FlexPointersParameter(object):
def __init__(self, flexmenu_session, wg_ptr, flexsense_ptr):
self.flexmenu_session = flexmenu_session
self.wg = wg_ptr
self.flexsense = flexsense_ptr
return None
##
## Flex automated processor for one or more commands in the flex group "modifiers"
##
## * Supports multiple commands ("cmd 1 ; cmd 2 ; cmd 3")
## * Only commands from the flex group "modifiers" are supported
## * No command ambiguity
## * No command aliases
## * Processing halts on flex warning or error (if noticesok == False)
##
## Used by
##
## * Unit testing
## * Stack reprocessor (when stack support is added)
## * Other macroing may use this
##
def flex_processor(wg, commands, noticesok=False): # -> error
processed = found = False
for command in commands.split(";"):
command = command.strip()
command, arguments = re.split(r"[ \t]+", command)[:1][0], re.split(r"[ \t]+", command)[1:]
for cmd_dict in flexmenu_top + flexmenu_bot:
for ix, triplet in enumerate(usage_triplets(cmd_dict)):
usage, examples, arglens = triplet
group = cmd_dict['group'][ix]
if group == "modifiers":
funcname = cmd_dict['about'][0]
if funcname == command:
found = True
if len(arguments) >= arglens[0] and (len(arguments) <= arglens[1] or arglens[1] == -1):
# Prepare for new command
flexsense = copy.deepcopy( flexsense_reset )
# Execute
args = [ FlexPointersParameter( None, wg, flexsense ) ] + arguments
cmd_dict['funcs'][ix]( *args )
# Error handler
if flexsense['notices'] and not noticesok:
output = "There were warnings or errors when processing: " + commands + "\n"
output = output+"\n".join([ "* "+warn.GetMsg() for warn in flexsense['notices'] ])+"\n"
return output
# Processed
processed = True
if not found: return "Command not found: " + commands + "\n"
if not processed: return "Command argument mismatch: " + commands + "\n"
return None
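# Editor's usage sketch (hypothetical windowgram value): unit tests and other macro callers drive the
# "modifiers" group through this entry point, roughly as follows, expecting None on success and an error
# string otherwise:
#
#     wg = Windowgram( "1" )
#     error = flex_processor( wg, "scale 2x ; scale 50%" )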
##
## Flex Edge Processing (includes handling of hints, edges, and scalegroups)
##
class EdgeProcessing:
@staticmethod
def argument_processor(hint, edge, used, unused, getsc): # -> error, res_hint, res_edge, res_scalegroups
# Process the edge parameter for flex commands: drag, insert, clone ... Handles swapping of hint and edge
ignore = "*@:" # The "@" is for when the parameters hint and edge are combined
# Reduce edge and resolve hint
swapped = ""
res_hint = resolve_vhtblr( hint ) if hint != "" else ""
res_edge = thruvalid_panes( edge, ignore )
if not res_hint or not res_edge:
swapped_hint = resolve_vhtblr( edge ) if edge != "" else ""
swapped_edge = thruvalid_panes( hint, ignore )
if swapped_hint and res_edge:
res_hint, res_edge = swapped_hint, swapped_edge
swapped = "swapped "
# From here on use res_hint, res_edge
hint = edge = None
# Error handlers
if res_hint is None: # A blank string for the hint is considered valid
return ( "The " + swapped + "hint is unrecognized" ), None, None, None
if not res_edge: return ( "The " + swapped + "edge contains invalid pane characters" ), None, None, None
areused, areunused, invalids = classify_panes( used + ignore, unused, res_edge )
if invalids: return ( \
"Panes (" + invalids + ") in " + swapped + "edge (" + res_edge + ") are invalid" ), None, None, None
if areunused: return ( \
"Panes (" + areunused + ") in " + swapped + "edge (" + res_edge + ") are not being used" ), None, None, None
# Separate res_edge into res_edge and res_scalegroups[]
res_scalegroups = []
if ":" in res_edge:
if not getsc:
return ( "The user provided scalegroup(s), but they are unsupported by this command" ), None, None, None
scalegroup_list = res_edge.split(":")
res_edge, res_scalegroups = scalegroup_list[0], scalegroup_list[1:]
# Replace character "*" with all panes
if "*" in res_edge:
res_edge = AllPanes( res_edge, used )
res_scalegroups, res_scalegroups_in = [], res_scalegroups
for res_scalegroup in res_scalegroups_in:
if "*" in res_scalegroup:
res_scalegroups.append( AllPanes( res_scalegroup, used ) )
else:
res_scalegroups.append( res_scalegroup )
        # The caller should use these resolved variables in place of those provided by the user
return None, res_hint, res_edge, res_scalegroups
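##
## Usage note (illustrative, based on the processing above): the hint and edge may be given in either order,
## so EdgeProcessing.argument_processor( "vertical", "12", used, unused, True ) and
## EdgeProcessing.argument_processor( "12", "vertical", used, unused, True ) are expected to resolve to the
## same result, with the latter reported as "swapped" in any error messages.
##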
##
## Flex: Reset
##
## Notes:
##
## Like creating a new window without creating a new window
##
@flex(
command = "reset",
group = "modifiers",
description = "Resets the windowgram as if creating a new window.",
# TODO: Rename to one of these so there is no two-letter conflict with the rename command
aliases = [ ["blank", "reset "], ["begin", "reset "], ["initialize", "reset "], ["clear", "reset "], ],
)
def cmd_reset(fpp_PRIVATE):
fpp_PRIVATE.wg.Import_Wg( Windowgram( NEW_WINDOWGRAM ) )
##
## Flex: Scale
##
## Analogues:
##
## Drag * may be used for scale
##
@flex(
command = "scale",
group = "modifiers",
examples = [ "scale 25", "scale 500%", "scale 2x", "scale 64:36", "scale 64x36" ],
description = "Scale the windowgram. Valid parameters are multipliers (x), percentages (%), exact character " + \
"dimensions, or any combination thereof. Use a space ( ), colon (:), or times (x) to separate " + \
"the x and y axis. If only one axis is specified then the value will be applied to both x and " + \
"y. When specifying both, any valid combination will work, including mixing multipliers with " + \
"the times separator, for example \"2xx2x\", \"200%x2x\", \"2xx200%\", etc.",
aliases = [ ["size", "scale "],
["half", "scale 50%"], ["double", "scale 2x"],
["wider", "scale 200%:100%"], ["thinner", "scale 50%:100%"],
["taller", "scale 100%:200%"], ["shorter", "scale 100%:50%"],
["higher", "scale 100%:200%"], ["lower", "scale 100%:50%"], ],
)
def cmd_scale_1(fpp_PRIVATE, xy_how): # 1 parameter
# Wrapper for two parameter scale, splits "64:36" and "64x36" into "64 36", works with percentages and multipliers
if ":" in xy_how:
if xy_how.count(":") == 1: return cmd_scale_2( fpp_PRIVATE, *xy_how.split(":") )
elif "x" in xy_how:
count = xy_how.count("x")
endwx = xy_how.endswith("x")
if count == 1 and not endwx:
return cmd_scale_2( fpp_PRIVATE, *xy_how.split("x") )
if count == 2:
if endwx: return cmd_scale_2( fpp_PRIVATE, *xy_how.split("x", 1) )
else: return cmd_scale_2( fpp_PRIVATE, *xy_how.rsplit("x", 1) )
if count == 3 and endwx:
parts = xy_how.split("x", 2)
return cmd_scale_2( fpp_PRIVATE, parts[0]+"x", parts[2] )
# All others are simply cloned like "2x" into "2x 2x"
return cmd_scale_2( fpp_PRIVATE, xy_how, xy_how )
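##
## Illustrative parses of the one-parameter form, derived from the splitting logic above:
##
##      "64:36"    ->  cmd_scale_2( "64", "36" )
##      "64x36"    ->  cmd_scale_2( "64", "36" )
##      "2xx2x"    ->  cmd_scale_2( "2x", "2x" )
##      "200%x2x"  ->  cmd_scale_2( "200%", "2x" )
##      "2xx200%"  ->  cmd_scale_2( "2x", "200%" )
##      "2x"       ->  cmd_scale_2( "2x", "2x" )
##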
@flex(
command = "scale",
group = "modifiers",
examples = [ "scale 25 15", "scale 200% 50%", "scale 2x .5x" ],
)
def cmd_scale_2(fpp_PRIVATE, x_how, y_how): # 2 parameters
# Because text is inherently low resolution, fractional scaling may produce unsatisfactory results
# Generics
wg_before = fpp_PRIVATE.wg
dim_before = wg_before.Analyze_WidthHeight()
# Convert to common float multipliers for easy scaling
args = [ dim_before[0], dim_before[1] ] # Default to no scale on error
for ix, arg in enumerate([x_how, y_how]):
args[ix] = size_ConvertToCharacters(arg, dim_before[ix])
if args[ix] is None:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Invalid size parameter: " + arg ) )
w_chars, h_chars = args
# Scale the windowgram
wg_after = Windowgram( scalecore( wg_before.Export_String(), w_chars, h_chars ) )
# Verify new windowgram (in case of scale error)
dim_result = wg_after.Analyze_WidthHeight()
if not dim_result[0] or not dim_result[1]:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Scale produced a blank windowgram, skipping" ) )
if dim_result[0] != w_chars or dim_result[1] != h_chars:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Scale produced erroneous result, skipping" ) )
# Alert user to any panes lost
lost_panes = PaneList_DiffLost( wg_before, wg_after )
if len(lost_panes):
fpp_PRIVATE.flexsense['notices'].append(
FlexWarning( "Lost " + str( len(lost_panes) ) + " panes: " + lost_panes ) )
# Replace windowgram
fpp_PRIVATE.wg.Import_Wg( wg_after )
##
## Flex: Add
##
## Analogues:
##
## Add may be used for insert at windowgram edge
##
@flex(
command = "add",
group = "modifiers",
examples = [ "add right 50% A", "add b 3", "add l .5x" ],
description = "Append pane to windowgram edge. Edge is identified by name (e.g., right), or a variety of " + \
"abbreviations (e.g., r, rt). The size of the pane may be defined as an exact character size, " + \
"a percentage (%), or a multiplier (x). If [newpane] is not specified, lowest available will " + \
"be used.",
aliases = [ ["append", "add "], ["app", "add "] ],
)
def cmd_add(fpp_PRIVATE, edge, size, newpane=None):
wg_work = fpp_PRIVATE.wg
newpane, error = wg_work.Panes_GetNewPaneId( newpane )
if error:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Unable to secure a new pane id: " + error ) )
# Convert axis-flag to ix to avoid rewrite
axis_as_vh, negate_flag = direction_to_axiswithflag(edge)
if axis_as_vh == "v" and negate_flag == False: ix = 0 # Top
elif axis_as_vh == "v" and negate_flag == True: ix = 1 # Bottom
elif axis_as_vh == "h" and negate_flag == True: ix = 2 # Right
elif axis_as_vh == "h" and negate_flag == False: ix = 3 # Left
else: ix = None
# Process
if ix is not None:
# ix = 0123 == TBRL | NSEW
windowgram_lines = wg_work.Export_Lines()
axis_length = len(windowgram_lines) if (ix == 0 or ix == 1) else len(windowgram_lines[0])
axis_width = len(windowgram_lines) if (ix == 2 or ix == 3) else len(windowgram_lines[0])
size_chars = size_ConvertToCharacters( size, axis_length )
if size_chars is None:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Invalid size parameter: " + size ) )
if ix == 0: # Top
for _ in range( size_chars ): windowgram_lines.insert( 0, newpane * axis_width )
elif ix == 1: # Bottom
for _ in range( size_chars ): windowgram_lines.append( newpane * axis_width )
elif ix == 2: # Right
windowgram_lines = [ line + (newpane * size_chars) for line in windowgram_lines ]
elif ix == 3: # Left
windowgram_lines = [ (newpane * size_chars) + line for line in windowgram_lines ]
# Detect when addition doesn't register and notify user as warning
wg_compare_before = wg_work.Export_String()
wg_work.Import_Lines( windowgram_lines )
wg_compare_after = wg_work.Export_String()
if wg_compare_before == wg_compare_after:
return fpp_PRIVATE.flexsense['notices'].append( FlexWarning( "Addition was too small to register" ) )
# Replace windowgram
fpp_PRIVATE.wg.Import_Wg( wg_work )
# Done
return
# Edge not found
return fpp_PRIVATE.flexsense['notices'].append( FlexError(
"The edge you specified is invalid, please specify either: top, bottom, left, or right" ) )
##
## Flex: Break
##
## Analogues:
##
## Break may be used for split 50%
##
## Notes:
##
## Avoiding unnecessary complexity. It's easy to incorporate support for group as target. Such an algorithm would
## break all panes in the group equally and apply the newpanes linearly. This would have to avoid size explosions
## to be reasonably usable, accomplished by finding the most efficient break sequence (common divisors). However
## this seems a bit unnecessary, since these situations are more easily managed by the user of flex; simply perform
## the breaks in a sequence that yields personally satisfactory results. Since this is already possible with flex,
## implementing such a feature would not add much practical benefit and only complicate the function.
##
@flex(
command = "break",
group = "modifiers",
examples = [ "break 1 3x3", "break 0 3x1 x", "break z 3x2 IVXLCD" ],
description = "Break a pane into a grid of specified dimensions. The new panes are guaranteed to be of equal " + \
"size, by automatically scaling up the windowgram as necessary. The [newpanes] parameter is an " + \
"optional starting pane id, or pane rename sequence.",
aliases = [ ["grid", "break "], ["panes", "break "], ],
)
def cmd_break(fpp_PRIVATE, pane, grid, newpanes=None):
# In order to produce a break of even proportions, we have to scale this windowgram up to next best fit. It
# could go one step further and find the most optimal size, being a resolution that evenly scales the original
# windowgram constituent panes, while simultaneously providing a grid of even sizes. The problem is that common
# use cases would result in massive sizes to accommodate; though accurate, it would not be very practical.
wg = fpp_PRIVATE.wg
used, unused = wg.Panes_GetUsedUnused()
if pane not in PANE_CHARACTERS:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "The pane you specified is invalid" ) )
elif pane in unused:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "The pane you specified does not exist" ) )
# Grid analysis and validity check
gw = gh = panes = 0
reason = "Grid parameter is invalid: " + grid
if grid.count("x") == 1:
gw, gh = grid.split("x")
if gw.isdigit() and gh.isdigit():
gw, gh = int(gw), int(gh)
panes = gw * gh
len_unused = len(unused) + 1 # The +1 accounts for the target pane that becomes available
if not panes:
reason = "Grid you specified results in no panes"
elif panes > len(PANE_CHARACTERS):
reason = "Grid is " + str(panes) + " panes, exceeding max of " + str(len(PANE_CHARACTERS))
elif panes > len_unused:
reason = "Grid is " + str(panes) + " panes, only " + str(len_unused) + " will be available"
else:
reason = None # No error
if reason is not None:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( reason ) )
# Extract the dimensions of the pane to determine requisite scale (if any)
wg_w, wg_h = wg.Analyze_WidthHeight()
px, py, pw, ph = wg.Panes_PaneXYWH( pane )
# Perform a scale if needed
scale_to = lambda r1, r2: (((float(int(r1/r2))+1.0)*r2) if (r1 % r2) else r1) if (r1 > r2) else r2
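    # The scale_to lambda rounds r1 up to the nearest multiple of r2 (and never below r2), so the grid divides evenly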
scale_to_pane_w = int( scale_to( float(pw), float(gw) ) ) # Pane target x
scale_to_pane_h = int( scale_to( float(ph), float(gh) ) ) # Pane target y
stw_w = int(float(wg_w) * float(scale_to_pane_w) / float(pw)) # Window target x
stw_h = int(float(wg_h) * float(scale_to_pane_h) / float(ph)) # Window target y
# Scale
wg_new = Windowgram( scalecore(
wg.Export_String(), stw_w, stw_h, ( pane, scale_to_pane_w, scale_to_pane_h ) ) )
_, _, npw, nph = wg_new.Panes_PaneXYWH( pane )
# Validate
if (npw != scale_to_pane_w or nph != scale_to_pane_h):
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "The result is not the expected pane size" ) )
# Swap
wg = wg_new
# Dimensions must be reloaded in the event that the windowgram was scaled
wg_w, wg_h = wg.Analyze_WidthHeight()
px, py, pw, ph = wg.Panes_PaneXYWH( pane )
# Manually move availability of pane so it may be reused
used, unused = PaneList_MovePanes( used, unused, pane )
# Set starting panes. By default this starts at the lowest unused pane id and iterates forward. However
# the user may specify a pane to start the iteration at, for example if it's a 3x2 grid (6 panes produced):
# specified == (None) produces == 012345
# A ABCDEF
# BLN BLNOPQ
# BLN1 BLN123
if newpanes:
panes_in_use = "".join([ch for ch in newpanes if ch not in unused and ch != pane])
panes_in_use_message = panes_in_use_message_generate( panes_in_use )
if panes_in_use_message:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( panes_in_use_message ) )
used, unused = newpanes_RebuildPaneListsInPreferentialOrder( used, unused, newpanes )
# Replace pane with grid
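    # Each character of the target pane maps to a grid cell: its row/column offset within the pane is scaled into
    # grid coordinates, and the resulting cell index selects the replacement pane id from the unused list (which
    # was reordered above if [newpanes] was specified); all other characters pass through unchanged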
windowgram_lines = wg.Export_Lines()
wg.Import_Lines( [
"".join( [ ch if ch != pane else unused[int((iy-py+1)*gh/ph)*gw+int((ix-px+1)*gw/pw)] \
for ix, ch in enumerate(list(line)) ] ) for iy, line in enumerate(windowgram_lines)
] )
# Replace windowgram
fpp_PRIVATE.wg.Import_Wg( wg )
##
## Flex: Join
##
## Analogues:
##
## Join may be used for rename: join <old>.<new>
## Join may be used for swap: join <one>.<two> <two>.<one>
##
## Notes:
##
## Join could be seen as a type of rename, and was used for rename and swap prior to those implementations
##
@flex(
command = "join",
group = "modifiers",
examples = [ "join abcd efgh", "join abcd.x efgh.y" ],
description = "Joins a contiguous group of panes into a single pane. Multiple joins are supported. The " + \
"joined pane is named after the first pane specified in the group, but this can be renamed by " + \
"adding a dot (.) followed by the new pane id.",
aliases = [ ["group", "join "], ["merge", "join "], ["glue", "join "], ],
)
def cmd_join(fpp_PRIVATE, *groups_REQUIRED):
groups = groups_REQUIRED # Readability
argument = lambda ix: str(ix+1) + " (\"" + groups_REQUIRED[ix] + "\")" # Show the group that the user specified
wg = fpp_PRIVATE.wg
# Repackage groups so all have the rename element
work, groups = groups, []
for group in work: groups.append( group if "." in group else (group + "." + group[0]) )
# Walk all groups and build join lists
panes_clipped = ""
for ix, group in enumerate(groups):
try:
# Make sure group is superficially valid
if group.count(".") > 1: raise Exception("Argument contains more than one rename delimiter")
invalids = "".join([ ch for ch in set(group) if ch not in PANE_CHARACTERS and ch != "." ])
if invalids: raise Exception("Group contains invalid characters: " + invalids)
# Verify rename and quietly strip duplicate panes
group_l, group_r = group.split(".")
if len(group_r) == 0: raise Exception("Rename delimiter used but subsequent pane unspecified")
if len(group_r) > 1: raise Exception("Only one pane should be specified after the rename delimiter")
group = "".join( [ ch for ch in sorted(set(group_l), key=lambda x: group.find(x)) ] ) + "." + group_r
# Build group, simulate clip, test presence
notfound = ""
for ch in [ ch for ch in PANE_CHARACTERS if ch in set(group.split(".")[0]) ]: # Ordered set
if ch in panes_clipped: raise Exception("Pane \"" + ch + "\" was already used by a previous group")
if not wg.Panes_HasPane( ch ): notfound += ch
else: panes_clipped += ch
if notfound:
raise Exception("Windowgram does not have pane" + ("(s) " if len(notfound)-1 else " ") + notfound)
except Exception as error:
return fpp_PRIVATE.flexsense['notices'].append( FlexError(
"Error with argument " + argument(ix) + ": " + str(error) ) )
# Test the duplication of target panes by matching them against availability adjusted for panes clipped
used, unused = wg.Panes_GetUsedUnused()
used = "".join(list(set(used) - set(panes_clipped)))
unused = "".join( [ ch for ch in PANE_CHARACTERS if ch in (unused + panes_clipped) ] )
for ix, group in enumerate(groups):
try:
group_l, group_r = group.split(".")
if group_r in used:
raise Exception("Attempting to rename to pane " + group_r + " when it's in use")
used += group_r
except Exception as error:
return fpp_PRIVATE.flexsense['notices'].append( FlexError(
"Error with argument " + argument(ix) + ": " + str(error) ) )
# Perform the joins, detecting pane gaps in the group, resulting windowgram is paired for later merging
joins = []
    for ix, group in enumerate(groups):
# Join preprocessing
group_l, group_r = group.split(".")
result, suggestions = groupcore(wg, group_l)
if result is GroupStatus.Invalid_Panes: # Occurs if groupcore() panes parameter is invalid
return fpp_PRIVATE.flexsense['notices'].append( FlexError(
"Group #" + argument(ix) + " contains invalid panes" ) )
if result is GroupStatus.Insufficient_Panes:
return fpp_PRIVATE.flexsense['notices'].append( FlexError(
"Group #" + argument(ix) + " isn't whole, but it would be if you add: " + suggestions ) )
# Join ... By now the group is fully vetted: entirely valid, rectangularly whole
pair_w = Windowgram( wg.Export_String() )
pair_m = Windowgram_Mask_Generate( pair_w, group_l )
pair_w.Panes_Renamer( group_l, group_r )
joins.append( [ pair_w, pair_m ] )
# A separate merge step is required to prevent name conflicts where the user makes use of the rename option.
wg.Import_Mosaic( ( wg, joins ) )
# Replace windowgram
fpp_PRIVATE.wg.Import_Wg( wg )
##
## Flex: Split
##
## Analogues:
##
## Break may be used for split 50%
##
## Expectations (for testing):
##
## If any of the specified newpanes are invalid, return error
## A negative flag for edges (tblr) is ignored, but used for axis (vh)
##
## TODO:
##
## Possible reordering detection, "split v h" (where "v" is the pane)
## Senses reordering, e.g. "split horz v", and if unable to determine defaults to pane first
##
@flex(
command = "split",
group = "modifiers",
examples = [ "split 1 bottom 3", "split 1 vertical -3", "split 0 left 25% LR" ],
description = "Splits single pane along axis (vert, horz), or from an edge (top, left). For axis, a negation " + \
"of [size] inverses the split. If unspecified, [size] defaults to 50%. Optional [newpanes] " + \
"parameter will rename the panes from newest to oldest (2 panes maximum).",
)
def cmd_split(fpp_PRIVATE, pane, how, size=None, newpanes=None):
wg = fpp_PRIVATE.wg
used, unused = wg.Panes_GetUsedUnused()
axis = how # This argument is handled as an axis
# Set the default size if unspecified
if size is None: size = "50%"
# Verify pane
if pane not in PANE_CHARACTERS:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "The pane you specified is invalid" ) )
elif pane in unused:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "The pane you specified does not exist" ) )
# Verify axis and reduce to "v" or "h"
inverse = "-" if size[0] == "-" else ""
showinv = inverse # Show inverse flag by default
if is_long_axis_vert(axis): axis = "v"
elif is_long_axis_horz(axis): axis = "h"
else:
if size[0] == "-":
return fpp_PRIVATE.flexsense['notices'].append(
FlexError( "Negative size only valid if `how` is vert or horz" ) )
axis, negate_flag = direction_to_axiswithflag(axis)
if axis is None or negate_flag is None:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "The axis you specified is invalid" ) )
inverse = "-" if negate_flag else ""
showinv = "" # For TBRL do not show inverse flag
# Get axis_length
px, py, pw, ph = wg.Panes_PaneXYWH(pane)
axis_length = pw if axis == "h" else ph
# Verify pane is large enough to split
if pw < 2 and ph < 2: # Single character pane
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Pane is too small to be split" ) )
if axis_length < 2: # Single character length on the specified axis
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Pane is too small to be split in that way" ) )
# Verify size
(error, inverse, size_chars), size = resolve_size(size, axis_length, inverse, showinv), None
if error: return fpp_PRIVATE.flexsense['notices'].append( FlexError( error ) )
if inverse: size_chars = axis_length - size_chars # Flip
# Verify newpanes ... Set to first available if not specified
if len(unused) < 1: return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Insufficient panes to split" ) )
if newpanes is None: newpanes = ""
if len(newpanes) == 0: newpanes += unused[0] # New pane is first available
if len(newpanes) == 1: newpanes += pane # Base pane
if len(newpanes) > 2: return fpp_PRIVATE.flexsense['notices'].append( FlexError(
"Parameter newpanes exceeds the function maximum of two panes" ) )
for ch in set(newpanes):
if not ch in PANE_CHARACTERS: return fpp_PRIVATE.flexsense['notices'].append( FlexError(
"Invalid pane in newpanes parameter: " + ch ) )
panes_in_use = "".join([ch for ch in newpanes if ch not in unused and ch != pane])
panes_in_use_message = panes_in_use_message_generate( panes_in_use )
if panes_in_use_message: return fpp_PRIVATE.flexsense['notices'].append( FlexError( panes_in_use_message ) )
used, unused = newpanes_RebuildPaneListsInPreferentialOrder( used, unused, newpanes )
# Reorder the newpanes to match fill logic expectations
newpanes = newpanes[:2] if not inverse else newpanes[1::-1]
# Perform the split
src_lines, dst_lines = wg.Export_Lines(), []
sx, sy = px + size_chars if axis == "h" else 0, py + size_chars if axis == "v" else 0
for iy, line in enumerate(src_lines):
if axis == "v": line = "".join( [ newpanes[0 if iy < sy-1 else 1] if ch == pane else ch \
for ch in list(line) ] )
if axis == "h": line = "".join( [ newpanes[0 if ix < sx-1 else 1] if ch == pane else ch \
for ix, ch in enumerate(list(line)) ] )
dst_lines.append( line )
wg.Import_Lines( dst_lines )
# Replace windowgram
fpp_PRIVATE.wg.Import_Wg( wg )
##
## Flex: Rename
##
## Analogues:
##
## Join may be used for rename: join <old>.<new>
## Rename may be used for swap: rename <old><new> <new><old>
##
## Tests:
##
## new rename.t1 ; break 1 3x2 ABCabc ; rename AaBb BbAa
## new rename.t2 ; break 1 3x2 ABCabc ; rename Aa Bb Bb Cc Cc Aa
## new rename.t3 ; break 1 2x2 1 ; rename 12 21 34 43
## new rename.t4 ; break 1 2x2 1 ; rename 1 2 2 1 3 4 4 3
##
@flex(
command = "rename",
group = "modifiers",
examples = [ "rename Ff Tt", "rename F T f t" ],
description = "Renames from one pane or group, to another pane or group. Paired as <from> <to>. Multiple " + \
"pairs may be specified.",
)
def cmd_rename(fpp_PRIVATE, panes_from, *panes_to):
# This command could have wrapped join, but a native implementation has been made to reduce overhead somewhat
wg = fpp_PRIVATE.wg
used, unused = wg.Panes_GetUsedUnused()
pairs = [ panes_from ] + [ arg for arg in panes_to ]
if len(pairs)&1:
return fpp_PRIVATE.flexsense['notices'].append(
FlexError( "Insufficient data, every <from> must be followed by <to>" ) )
pairs = [ pairs[i*2:i*2+2] for i in range(len(pairs)>>1) ]
    # The <from> and <to> ends are parsed in separate passes to allow any pair ordering; join supports the same effect
def proc_list(which): # error or None
nonlocal save_f, work_f, work_t
pair = "1"
for f, t in pairs:
# Counts must match (1:1)
if len(f) != len(t):
return "Pair " + pair + " count error, both <from> and <to> pane counts must be identical"
# Check for self rename (this could be safely ignored)
selfrename = [ fi for fi, ti in zip(list(f), list(t)) if fi == ti ]
if selfrename:
return "Pane `" + selfrename[0] + "` renames to self in pair " + pair
# Iterate all panes in this argument and validate
fort = f if not which else t
for paneid in fort:
if not paneid in PANE_CHARACTERS:
return "Invalid pane `" + paneid + "` in pair " + pair
if which == 0: # Only From
if paneid in work_f: # The pane must not have been previously freed
return "The <from> pane `" + paneid + "` in pair " + pair + " was renamed by another pair"
if not paneid in used: # The pane must already be in use
return "The <from> pane `" + paneid + "` in pair " + pair + " is not being used"
if which == 1: # Only To
if paneid in work_t: # The pane must not have been previously named
return "The <to> pane `" + paneid + "` in pair " + pair + " was already named by another pair"
if not paneid in unused + save_f: # The pane must be unavailable
return "The <to> pane `" + paneid + "` in pair " + pair + " is already being used"
# Next
work_f += f
work_t += t
pair = str( int(pair) + 1 )
return None
# Validate pair lists in order of: from, to
work_f = work_t = save_f = ""
error = proc_list(0) # From
if error: return fpp_PRIVATE.flexsense['notices'].append( FlexError( error ) )
save_f, work_f, work_t = work_f, "", "" # Retention required for second pass validation
error = proc_list(1) # To
if error: return fpp_PRIVATE.flexsense['notices'].append( FlexError( error ) )
# Perform the renames independently, result is paired with a mask and stored in a list for use in a mosaic
renames = []
for f, t in pairs:
for pf, pt in zip(f, t):
# Rename ... By now fully vetted
pair_w = Windowgram( wg.Export_String() )
pair_m = Windowgram_Mask_Generate( pair_w, pf )
pair_w.Panes_Renamer( pf, pt )
renames.append( [ pair_w, pair_m ] )
# A separate merge step is required
wg.Import_Mosaic( ( wg, renames ) )
# Replace windowgram
fpp_PRIVATE.wg.Import_Wg( wg )
##
## Flex: Swap
##
## Analogues:
##
## Join may be used for swap: join <one>.<two> <two>.<one>
## Rename may be used for swap: rename <old><new> <new><old>
##
## Notes:
##
## This was going to be simple single pane swap, but decided to go for the same flexibility as rename
## Because of this, much of the code between rename and swap is the same or similar
##
@flex(
command = "swap",
group = "modifiers",
examples = [ "swap A B", "swap Aa Bb 1 2" ],
description = "Swaps one pane or group, with another pane or group. Paired as <from> <to>. Multiple " + \
"pairs may be specified.",
)
def cmd_swap(fpp_PRIVATE, panes_from, *panes_to):
# This command could have wrapped join, but a native implementation has been made to reduce overhead somewhat
wg = fpp_PRIVATE.wg
used, unused = wg.Panes_GetUsedUnused()
pairs = [ panes_from ] + [ arg for arg in panes_to ]
if len(pairs)&1:
return fpp_PRIVATE.flexsense['notices'].append(
FlexError( "Insufficient data, every <from> must be followed by <to>" ) )
pairs = [ pairs[i*2:i*2+2] for i in range(len(pairs)>>1) ]
# Check for errors
swaplist = ""
pair = "1"
try:
for f, t in pairs:
# Pair counts must be equal
if len(f) != len(t):
raise Exception("Pair " + pair + " count error, both <from> and <to> pane counts must be identical")
# Check for duplicates in the same spot of the same pair
for spot, paneset in [ ("<from>", f), ("<to>", t) ]:
paneid = ([paneid for paneid in paneset if paneset.count(paneid) > 1]+[None])[0]
if paneid:
raise Exception("Pane `" + paneid + "` specified multiple times in " + spot + " of pair " + pair)
# Check for self swap (this could be safely ignored)
for fi, ti in zip(f, t):
if fi == ti:
raise Exception("Pane `" + fi + "` swaps to self in pair " + pair)
# Iterate all panes in this argument and validate
for spot, paneid in zip( ["<from>" for _ in f] + ["<to>" for _ in t], f + t):
if not paneid in PANE_CHARACTERS:
raise Exception("Invalid pane `" + paneid + "` in " + spot + " of pair " + pair)
if paneid in swaplist: # Panes are only permitted to be swapped once
raise Exception("The " + spot + " pane `" + paneid + "` in pair " + pair + " is already swapped")
if not paneid in used: # The pane must already be in use
raise Exception("The " + spot + " pane `" + paneid + "` in pair " + pair + " is not being used")
# Next
swaplist += f + t
pair = str( int(pair) + 1 )
except Exception as error:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( str(error) ) )
# Merge all arguments into a single unified from and to that contains both, so only one direction (f->t) is needed
master_f, master_t = "".join([f for f, _ in pairs]), "".join([t for _, t in pairs])
master_f, master_t = master_f + master_t, master_t + master_f
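    # For example, the single pair "A" "B" yields master_f == "AB" and master_t == "BA", so one f->t substitution
    # maps A to B and B to A simultaneously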
# Perform the swaps in a single pass now that we know all panes are referenced only once
windowgram_lines = wg.Export_Lines()
wg.Import_Lines( [ "".join(
[ ch if ch not in master_f else master_t[master_f.find(ch)] for ch in list(line) ]
) for line in windowgram_lines ] )
# Replace windowgram
fpp_PRIVATE.wg.Import_Wg( wg )
##
## Flex: Drag
##
## Analogues:
##
## Scale may be used for drag *
##
## Notes:
##
## Supports limiting pane loss, if the user sets the `limit` parameter to `yes`.
##
## Relative sizes (percentages or multipliers) are based on the amount of space available in the drag direction.
## When dragging a windowgram-aligned edge outward (i.e., expanding the windowgram), there is nothing in that
## direction to base a relative size on, so an absolute size is required.
##
## Pushing neighboring edges to preserve panes was considered, but this would just add to the overhead with no
## present advantage. Perhaps in a future version.
##
## Drag multiple scalegroups now accounts for approximately half of the unit testing time. There is much room for
## optimization, but since performance demands in actual use are generally low, such concerns will be ignored for now.
##
## Considerations:
##
## It will be possible to have a parallel-irregular scalegroup. This is to say it only needs to have the group
## edge perpendicular to the edge axis be a single run; the parallel axis doesn't have to line up like this.
## Adding this check and allowing this exception will add flexibility to the command. This requires a new or
## modified scale core that pins the outer parallel edges. The following examples illustrates the differences
## between these forms.
##
## Rectangular Irregular Irregular
## Perpendicular Parallelism
##
## Illustrated ..aaaa1|2bbbb.. ..AAaa1|2bbBB.. ..AAqq1|2bbbb..
## windowgrams ..aaaa1|2bbbb.. ..AAaa1|2bbBB.. ..AAqq1|2BBBB..
## drag a vertical ..aaaa1|2bbbb.. ..AAaa1|2bbBB.. +-...xaa1|2BBBB..
## edge of 1 and 2 ..aaaa1|2bbbb.. ..AAaa1|2x.BB.. | ..QQaa1|2ssss..
## | |
## The arrows are areas where drag is impossible-+ and +-possible. Note that the "x" is entirely pinned to the
## edge, if it did not exist then "a" would be and that means border "Qa" could not be moved during the scaling.
## This is possible to add support for. Perhaps quietly, since a rectangular requirement is easier to explain to
## the user. So yes, it will be possible to support irregularity on the parallel axis in certain cases, and with
## some caveats. Note that the axis is in relation to drag motion, not edge direction, which are opposites.
##
## Irregular Parallelism:
##
## This has been ruled out for now. The complications arising from this don't seem worthwhile for what little
## convenience may be had. Take a look at this case:
##
## ..XXYYYZZZ| X is fully pinned ..XXYZ| drag right AB:XYZ left 4
## ....YYYZZZ| Y is partially pinned ....YZ|
## .AAABBBZZZ| A is pinned and Z binds panes ABXY .AAABZ| <- Connected; left of B pinned
## ..wwxxyyzz| wxyz are easily isolated ..wxyz| <- Independent
##
## The "connected" line requires a potentially deep analysis of neighbor connectivity, and scale behavior would be
## restricted in a way that could be counter-intuitive to the user. Also, the practicality of this seems limited.
## Many areas of the code would have to be upgraded, including: 1) New scale core that is able to deal with these
## irregular parallelism groups including an algorithm to map out which edges are fixed and which are subject to
## scaling, 2) The rectangular mask check would have to be discarded in favor of scanning the perpendicular scan
## lines to determine validity by testing for holes, 3) Multiple edges would need to be supported, affecting all
## code using the `optimal` edge with the assumption that it is a single edge.
##
## If adding this, probably accept all irregular scalegroups, and simply ignore panes that cannot be scaled.
##
## Easily accomplished by smudging the edge of transparency (":#") to account for any loss in either direction:
## 1) Smudge away from the drag edge (in case smaller)
## 2) Smudge toward the drag edge (in case bigger)
##
## Multiple Edges (multiple scalegroup processing done, splitting of scalegroup is next):
##
## Adding support for multiple edges has been ruled out for now. If added, use a single point fill extraction,
## where the mask is filled, and the resulting rectangle is removed from the mask, it is tested for validity (must
## touch drag edge, be contiguous, and rectangular), then repeats until the mask is empty. The independent scale
## groups that result would replace the two that exist right now. It would be nice to have but not important.
##
## Update: Probably use a faster exhaustive grouping search, first valid set of scalegroups are used.
##
@flex(
command = "drag",
group = "modifiers",
examples = [ "drag 12 r 2", "drag abcd up 25%", "drag 12:abcd l 50%" ],
description = "Drags an edge in the specified direction. The <edge> is defined by the panes that border it. " + \
"A <hint> may be necessary to resolve ambiguity; this is either an axis (VH) or a direction " + \
"(TBLR). Optional scalegroups are supported, add a colon (:) to the <edge> followed by the " + \
"panes to be scaled. To prevent loss of panes, set [limit] to \"yes\".",
aliases = [ ["slide", "drag "], ],
)
def cmd_drag_1(fpp_PRIVATE, edge, direction, size): # No support for limit without some analysis
return cmd_drag_2(fpp_PRIVATE, "", edge, direction, size)
@flex(
command = "drag",
group = "modifiers",
examples = [ "drag v 12 r 2", "drag t abcd up 25%", "drag l 12:abcd l 50%" ],
)
def cmd_drag_2(fpp_PRIVATE, hint, edge, direction, size, limit=None):
wg = fpp_PRIVATE.wg
used, unused = wg.Panes_GetUsedUnused()
# Reduce edge, resolve hint, deduce scalegroup, expand wildcards ... Supports swapping of hint and edge
error, res_hint, res_edge, res_scalegroups = EdgeProcessing.argument_processor(hint, edge, used, unused, True)
if error: return fpp_PRIVATE.flexsense['notices'].append( FlexError( error ) )
hint = edge = None # From here only use: res_hint, res_edge
# Resolve direction ... Axis (VH) is valid and like split, is based on TL, and is controlled with size negation
res_direction, direction = resolve_vhtblr( direction ), None
if not res_direction: return fpp_PRIVATE.flexsense['notices'].append( FlexError( \
"The direction parameter is unrecognized" ) )
# Get edge from res_hint + res_edge; this yields the official edge axis, required to resolve direction and size
status, res_hint, minimal, optimal = edgecore( wg, res_edge, res_hint )
status_print = EdgeStatus.error2string( status )
if status_print: return fpp_PRIVATE.flexsense['notices'].append( FlexError( \
"Edge specification error: " + status_print ) )
# Exclude contradiction in axis, this must follow edge core since the hint may not have been specified by the user
# Note: If they're equal they're opposite, consider that the edge axis and direction axis are different expressions
if (is_short_axis_vert_vhtblr(res_direction) and is_short_axis_vert_vhtblr(res_hint)) \
or (is_short_axis_horz_vhtblr(res_direction) and is_short_axis_horz_vhtblr(res_hint)):
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "The direction must go against the edge axis" ) )
# Reduce direction to "v" or "h"
inverse = "-" if size[0] == "-" else ""
showinv = inverse # Show inverse flag by default
if res_direction != "v" and res_direction != "h":
if size[0] == "-":
return fpp_PRIVATE.flexsense['notices'].append(
FlexError( "Negative size only valid if `direction` is vertical or horizontal" ) )
        res_direction, negate_flag = direction_to_axiswithflag(res_direction, True) # Inverted in this context
if res_direction is None or negate_flag is None:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "The hint you specified is invalid" ) )
inverse = "-" if negate_flag else ""
showinv = "" # For TBRL do not show inverse flag
# If edge is along windowgram edge, a relative size (percentage or multiplier) is only valid on contraction
wge = edgecore_windowgramedgealignment(wg, res_hint, minimal[0]) # 0 == na, -1 == tl, -2 == br
if wge and (arg_is_percentage(size) or arg_is_multiplier(size)):
expansion = False if (negate_flag ^ (True if wge == -1 else False)) else True
if expansion:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( \
"Expanding a windowgram by dragging its edge requires an absolute <size>" ) )
# Get necessary metrics, axis location is required to translate to characters, and possibly limit drag movement
wgw, wgh = wg.Analyze_WidthHeight()
axis_length = axis_location = optimal[0][0] # Use optimal since we're dealing with a pane grid
if inverse != "-": axis_length = (wgw if res_direction == "h" else wgh) - axis_length
# Resolve size and refine direction by toggling size negation if direction is VH
# NOTE: If the drag edge falls on the windowgram edge, and the motion is to expand, the available relative space
# is zero, and this action will result in an error when using percentages or multipliers, e.g., "drag r * r 100%"
# The user must specify exact characters when dragging in these situations, or use the scale command which treats
# percentages and multipliers as relative to the windowgram size; drag is relative to the edge-split windowgram.
restrict = False # If true, restrict dragging beyond the windowgram edge
(error, res_sizeinv, res_size), size = resolve_size(size, axis_length, inverse, showinv, restrict), None
if error: return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Size error: " + error ) )
# Reduce limitation flag
limit = True if limit is not None and is_true(limit, "limit") else False
# Produce mutually-exclusive side masks
wgm0, wgm1 = Windowgram_Mask_Macro_BuildSplitMasks( wg, res_hint, axis_location )
# Add the edge to the scalegroups if it's valid
wgm0x, wgm1x = Windowgram_Mask_Macro_GenerateAndSplitMasks( wg, wgm0, wgm1, res_edge )
error = Windowgram_Mask_Macro_ValidateRegularity( res_edge, wgm0x, wgm1x, res_hint, axis_location )
if not error:
res_scalegroups.insert( 0, res_edge )
# Validate scalegroups and produce their masks for scaling
for res_scalegroup in res_scalegroups:
# Verify the panes exist before they're needed for anything
undef = wg.Panes_PanesNotUsed_Message(res_scalegroup)
if undef: return fpp_PRIVATE.flexsense['notices'].append( FlexError( \
"Scalegroup '" + res_scalegroup + "' error: " + undef ) )
res_scalegroups_masks = []
for res_scalegroup in res_scalegroups: # Needs optimization
# Fully formed scalegroup expressions now mandatory (no longer inferred by combining edge)
wgm0x, wgm1x = Windowgram_Mask_Macro_GenerateAndSplitMasks( wg, wgm0, wgm1, res_scalegroup )
error = Windowgram_Mask_Macro_ValidateRegularity( res_scalegroup, wgm0x, wgm1x, res_hint, axis_location )
if error:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Unable to drag: " + error ) )
# Mask-extract the scalegroup (areas to be scaled) from the static (the rest of the windowgram)
wgm0s, wgm1s = wg.CopyMasked_Out( wgm0x ), wg.CopyMasked_Out( wgm1x ) # Already supports irregular parallelism
# Add to list
res_scalegroups_masks.append( [ wgm0s, wgm0x, wgm1s, wgm1x ] )
# Drag logic is isolated so it may be reprocessed as necessary
def drag(count, edgeruns, wg, res_scalegroups_masks):
res_scalegroups_masks_modified = []
for wgm0s, wgm0x, wgm1s, wgm1x in res_scalegroups_masks:
# Scale this scalegroup
def modifier(which, val, size, inv):
                # Grow side 0 and shrink side 1 (or the reverse when inverted), leaving zero-size dimensions untouched
                return ( val + ( -size if ( ( inv == "-" ) ^ ( which != 0 ) ) else size ) ) if val else 0
sw0, sh0 = wgm0s.Analyze_WidthHeight()
sw1, sh1 = wgm1s.Analyze_WidthHeight()
if res_direction == "h" and sw0: sw0 = modifier(0, sw0, count, inverse)
if res_direction == "h" and sw1: sw1 = modifier(1, sw1, count, inverse)
if res_direction == "v" and sh0: sh0 = modifier(0, sh0, count, inverse)
if res_direction == "v" and sh1: sh1 = modifier(1, sh1, count, inverse)
if wgm0s.Panes_Exist(): wgm0s = Windowgram( scalecore( wgm0s.Export_String(), sw0, sh0 ) )
if wgm1s.Panes_Exist(): wgm1s = Windowgram( scalecore( wgm1s.Export_String(), sw1, sh1 ) )
# Smudge the edge across the masks (trivial, default behavior)
if wgm0x.Panes_Exist(): wgm0x = smudgecore( wgm0x, axis_location, res_hint, count, inverse )
if wgm1x.Panes_Exist(): wgm1x = smudgecore( wgm1x, axis_location, res_hint, count, inverse )
# Append to modified list
res_scalegroups_masks_modified.append( [ wgm0s, wgm0x, wgm1s, wgm1x ] )
# Smudge the edge across the static (simple, but exhaustive stepping process)
# Basic behavior: with the edge, after each increment of the drag, modify the edge and proceed
# (1) Any-Edge-Outward NOT on outer edge Modifies edgerun until outer edge is reached
# (2) Any-Edge-Outward on outer edge Full run (windowgram expansion)
# (3) Outer-Edge-Inward Full run (windowgram contraction / transparency overwrite)
# This behavior should be controlled here, as it's not required by other planned commands
move = -1 if inverse == "-" else 1
while count and wg.Panes_Exist():
wgw, wgh = wg.Analyze_WidthHeight() # Required reload, smudgecore may contract or expand the windowgram
location = edgecore_edgetoedge(res_hint, edgeruns[0], wgw, wgh) # Detects drag edge touching windowgram edge
if location == -1 or location == 1: move = move * count # Full run for the remaining drag ... (2), (3)
wg = smudgecore( wg, edgeruns[0][0], res_hint, abs(move), inverse, edgeruns[0] ) # Full scan if on wg edge
edgeruns = [ [ edgeruns[0][0] + move, edgeruns[0][1], edgeruns[0][2] ] ] # Move the edge
windowgram_parsed = wg.Export_Parsed() # Required to rebuild edge
edgeruns = edgecore_buildoptimal( windowgram_parsed, res_hint, "", edgeruns )
count -= abs(move)
        # Paste each scalegroup over the static
for wgm0s, wgm0x, wgm1s, wgm1x in res_scalegroups_masks_modified:
wg.CopyMasked_In( wgm0x, wgm0s )
wg.CopyMasked_In( wgm1x, wgm1s )
# Return result for analysis
return wg
# Perform drag. Result must be valid according to: windowgram must not be zero, no loss of panes if user selects.
# This basically starts with the full drag, and if that fails it divides down until the maximum valid drag possible
# has been found. Prints warning if actual drag differs from the user's specification.
# TODO: This may be optimized by checking the range on the dynamic masks, and the nearest neighboring panes on the
# edge for the static, to find a more efficient maximum range. The scale algorithm does not lend itself to such
# reasoning, so windowing will still need to be done to assure that no panes are lost, but if there are no scale
# masks to begin with then the maximum range is trivially discovered.
# NOTE: Because scale produces aliasing artifacts, the only way to know the real maximum is to work backwards from
# the desired maximum one step at a time. Keeping the divide for now because it should be faster on average, in
# situations where this search is performed.
wgout = wg.Copy()
error = None
chars = res_size
chars_min = 0
chars_max = chars + 1
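    # Search window for the loop below: chars_min tracks the largest drag known to be safe (0 initially), chars_max
    # the smallest known to fail (one past the request initially); the window is narrowed, binary-search style,
    # until the two meet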
while True:
if chars == 0:
error = "Drag without losing panes is not possible using the given parameters"
wgout = wg.Copy()
break
wgout = drag( chars, copy.deepcopy(optimal), wg.Copy(), res_scalegroups_masks )
wgw, wgh = wgout.Analyze_WidthHeight()
if not wgw or not wgh or (limit and PaneList_DiffLost( wg, wgout )):
chars_max = chars
else:
chars_min = chars
span = chars_max - chars_min
if span == 1:
if chars == chars_min: break
chars = chars_min
else:
chars = chars_min + (span // 2)
# Report error or warning
if error:
fpp_PRIVATE.flexsense['notices'].append( FlexError( error ) )
else:
lostpanes = PaneList_DiffLost( wg, wgout )
if lostpanes:
fpp_PRIVATE.flexsense['notices'].append( FlexWarning( \
"The drag action resulted in " + str(len(lostpanes)) + " lost panes: " + lostpanes ) )
# Replace windowgram
fpp_PRIVATE.wg.Import_Wg( wgout )
# TODO: Send actual drag value back to the caller in some way, required for interactive use
##
## Flex: Insert
##
## Analogues:
##
## Insert at windowgram edge may be used for add
##
## Alternatives:
##
## The `insert` command necessarily expands the windowgram. If you want to add a pane without affecting the size
## of the windowgram, you should use `break` or `split`.
##
## Scaling:
##
## There are two possible scale behaviors for the scalegroup where its edges are aligned to the edge of insertion:
##
## 1) Handling the scalegroup as a whole and scaling it independent of the edge (only if crossing edge)
## * Useful for neighboring grids: They would not necessarily have to be pinned to the [spread]
## * Mixed behavior: If the scalegroup doesn't cross the edge, it would be pinned to [spread]
##
## 2) Splitting the scalegroup at edge of insertion and scale the two sides independently at [spread] position
## * Behavioral consistency (and alignment) with any edge panes that are not part of the scalegroup
##
## Illustration using the hypothetical command "insert vertical 23:STQL x" with both algorithms contrasted:
##
## original spread = "50%" spread = "100%"
##
## 1111222233334444 11112222xxxxxxxx33334444 11112222xxxxxxxx33334444 insertion
## aligned > SSSSTTTTQQQQLLLL SSSSTTTTTTTTQQQQQQQQLLLL SSSSTTTTTTTTTTTTQQQQLLLL noscalegroup
## SSSSSSTTTTTTQQQQQQLLLLLL SSSSSSTTTTTTQQQQQQLLLLLL scalegroup1
## SSSSSSTTTTTTQQQQQQLLLLLL SSSSSSSSTTTTTTTTQQQQLLLL scalegroup2
##
## 1111222233334444 11112222xxxxxxxx33334444 11112222xxxxxxxx33334444 insertion
## unaligned > XXXXXYYYYYYZZZZZ XXXXXYYYYYYYYYYYYYYZZZZZ XXXXXYYYYYYYYYYYYYYZZZZZ noscalegroup
## XXXXXXXXYYYYYYYYZZZZZZZZ XXXXXXXXYYYYYYYYZZZZZZZZ scalegroup1
## XXXXXXXXYYYYYYYYZZZZZZZZ XXXXXXXXYYYYYYYYZZZZZZZZ scalegroup2
##
## Although there are going to be ideal situations for both, scalegroup1 is the default because it gives users more
## control over neighboring grids, allowing consistent scaling that may otherwise be distorted if [spread] changes.
##
## Two features that won't be difficult to implement that would provide the full range of functionality:
## a) Multiple, independently functioning scalegroups, e.g., "<hint>@<edge>:<sg1>:<sg2>:<sg3>"
## b) Flag toggle for the scalegroup that changes the default behavior, e.g, by adding "!" into the scalegroup
##
## When implementing multiple scalegroups (requires irregular support), it should also be added to the `drag`
## command. Note that the different scale behavior is not applicable to `drag`, only to `insert` and `clone`.
##
## TODO:
##
## This command should eventually support irregular scalegroups, to the extent that they may be split into multiple
## regular scalegroups. This feature will be shared with the `drag` command for similar purposes. Note that the
## scalegroups for `insert` must touch (or cross) the insertion edge to be considered valid, in addition to being
## rectangular and whole.
##
@flex(
command = "insert",
group = "modifiers",
examples = [ "insert XZ 10" ],
description = "Inserts a pane into an edge and expands the windowgram accordingly. The <edge> is defined by " + \
"the panes that border it. A <hint> may be necessary to resolve ambiguity. Optional " + \
"scalegroups are supported, add a colon (:) to the <edge> followed by the panes to be scaled. " + \
"The [spread] is top/left and defaults to 50%.",
aliases = [ ],
)
def cmd_insert(fpp_PRIVATE, edge, size):
return cmd_insert_2(fpp_PRIVATE, "", edge, size)
@flex(
command = "insert",
group = "modifiers",
examples = [ "insert right * 10", "insert vert 1245 1", "insert top X 50%", "insert right Z 5 z 75%" ],
)
def cmd_insert_2(fpp_PRIVATE, hint, edge, size, newpane=None, spread=None):
wg = fpp_PRIVATE.wg
used, unused = wg.Panes_GetUsedUnused()
# Reduce edge, resolve hint, deduce scalegroup, expand wildcards ... Supports swapping of hint and edge
error, res_hint, res_edge, res_scalegroups = EdgeProcessing.argument_processor(hint, edge, used, unused, True)
if error: return fpp_PRIVATE.flexsense['notices'].append( FlexError( error ) )
hint = edge = None
# Get edge from res_hint + res_edge; this yields the official edge axis, required to resolve direction and size
status, res_hint, minimal, optimal = edgecore( wg, res_edge, res_hint )
status_print = EdgeStatus.error2string( status )
if status_print:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Edge specification error: " + status_print ) )
# Get necessary metrics
wgw, wgh = wg.Analyze_WidthHeight()
axis_location = optimal[0][0]
# Axis length is of the combined edge panes, on the axis opposite of insertion; easiest to derive using a mask
_, _, pw, ph = Windowgram_Mask_Generate(wg, res_edge).Panes_PaneXYWH(MASKPANE_1)
axis_length = pw if res_hint == "v" else ph
# Resolve size ... If percentage or multiplier, this will be based on the axis_length of the combined edge panes
(error, res_sizeinv, res_size), size = resolve_size(size, axis_length, "", "", False), None
if error: return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Size error: " + error ) )
# Resolve spread parameter
if spread is None: spread = "50%"
if size_ValidUnit(spread) is False: # Ignore None (not percentage or multiplier) and True (valid range)
        return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Spread must be within the range 0% <-> 100%" ) )
res_spread = size_ConvertToCharacters(spread, res_size + 1) # Favor top/left
if res_spread == res_size + 1 and size_ValidUnit(spread) is True: res_spread = res_size # Favor top/left
if res_spread < 0 or res_spread > res_size:
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Spread value is not in range, try a percentage" ) )
spread = None
# Verify newpane ... Set to first available if not specified
if len(unused) < 1: return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Insufficient panes for insert" ) )
if newpane is None: newpane = unused[0]
if len(newpane) > 1: return fpp_PRIVATE.flexsense['notices'].append( FlexError(
"Parameter newpane exceeds the function maximum of one pane" ) )
for ch in set(newpane):
if not ch in PANE_CHARACTERS: return fpp_PRIVATE.flexsense['notices'].append( FlexError(
"Invalid pane in newpane parameter: " + ch ) )
panes_in_use = newpane if newpane in used else ""
panes_in_use_message = panes_in_use_message_generate( panes_in_use )
if panes_in_use_message: return fpp_PRIVATE.flexsense['notices'].append( FlexError( panes_in_use_message ) )
used, unused = newpanes_RebuildPaneListsInPreferentialOrder( used, unused, newpane )
# Create a blank target windowgram compatible with masking (used for error checking)
wgw_new = wgw if res_hint == "h" else wgw + res_size
wgh_new = wgh if res_hint == "v" else wgh + res_size
wgout = Windowgram("", True).Load_Parsed(ParsedPanes_Add(MASKPANE_X, dict(x=1, w=wgw_new, y=1, h=wgh_new)))
# For readability, let's transpose our windowgrams, and work with a consistent vertical edge
def transposer():
nonlocal \
wg, wgout
wg, wgout = Windowgram_Convert.Transpose_Windowgrams( \
wg, wgout )
if res_hint == "h": transposer()
# Find panes that border the minimal edge, it may be different than res_edge ("12" from "right 1" or "vertical 12")
res_edgepanes = wg.Edge_PanesAlongSub("v", minimal[0]) # Transposed hint is "v"
# Two pass assembly; there will be overdraw with a scalegroup, but layering is simpler and the result is identical
# Pass 1:
# A1) Fill in the new inserted pane (minimal edge)
# A2) Copy the locked portions of the surrounding gap (optimal edge) / inline smudgecore
# A3) Fill in the surrounding gap according to the [spread] value (windowgram split) / inline smudgecore
# A4) Copy the original windowgram halves
l, m, r = axis_location, axis_location + res_spread, axis_location + res_size
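    # In the transposed space, columns [l, r) form the newly inserted span, and m is the [spread] split point used
    # by the gap fill (A3) to choose between the left (< m) and right (>= m) neighboring column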
wgc_i, wgc_o, wgc_x = wg.Export_Chars(), wgout.Export_Chars(), []
wge = edgecore_windowgramedgealignment(wg, "v", minimal[0]) # Transposed hint is "v" / 0 == na, -1 == tl, -2 == br
if wge and optimal != minimal: return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Pass 1 edge error" ) )
def lock_detection(res_edgepanes, wgc_i, wge, xpos, ypos, threshold):
if ypos == threshold or wge: return None # No lock processing (A2), also if wge != 0 then optimal == minimal
return 0 if wgc_i[ypos][xpos-1] in res_edgepanes else -1 if wgc_i[ypos][xpos] in res_edgepanes else 0
lock_t = lock_detection(res_edgepanes, wgc_i, wge, l, minimal[0][1]-1, -1)
lock_b = lock_detection(res_edgepanes, wgc_i, wge, l, minimal[0][2], len(wgc_i))
for y, (row_i, row_o) in enumerate(zip(wgc_i, wgc_o)):
run = []
for x in range(len(row_o)):
if x >= l and x < r:
if y >= minimal[0][1] and y < minimal[0][2]: run.append( newpane ) # A1
elif y >= optimal[0][1] and y < optimal[0][2]:
if y < minimal[0][1]: run.append( row_i[l + lock_t] ) # A2
if y >= minimal[0][2]: run.append( row_i[l + lock_b] ) # A2
else:
if wge: run.append( row_i[l + wge + 1] ) # A3
else: run.append( row_i[l-1] if x < m else row_i[l] ) # A3
else:
run.append( row_i[x if x < l else (x-res_size)] ) # A4
wgc_x.append( run )
wgout.Import_Chars(wgc_x)
if wgout.Panes_HasPane(MASKPANE_X): # The entire output windowgram should have been overwritten or an error occurred
return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Pass 1 error: Windowgram was not fully replaced" ) )
# Pass 2:
for res_scalegroup in res_scalegroups:
# Verify the panes exist before they're needed for anything
undef = wg.Panes_PanesNotUsed_Message(res_scalegroup)
if undef: return fpp_PRIVATE.flexsense['notices'].append( FlexError( \
"Scalegroup '" + res_scalegroup + "' error: " + undef ) )
# Panes touching insertion don't scale
res_scalegroup, _ = PaneList_MovePanes( res_scalegroup, "", res_edgepanes )
# Build two scalegroup masks, for input and output, then verify the scalegroup is valid
wgms_i = Windowgram_Mask_Generate(wg, res_scalegroup)
wgms_o = Windowgram_Mask_Generate(wgout, res_scalegroup)
error = Windowgram_Mask_Macro_ValidateRegularity(
res_scalegroup, wgms_i, wgms_o, "v", axis_location ) # Transposed hint is "v"
if error: return fpp_PRIVATE.flexsense['notices'].append( FlexError( "Unable to insert: " + error ) )
# Extract the scalegroup from the source windowgram into its own cropped windowgram
wgcrop = wg.CopyMasked_Out(wgms_i)
# Scale the cropped windowgram according to the output mask dimensions
_, _, sw, sh = wgms_o.Panes_PaneXYWH(MASKPANE_1)
wgcrop = Windowgram( scalecore( wgcrop.Export_String(), sw, sh ) )
# Paste the scaled cropped windowgram into the output windowgram using the output mask
wgout.CopyMasked_In(wgms_o, wgcrop)
# Restore original orientation
if res_hint == "h": transposer()
# Replace windowgram
fpp_PRIVATE.wg.Import_Wg( wgout )
##
## Flex: Clone
##
## Analogues:
##
## Cloning one pane is the same as insert
##
## Implementation Sketch:
##
## This command takes a group of panes (forming a full rectangle), and along the specified edge, inserts it into
## place with new pane ids. If necessary, it will stretch the surrounding windowgram to accommodate, exactly like
## break does. Most useful for rapidly expanding common windowgram patterns.
##
## clone <group> <edge> [newpanes]
## clone <group> <hint> <edge> [newpanes]
##
## The [newpanes] argument follows the same order as the first <group> parameter.
##
## Possibly implement two modes: insert and replace. Insert is described above. Replace would be replacing a
## group of panes with the cloned set. Needs an unobtrusive qualifier.
##
## This is something of a metafunction, in that it depends on other flex commands, namely insert.
##
## The clone command will be very easy to implement:
##
## 1) copy it out Extract group of panes to be cloned
## 2) insert ... Calls insert command ... Counter edge scale to fit
## 3) scale ... Scales up (assuring no loss of panes) to accommodate ... Edge scale to fit
## 4) rename ... Rename the target panes so they don't conflict with the group being cloned
## 5) copy it in Replace the insert pane with the renamed group
##
# TODO
##
## Flex: Delete
##
## Analogues:
##
## None
##
## Alternatives:
##
## The `delete` command necessarily contracts the windowgram. If you want to delete a pane without affecting the
## size of the windowgram, you should use `join` or `drag`.
##
## Implementation Sketch:
##
## Removes the pane(s) and contracts the windowgram accordingly. If the user does not desire the contraction that
## is the effect of this action, the user could use the `join` command, which combines multiple panes into one
## while preserving the surrounding windowgram. The contraction will only truncate neighboring panes, there will
## never be a total loss of panes other than those the user deletes.
##
## delete <panes> remove one or more panes anywhere in windowgram
##
## Aliases: del, clip, remove, drop, rm
##
## Axial Contraction Bias:
##
## When deleting the pane(s), it contracts only one axis. The axis selected is the one where no pane loss occurs.
## If pane loss occurs on both axes, then it fails with an error that suggests the use of `join`. There appears to
##    be no situation where both axes are free of pane loss.  This is demonstrated with the trivial arrangements
## shown below.
##
## 0 = deleting X = truncated a = unaffected
##
## XXaaaa XXaaaa | XXXXXX XX/
## XX0000 /\ | aa0000 aa\
##
## Anything more complicated will necessarily introduce deletable neighboring panes, so long as the basic geometric
## rules of the windowgram are enforced (as they automatically are with flex). Consider Example (A) below, where,
## when deleting "0", it's impossible to avoid total pane loss in "1234aAbB" without incurring it in "xX".
##
## (A) | (B) | (C)
## | |
## 111222 \/ | |
## axxxxb 111222 | 111222 12 | a1122b \/
## a0000b axxxxb | a0000b \ab/ | a0000b a1122b
## A0000B AXXXXB | A0000B /AB\ | A0000B A3344B
## AXXXXB 333444 | 333444 34 | A3344B /\
## 333444 /\ | |
##
## Similarly, it's impossible to modify the windowgram to permit a situation where there is no total pane loss, as
## Examples (B) and (C) suggest. There is either pane loss on both axes (as with a grid), or there's permissible
## contraction on one axis. Nothing else. This is all inherent in the geometry constraints of the windowgram.
##
## Implementation summary: The `delete` command will identify and affect the contractible axis, or suggest to the
## user the alternative `join` command.
##
# TODO
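##
## Standalone illustration of the axis selection described above (a sketch under assumptions, not the eventual
## flex implementation): for each axis, simulate dropping the rows or columns occupied by the deleted panes and
## check whether any surviving pane would be lost entirely. The helper name is hypothetical.
##
def _contractible_axis_sketch(lines, panes):
    panes = set(panes)
    drop_rows = { r for r, line in enumerate(lines) if panes & set(line) }
    drop_cols = { c for c in range(len(lines[0])) if panes & { line[c] for line in lines } }
    survivors = set("".join(lines)) - panes
    def remains(rows, cols):
        kept = "".join( line[c] for r, line in enumerate(lines) if r not in rows
                        for c in range(len(line)) if c not in cols )
        return survivors <= set(kept)   # True if every non-deleted pane still exists
    if remains(drop_rows, set()): return "vertical"     # contract by dropping rows
    if remains(set(), drop_cols): return "horizontal"   # contract by dropping columns
    return None                                         # pane loss on both axes; suggest `join`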
##
## Flex: Mirror
##
## Analogues:
##
## None
##
## Implementation Sketch:
##
## mirror [group] mirrors, with optional group specification (supports *)
##
# TODO: Optional group
@flex(
command = "mirror",
group = "modifiers",
description = "Reverse horizontally (left/right)",
)
def cmd_mirror(fpp_PRIVATE):
wg = fpp_PRIVATE.wg
windowgram_lines = wg.Export_Lines()
wg.Import_Lines( [ "".join( [ ch for ch in reversed(list(line)) ] ) for line in windowgram_lines ] )
fpp_PRIVATE.wg.Import_Wg( wg )
##
## Flex: Flip
##
## Analogues:
##
## None
##
## Implementation Sketch:
##
## flip [group] flips, with optional group specification (supports *)
##
# TODO: Optional group
@flex(
command = "flip",
group = "modifiers",
description = "Reverse vertically (top/bottom)",
)
def cmd_flip(fpp_PRIVATE):
wg = fpp_PRIVATE.wg
windowgram_lines = wg.Export_Lines()
wg.Import_Lines( reversed(windowgram_lines) )
fpp_PRIVATE.wg.Import_Wg( wg )
##
## Flex: Rotate
##
## Analogues:
##
## None
##
## Implementation Sketch:
##
## rotate <how> how == cw, ccw, 180, interpret 1..3 and -1..-3 as multiples of 90
## rotate <group> <how> [force] group == fully formed rectangle, force == rotate non-square
##
## Notes:
##
## Rotating a group of panes (that is not the full windowgram) should be limited to an even square by default.
## A non-square group requires setting the force flag, since it must enlarge the windowgram to accommodate.
##
# TODO
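##
## Standalone illustration (a sketch, not the eventual flex implementation): rotating a list-of-strings
## windowgram in 90 degree steps using transpose and reverse. The helper name and the cw/ccw/180 keyword
## mapping are assumptions.
##
def _rotate_lines_sketch(lines, how):
    if how in ("cw", 1, -3):    # 90 degrees clockwise: transpose, then mirror each row
        return [ "".join(col)[::-1] for col in zip(*lines) ]
    if how in ("ccw", -1, 3):   # 90 degrees counter-clockwise: transpose, then flip the rows
        return [ "".join(col) for col in zip(*lines) ][::-1]
    if how in ("180", 2, -2):   # 180 degrees: mirror and flip
        return [ line[::-1] for line in reversed(lines) ]
    return list(lines)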
|
syzygy/scripts/benchmark/dromaeo.py | nzeh/syzygy | 343 | 12726173 | <reponame>nzeh/syzygy
#!python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import optparse
import sys
import threading
import urlparse
import zip_http_server
# The set of address values that map to 'localhost'.
_LOCALHOST_ADDRESSES = (None, '', '0.0.0.0', '127.0.0.1')
class RequestHandler(zip_http_server.ZipFileRequestHandler):
"""A request handler class that handles incoming JSON post requests by
storing the metrics and shutting down the server."""
def do_POST(self):
"""A handler for the POST method."""
post_body = None
try:
# Read the posted content.
content_len = int(self.headers.getheader('content-length'))
post_body = self.rfile.read(content_len)
# The json is packed into a data argument.
data = urlparse.parse_qs(post_body)['data'][0]
# Stash the metrics in the server.
results = json.loads(data)
self.server.SetResults(results)
# Send back a plain-text version of the data.
pretty_data = json.dumps(results, sort_keys=True, indent=2)
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(pretty_data))
self.end_headers()
self.wfile.write(pretty_data)
except Exception, error:
message = str(error)
self.send_response(400)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(message))
self.end_headers()
self.wfile.write(message)
class DromaeoServer(zip_http_server.HTTPZipFileServer):
"""This class implements a runnable HTTP server that serves the dromaeo
benchmark from a ZIP archive.
"""
def __init__(self, zip_file, address='', port=0, request_handler_class=None):
# Use the default request handler if no over-ride is specified.
if request_handler_class is None:
request_handler_class = RequestHandler
# Initialize the base class.
server_address = (address, port)
zip_http_server.HTTPZipFileServer.__init__(
self, server_address, request_handler_class, zip_file)
# The results and an event to track when they get set.
self._results = None
self._results_have_been_set = threading.Event()
def Run(self):
"""Runs the HTTP server in a background thread."""
thread = threading.Thread(target=self.serve_forever)
thread.daemon = True
thread.start()
def SetResults(self, results):
"""Stores the results of the benchmark and sets an event to notify any other
thread waiting on the results.
"""
self._results = results
self._results_have_been_set.set()
def HasResults(self):
"""Returns true if the results have been set."""
return self._results is not None
def GetResults(self):
"""Returns the results or None."""
return self._results
def WaitForResults(self, timeout):
"""Blocks until results have been set, or the timeout duration elapses."""
self._results_have_been_set.wait(timeout)
def Reset(self):
"""Resets the event notification of the results being set."""
self._results_have_been_set.clear()
def GetUrl(self):
"""Returns the URL at which the dromaeo benchmark is running."""
address, port = self.server_address
if address in _LOCALHOST_ADDRESSES:
address = 'localhost'
return 'http://%s:%d/?dom&automated&post_json' % (address, port)
def FormatResultsAsText(self):
"""Prints a dromaeo result set in a nicely human readable format."""
if not self.HasResults():
return 'None'
sorted_results = sorted(self._results.iteritems())
return '\n'.join(' %s : %s' % kv for kv in sorted_results)
def main(argv):
# Setup the argument parser.
parser = optparse.OptionParser()
parser.add_option('-a', '--address', default='', help='The address to bind.')
parser.add_option('-p', '--port', type='int', default=0,
help='The port to bind (by default, the server will '
'randomly select an available port).')
  parser.add_option('-z', '--zip-file', default='./dromaeo.zip',
                    help='The zipfile containing the dromaeo resources '
                         '(default: %default).')
  parser.add_option('-t', '--timeout', type='int', default=300,
                    help='The maximum time to wait for results, in seconds '
                         '(default: %default).')
# Parse the arguments.
options, extra = parser.parse_args(argv)
if extra:
parser.error('Unexpected arguments: %s' % extra)
# Create the server.
server = DromaeoServer(zip_file=options.zip_file,
address=options.address,
port=options.port)
# Run the server in another thread.
print "Starting dromaeo server."
server.Run()
print "URl: %s" % server.GetURL()
try:
server.WaitForResults(options.timeout)
except KeyboardInterrupt:
pass
server.shutdown()
# Print the results to the console.
if not server.HasResults():
print "Timed out or interrupted while waiting for results."
return 1
else:
print server.FormatResultsAsText()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
tools/isolate/trace_child_process.py | Scopetta197/chromium | 212 | 12726209 | <filename>tools/isolate/trace_child_process.py
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Waits for the go signal and replaces itself with the command to be run.
Not meant to be used directly, only meant to be used by trace_inputs.py.
"""
import os
import subprocess
import sys
def main():
signal = 'Go!'
value = sys.stdin.read(len(signal))
assert value == signal
sys.stdin.close()
# Replace the executable with an absolute path to make it easier to grok.
cmd = sys.argv[1:]
cmd[0] = os.path.abspath(cmd[0])
if cmd[0].endswith('.py'):
cmd.insert(0, sys.executable)
p = subprocess.Popen(cmd)
#print 'Child pid: %d' % p.pid
p.wait()
return p.returncode
if __name__ == '__main__':
sys.exit(main())
|
docs_src/path_operation_advanced_configuration/tutorial002.py | Aryabhata-Rootspring/fastapi | 53,007 | 12726244 | from fastapi import FastAPI
from fastapi.routing import APIRoute
app = FastAPI()
@app.get("/items/")
async def read_items():
return [{"item_id": "Foo"}]
def use_route_names_as_operation_ids(app: FastAPI) -> None:
"""
Simplify operation IDs so that generated API clients have simpler function
names.
Should be called only after all routes have been added.
"""
for route in app.routes:
if isinstance(route, APIRoute):
route.operation_id = route.name # in this case, 'read_items'
use_route_names_as_operation_ids(app)
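# Optional check (not part of the original tutorial): after the call above, the generated OpenAPI schema
# should use the route name as the operation id. The exact dict path assumes the default OpenAPI layout.
# print(app.openapi()["paths"]["/items/"]["get"]["operationId"])  # expected: "read_items"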
|
notebooks/work-in-progress/2018-10_SceneGraphParsing/train_grapher.py | AnveshAeturi/deep-learning-workshop | 486 | 12726245 | <gh_stars>100-1000
import os, sys
import argparse
import random
import time, pytz
from datetime import datetime, timezone
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import h5py
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
#from text_utils import TextEncoder # This is my version
sys.path.append('./pytorch-openai-transformer-lm')
from model_pytorch import TransformerModel, load_openai_pretrained_model, DEFAULT_CONFIG
from model_pytorch import Conv1D, Block
from opt import OpenAIAdam
from utils import ResultLogger
pretrained_model_path = os.path.join('.', 'finetune-transformer-lm', 'model')
# TODO : Fn to get list of relationship_types and relationship_templates for each type
# Props to : https://github.com/rasbt/deep-learning-book/blob/master/code/model_zoo/pytorch_ipynb/custom-data-loader-csv.ipynb
class Hdf5Dataset(Dataset):
"""Custom Dataset for loading entries from HDF5 databases"""
def __init__(self, h5_path, vocab_count, valid_indices=None): # transform=None,
self.h5f = h5py.File(h5_path, 'r')
features = self.h5f['features']
self.valid_indices=valid_indices
if valid_indices is None:
self.num_entries = features.shape[0]
else:
self.num_entries = len(valid_indices)
#self.transform = transform
self.n_ctx = features.shape[1]
self.postitional_encoder = np.arange(vocab_count, vocab_count + self.n_ctx)
def __getitem__(self, index):
if self.valid_indices is not None: # find on-disk index
index = self.valid_indices[index]
features = self.h5f['features'][index]
labels = self.h5f['labels'][index].astype(np.int64)
deps = self.h5f['deps'][index].astype(np.int64)
# Find the token_clf
#token_clf_pos = np.nonzero( features==token_clf )[-1].sum() # This is zero if it is not found
#if token_clf_pos>=features.shape[0]-1:
# #print("token_clf_pos right at end, index=", index, token_clf_pos, features.shape[0]-1)
# token_clf_pos=features.shape[0]-2 # Need to have this location, and the next one
#if self.transform is not None:
# features = self.transform(features)
#xmb[:, :, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
#xmb[:, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx) # This is a single row, of batch=1
#features_with_positions = np.stack( [ features, self.postitional_encoder ], axis=1 )
features_with_positions = np.stack( [ features, self.postitional_encoder.copy() ], axis=1 ) # May be safer when multithreaded?
#print(features.shape, features_with_positions.shape) # (128,) (128, 2)
#unanswerable=False
#if 3 not in list(labels): # There is no answer to this question
# unanswerable=True
#if 4 not in list(labels): # There is no answer to this question
# unanswerable=True
#print(token_clf_pos, unanswerable)
#if unanswerable:
# if False:
# labels[0]=4 # end is before start
# labels[1]=3
# if True:
# labels[token_clf_pos ] = 4 # end is before start
# labels[token_clf_pos+1] = 3
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.clip.html
np.clip(deps, 0, self.n_ctx-1, out=deps)
return features_with_positions, labels, deps
def __len__(self):
return self.num_entries
def close(self):
self.h5f.close()
class StepwiseClassifierModel(nn.Module):
""" Transformer with stepwise classifier(s) """
def __init__(self, cfg, n_classifier=None, one_hot=True, vocab_count=None, n_ctx=128, extra_blocks=1): # 40990
super(StepwiseClassifierModel, self).__init__()
self.n_embd = cfg.n_embd
self.n_ctx = n_ctx
self.n_classifier = n_classifier
self.extra_blocks = extra_blocks
self.transformer = TransformerModel(cfg, vocab=vocab_count+n_ctx, n_ctx=n_ctx)
#block = Block(n_ctx, cfg, scale=True)
#self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(cfg.n_layer)])
## Add the attention pointer idea
if extra_blocks==1: # Just set to ==1 for now
# First : Add an additional transformer layer
self.full_block = Block(n_ctx, cfg, scale=True)
# BBBUUUTTT :: force it into full-attentional mode ::
#self.full_block.attn.register_buffer('b', torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.full_block.attn.register_buffer('b', (torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.stepwise_dropout = nn.Dropout(cfg.clf_pdrop)
self.stepwise_classifier = Conv1D(n_classifier, 1, self.n_embd)
self.attn_dropout = nn.Dropout(cfg.attn_pdrop)
self.c_attn = Conv1D(self.n_embd*2, 1, self.n_embd)
def forward(self, x): # x is the input text
## NO : x ~ np.zeros((n_batch, 2, n_ctx, 2), dtype=np.int32) # This is for their 0 vs 1 model
# x ~ np.zeros((n_batch, n_ctx, 2), dtype=np.int32) # This is more normal use-case
# x[..., -1] is for [input_sequence, positions]
h = self.transformer(x) # These are the transformers embeddings (n_batch, n_ctx, n_embd)
if self.extra_blocks==1: # This can look forwards too
h = self.full_block(h)
# for classification step-wise
h_stepwise_input = self.stepwise_dropout(h)
task_logits = self.stepwise_classifier( h_stepwise_input ).permute( 0, 2, 1) # CrossEntropy expects classifier to be in second position
#print("task_logits.size()=", task_logits.size() )
# task_logits.size()= torch.Size([8, 5, 128]) (n_batch, n_classifier, n_ctx)
# ~ Attention.forward
h_attn_input = self.stepwise_dropout(h)
attn = self.c_attn(h_attn_input)
# reshape for query and key
query, key = attn.split(self.n_embd, dim=2)
# ~ Attention.split_heads(self, x, k=False):
#new_h_shape = h.size()[:-1] + (1 , h.size(-1)) # Insert an extra dimension
#query = query.view(*new_h_shape).permute(0, 2, 1, 3)
#key = key.view( *new_h_shape).permute(0, 2, 3, 1)
#query = query.view(*new_h_shape).permute(0, 1, 3)
# Above can be simplified, since we don't need to get too fancy...
key = key.permute(0, 2, 1)
#print( "query.size()=", query.size())
# query.size()= torch.Size([8, 128, 768]) = batch, time_step, matcher
#print( "key.size()=", key.size())
# key.size()= torch.Size([8, 768, 128]) = batch, matcher, time_step
# ~ Attention._attn(self, q, k, v):
w = torch.matmul(query, key)
if True: # self.scale:
w = w / np.sqrt(self.n_embd) # simple scaling, since we're adding up a dot product
# Now, we have a weighting matrix (logits) over the different locations
#w = nn.Softmax(dim=-1)(w) # Don't do this here, since we use pure logits with the loss_fn
#print("w.size()=", w.size())
# w.size()= torch.Size([8, 128, 128]) ( thinking about it : batch, time_step, position_score )
attn_logits = w.permute(0, 2, 1) # CrossEntropy expects classifier to be in second position ( batch, position_score, time_step )
return task_logits, attn_logits
def run_predictions(test_loader=None, output_file=None):
print("run_predictions() -> %s" % (output_file, ))
model_stepwise.eval()
labels_arr, deps_arr = [], []
for idx, (features, labels, deps) in enumerate(test_loader):
#features, labels, deps = features.to(device), labels.to(device), deps.to(device)
features = features.to(device)
out_class_logits, out_deps_logits = model_stepwise(features)
# Ok, so now what...
# Just save off the argmax(out_class_logits) and argmax(out_deps_logits)
_, labels_predicted = torch.max( out_class_logits, 1)
_, deps_predicted = torch.max( out_deps_logits, 1)
# print( labels_predicted.shape, deps_predicted.shape ) # on P100s : torch.Size([512, 32]) torch.Size([512, 32])
labels_arr.append( labels_predicted.detach().cpu().numpy().astype(np.uint8) )
deps_arr.append( deps_predicted.detach().cpu().numpy().astype(np.uint8) )
if (idx+1) % 10 == 0:
print('%.1f%% of predictions' % (idx / float(len(test_loader)) * 100, ), end='\r')
#break
#np.savez(output_file, labels=np.array( labels_arr ), deps=np.array( deps_arr ), )
np.savez(output_file, labels=np.vstack( labels_arr ), deps=np.vstack( deps_arr ), )
"""
import numpy as np
a=np.array([[1,2,3],[4,5,6]])
b=np.array([[7,6,5],[4,8,6]])
np.vstack([a,b])
#array([[1, 2, 3],
# [4, 5, 6],
# [7, 6, 5],
# [4, 8, 6]])
"""
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint", default=None, type=str, help="model checkpoint path to restart training")
parser.add_argument('--path', type=str, default='./bist-parser/preprocess/output')
parser.add_argument('--stub', type=str, default='all', help="Description")
parser.add_argument('--log_dir', type=str, default='log/')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--l2', type=float, default=0.01)
parser.add_argument('--vector_l2', action='store_true')
parser.add_argument('--opt', type=str, default='adam')
parser.add_argument('--lr', type=float, default=6.25e-5)
parser.add_argument('--lr_warmup', type=float, default=0.002)
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--b1', type=float, default=0.9)
parser.add_argument('--b2', type=float, default=0.999)
parser.add_argument('--e', type=float, default=1e-8)
parser.add_argument('--n_transfer', type=int, default=12)
parser.add_argument('--lm_coef', type=float, default=0.5)
#parser.add_argument('--n_valid', type=int, default=374)
# Standard for pre-trained model START
parser.add_argument('--n_embd', type=int, default=768) # This is the internal feature width
parser.add_argument('--n_head', type=int, default=12)
parser.add_argument('--n_layer', type=int, default=12)
parser.add_argument('--embd_pdrop', type=float, default=0.1)
parser.add_argument('--attn_pdrop', type=float, default=0.1)
parser.add_argument('--resid_pdrop', type=float, default=0.1)
parser.add_argument('--clf_pdrop', type=float, default=0.1)
parser.add_argument('--afn', type=str, default='gelu')
# Standard for pre-trained model END
parser.add_argument('--encoder_path', type=str, default=pretrained_model_path+'/encoder_bpe_40000.json')
parser.add_argument('--bpe_path', type=str, default=pretrained_model_path+'/vocab_40000.bpe')
parser.add_argument('--relation_hdf5', type=str, default='coco_train.conll_v32.hdf5')
parser.add_argument('--tokens_special', type=int, default=3) # Printed out by relation_split_to_hdf5
parser.add_argument('--token_clf', type=int, default=40480) # Printed out by relation_split_to_hdf5
parser.add_argument('--vocab_count', type=int, default=40481) # Printed out by relation_split_to_hdf5
#parser.add_argument('--n_ctx', type=int, default=32) # Max length of input texts in bpes - get this from input hdf5 shapes
# class : 0=IGNORE, 1=same, 2=SUBJECT-OBJECT, 3=VERB'S-OBJECT, 4=ATTRIB, 5=VERB
parser.add_argument('--n_classes', type=int, default=6) # #label classes = len({0, 1, 2,3, 4, 5})
parser.add_argument('--batch_size_per_gpu', type=int, default=128) # 9.6Gb on TitanX
parser.add_argument('--n_epoch', type=int, default=4)
parser.add_argument("--tz", type=str, default='Asia/Singapore', help="Timezone for local finish time estimation")
parser.add_argument('--dep_fac', type=float, default=5.0)
parser.add_argument('--extra_blocks', type=int, default=1)
parser.add_argument('--predict', action='store_true')
args = parser.parse_args()
print(args)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
tz = pytz.timezone(args.tz)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("device", device, "n_gpu", n_gpu)
token_clf = args.token_clf
relation_hdf5 = os.path.join(args.path, args.relation_hdf5)
train_dataset = Hdf5Dataset(h5_path=relation_hdf5, vocab_count=args.vocab_count)
train_size = len(train_dataset)
n_ctx = train_dataset.n_ctx
batch_size = args.batch_size_per_gpu
n_gpus = torch.cuda.device_count()
if n_gpus > 1: # https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html
batch_size *= n_gpus
n_updates_total = (train_size // batch_size) * args.n_epoch
model_stepwise = StepwiseClassifierModel(args, n_classifier=args.n_classes, n_ctx=n_ctx,
vocab_count=args.vocab_count, extra_blocks=args.extra_blocks)
model_opt = OpenAIAdam(model_stepwise.parameters(),
lr=args.lr, schedule=args.lr_schedule,
warmup=args.lr_warmup, t_total=n_updates_total,
b1=args.b1, b2=args.b2, e=args.e,
                           l2=args.l2, vector_l2=args.vector_l2,
max_grad_norm=args.max_grad_norm)
epoch_start, epoch_max, loss_best = -1, args.n_epoch, None
if args.checkpoint is None:
load_openai_pretrained_model(
model_stepwise.transformer,
n_special=args.tokens_special, n_ctx=n_ctx, # n_ctx adjusts embedding size to include positional
path=pretrained_model_path+'/',
path_names=os.path.join('.', 'orig', 'pytorch-openai-transformer-lm')+'/',
)
model_stepwise.to(device)
if torch.cuda.device_count() > 1: # https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html
print("Let's use", torch.cuda.device_count(), "GPUs!")
model_stepwise = nn.DataParallel(model_stepwise)
os.makedirs('./checkpoints', exist_ok=True)
if args.checkpoint is not None:
checkpoint = torch.load(args.checkpoint, map_location=lambda storage, loc: storage)
epoch_start = checkpoint['epoch']
#from collections import OrderedDict
#def fix_dict(state_dict):
# new_state_dict = OrderedDict()
# for k, v in state_dict.items():
# name = k
# if name.startswith('module.'):
# name = k[7:] # remove 'module.' of dataparallel
# new_state_dict[name]=v
# return new_state_dict
#
#model.load_state_dict(new_state_dict)
model_stepwise.load_state_dict(checkpoint['model'])
model_opt.load_state_dict(checkpoint['optimizer'])
#lr_scheduler = get_lr_scheduler(optimizer)
#lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
print("Loaded %s - assuming epoch_now=%d" % (args.checkpoint, epoch_start,))
if args.predict:
# Predict out results for all the 'relation_hdf5' instead (batch_size=1 not efficient, but 'sure')
#test_loader = DataLoader(dataset=train_dataset, batch_size=1, shuffle=False) # , num_workers=1
# Predict out results for all the 'relation_hdf5' instead
test_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=False) # , num_workers=1
run_predictions(test_loader=test_loader, output_file="%s_%s.npz" % (relation_hdf5, args.stub))
#run_predictions(test_loader=test_loader, output_file="%s_%s.conll" % (relation_hdf5, args.stub))
exit(0)
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=False) # 2 leads to device side asserts... , num_workers=1
try:
idx_loss_check, loss_recent_tot = 0, 0.
for epoch in range(epoch_start+1, epoch_max): # So this refers to the epoch-end value
time_estimate_last = t_start = time.time()
model_stepwise.train()
for idx, (features, labels, deps) in enumerate(train_loader):
features, labels, deps = features.to(device), labels.to(device), deps.to(device)
model_opt.zero_grad()
out_class_logits, out_deps_logits = model_stepwise(features)
#batch_loss = ce_loss(output, target)
# https://pytorch.org/docs/stable/nn.html?highlight=loss#torch.nn.BCEWithLogitsLoss
class_loss = nn.CrossEntropyLoss(reduction='none')( out_class_logits, labels )
#print("class_loss.size()=", class_loss.size())
# class_loss.size()= torch.Size([8, 128])
class_loss_tot = class_loss.sum()
# The dep loss should be ignored for those deps with class==0
dep_loss = nn.CrossEntropyLoss(reduction='none')( out_deps_logits, deps )
#print("dep_loss.size()=", dep_loss.size())
# dep_loss.size()= torch.Size([8, 128])
#dep_loss_masked = torch.where(labels>0, dep_loss, zero) # This zeros out all positions where labels == 0
#dep_loss_tot = dep_loss_masked.sum() / batch_size
dep_loss_tot = dep_loss.masked_fill_( labels==0, 0. ).sum()
factor_hints="Factor hints (class_loss=%8.4f, deps_loss=%10.4f, fac=%.8f)" % (
class_loss_tot.item()/batch_size*100.,
dep_loss_tot.item()/batch_size*100.,
class_loss_tot.item()/dep_loss_tot.item(), )
#factor hints : (231.14927673339844, 225.23297119140625, 1.0262674932124587)
batch_loss = class_loss_tot + args.dep_fac * dep_loss_tot
batch_loss.backward()
model_opt.step()
loss_this = batch_loss.item()
loss_recent_tot += loss_this
if idx % 10 == 0:
print('%.1f%% of epoch %d' % (idx / float(len(train_loader)) * 100, epoch,), end='\r') # Python 3 FTW!
if idx % 100 == 0:
print(epoch, idx, factor_hints)
sentences_since_last_check = (idx-idx_loss_check)*batch_size
#if sentences_since_last_check > 50000: # Potentially save every 50000 sentences (~30mins on TitanX)
if sentences_since_last_check > 200000: # Potentially save every 200000 sentences (~2hrs on TitanX)
loss_recent = loss_recent_tot / float(sentences_since_last_check) # loss per sentence
if loss_best is None or loss_recent<loss_best: # Save model if loss has decreased
fname = './checkpoints/model-grapher_%s_%02d-%07d.pth' % (args.stub, epoch, idx*batch_size,)
print("Saving Checkpoint : '%s', loss_recent=%.4f" % (fname, loss_recent/batch_size*100., ))
torch.save(dict(
epoch=epoch,
model=model_stepwise.state_dict(),
optimizer=model_opt.state_dict(),
#lr_scheduler=lr_scheduler.state_dict(),
), fname)
loss_best=loss_recent
idx_loss_check, loss_recent_tot = idx, 0. # Restart running tallies
t_now = time.time()
if t_now - time_estimate_last>5*60.: # Update every 5 minutes
calc_duration = t_now-t_start
calc_fraction = (idx*batch_size)/len(train_dataset)
epoch_duration = calc_duration/calc_fraction
epoch_max_secs = (epoch_max-(epoch+calc_fraction))*epoch_duration
epoch_max_end = epoch_max_secs + time.time() # This is since the epoch in seconds
print("Time used for %.2f of epoch %d: %.1f seconds" % (calc_fraction, epoch, calc_duration, ))
print(" Time per 1000 lines : %.3f seconds" % (epoch_duration/len(train_dataset)*1000., ))
print(" Expected finish in : %.2f hours" % ( epoch_max_secs/60/60, ))
#print(" Expected finish time : %s (server)" % ( datetime.fromtimestamp(epoch_max_end).strftime("%A, %B %d, %Y %H:%M:%S %Z%z"), ))
print(" Expected finish time : %s (%s)" % (
datetime.fromtimestamp(epoch_max_end, timezone.utc).astimezone(tz=tz).strftime("%A, %B %d, %Y %H:%M:%S %Z%z"), args.tz, ))
time_estimate_last = time.time() # Keep track of estimate times
idx_loss_check -= len(train_dataset)/batch_size # Keep track of reset idxs
# End-of-epoch saving
fname = './checkpoints/model-grapher_%s_%02d-%07d_end-epoch.pth' % (args.stub, epoch, idx*batch_size,)
print("Saving End-epoch checkpoint : '%s'" % (fname, ))
torch.save(dict(
epoch=epoch,
model=model_stepwise.state_dict(),
optimizer=model_opt.state_dict(),
), fname)
except KeyboardInterrupt:
print("Interrupted. Releasing resources...")
finally:
train_dataset.close()
exit(0)
|
play-1.2.4/python/Lib/site-packages/Rpyc/Servers/Users.py | AppSecAI-TEST/restcommander | 550 | 12726306 | <gh_stars>100-1000
#
# chmod this file securely and be sure to remove the default users
#
users = {
"frodo" : "1ring",
"yossarian" : "catch22",
"ayla" : "jondalar",
}
|
model.py | dhirajpatnaik16297/text-gan-tensorflow | 102 | 12726329 | """ Text GAN
Adversarial networks applied to language models using Gumbel Softmax.
Can be used as pure language model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from collections import namedtuple
import tensorflow as tf
from tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step
# -- local imports
from data_loader import get_corpus_size, build_vocab, preprocess, get_input_queues
import layers as lay
from decoders import gumbel_decoder_fn
GENERATOR_PREFIX = "generator"
DISCRIMINATOR_PREFIX = "discriminator"
GeneratorTuple = namedtuple("Generator",
["rnn_outputs", "flat_logits", "probs", "loss", "embedding_matrix", "output_projections"])
DiscriminatorTuple = namedtuple("Discriminator",
["rnn_final_state", "prediction_logits", "loss"])
# TODO: separate the variables for generator and discriminators
class Model:
def __init__(self, corpus, **opts):
self.corpus = corpus
self.opts = opts
self.global_step = get_or_create_global_step()
self.increment_global_step_op = tf.assign(self.global_step, self.global_step + 1, name="increment_global_step")
self.corpus_size = get_corpus_size(self.corpus["train"])
self.corpus_size_valid = get_corpus_size(self.corpus["valid"])
self.word2idx, self.idx2word = build_vocab(self.corpus["train"])
self.vocab_size = len(self.word2idx)
self.generator_template = tf.make_template(GENERATOR_PREFIX, generator)
self.discriminator_template = tf.make_template(DISCRIMINATOR_PREFIX, discriminator)
self.enqueue_data, _, source, target, sequence_length = \
prepare_data(self.corpus["train"], self.word2idx, num_threads=7, **self.opts)
# TODO: option to either do pretrain or just generate?
self.g_tensors_pretrain = self.generator_template(
source, target, sequence_length, self.vocab_size, **self.opts)
self.enqueue_data_valid, self.input_ph, source_valid, target_valid, sequence_length_valid = \
prepare_data(self.corpus["valid"], self.word2idx, num_threads=1, **self.opts)
self.g_tensors_pretrain_valid = self.generator_template(
source_valid, target_valid, sequence_length_valid, self.vocab_size, **self.opts)
self.decoder_fn = prepare_custom_decoder(
sequence_length, self.g_tensors_pretrain.embedding_matrix, self.g_tensors_pretrain.output_projections)
self.g_tensors_fake = self.generator_template(
source, target, sequence_length, self.vocab_size, decoder_fn=self.decoder_fn, **self.opts)
self.g_tensors_fake_valid = self.generator_template(
source_valid, target_valid, sequence_length_valid, self.vocab_size, decoder_fn=self.decoder_fn, **self.opts)
# TODO: using the rnn outputs from pretraining as "real" instead of target embeddings (aka professor forcing)
self.d_tensors_real = self.discriminator_template(
self.g_tensors_pretrain.rnn_outputs, sequence_length, is_real=True, **self.opts)
# TODO: check to see if sequence_length is correct
self.d_tensors_fake = self.discriminator_template(
self.g_tensors_fake.rnn_outputs, None, is_real=False, **self.opts)
self.g_tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=GENERATOR_PREFIX)
self.d_tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=DISCRIMINATOR_PREFIX)
def prepare_data(path, word2idx, num_threads=8, **opts):
with tf.device("/cpu:0"):
enqueue_data, dequeue_batch = get_input_queues(
path, word2idx, batch_size=opts["batch_size"], num_threads=num_threads)
# TODO: put this logic somewhere else
input_ph = tf.placeholder_with_default(dequeue_batch, (None, None))
source, target, sequence_length = preprocess(input_ph)
return enqueue_data, input_ph, source, target, sequence_length
def prepare_custom_decoder(sequence_length, embedding_matrix, output_projections):
# TODO: this is brittle, global variables
cell = tf.get_collection("rnn_cell")[0]
encoder_state = cell.zero_state(tf.shape(sequence_length)[0], tf.float32)
# embedding_matrix = tf.get_collection("embedding_matrix")[0]
# output_projections = tf.get_collection("output_projections")[:2] # TODO: repeated output_projections
maximum_length = tf.reduce_max(sequence_length) + 3
decoder_fn = gumbel_decoder_fn(encoder_state, embedding_matrix, output_projections, maximum_length)
return decoder_fn
def generator(source, target, sequence_length, vocab_size, decoder_fn=None, **opts):
"""
Args:
source: TensorFlow queue or placeholder tensor for word ids for source
target: TensorFlow queue or placeholder tensor for word ids for target
sequence_length: TensorFlow queue or placeholder tensor for number of word ids for each sentence
vocab_size: max vocab size determined from data
decoder_fn: if using custom decoder_fn else use the default dynamic_rnn
"""
tf.logging.info(" Setting up generator")
embedding_layer = lay.embedding_layer(vocab_size, opts["embedding_dim"], name="embedding_matrix")
# TODO: add batch norm?
rnn_outputs = (
source >>
embedding_layer >>
lay.word_dropout_layer(keep_prob=opts["word_dropout_keep_prob"]) >>
lay.recurrent_layer(hidden_dims=opts["rnn_hidden_dim"], keep_prob=opts["recurrent_dropout_keep_prob"],
sequence_length=sequence_length, decoder_fn=decoder_fn, name="rnn_cell")
)
output_projection_layer = lay.dense_layer(hidden_dims=vocab_size, name="output_projections")
flat_logits = (
rnn_outputs >>
lay.reshape_layer(shape=(-1, opts["rnn_hidden_dim"])) >>
output_projection_layer
)
probs = flat_logits >> lay.softmax_layer()
embedding_matrix = embedding_layer.get_variables_in_scope()
output_projections = output_projection_layer.get_variables_in_scope()
if decoder_fn is not None:
return GeneratorTuple(rnn_outputs=rnn_outputs, flat_logits=flat_logits, probs=probs, loss=None,
embedding_matrix=embedding_matrix[0], output_projections=output_projections)
loss = (
flat_logits >>
lay.cross_entropy_layer(target=target) >>
lay.reshape_layer(shape=tf.shape(target)) >>
lay.mean_loss_by_example_layer(sequence_length=sequence_length)
)
# TODO: add dropout penalty
return GeneratorTuple(rnn_outputs=rnn_outputs, flat_logits=flat_logits, probs=probs, loss=loss,
embedding_matrix=embedding_matrix[0], output_projections=output_projections)
def discriminator(input_vectors, sequence_length, is_real=True, **opts):
"""
Args:
input_vectors: output of the RNN either from real or generated data
sequence_length: TensorFlow queue or placeholder tensor for number of word ids for each sentence
is_real: if True, RNN outputs when feeding in actual data, if False feeds in generated data
"""
tf.logging.info(" Setting up discriminator")
rnn_final_state = (
input_vectors >>
lay.dense_layer(hidden_dims=opts["embedding_dim"]) >>
lay.recurrent_layer(sequence_length=sequence_length, hidden_dims=opts["rnn_hidden_dim"],
return_final_state=True)
)
prediction_logits = (
rnn_final_state >>
lay.dense_layer(hidden_dims=opts["output_hidden_dim"]) >>
lay.relu_layer() >>
lay.dropout_layer(opts["output_dropout_keep_prob"]) >>
lay.dense_layer(hidden_dims=opts["output_hidden_dim"]) >>
lay.relu_layer() >>
lay.dropout_layer(opts["output_dropout_keep_prob"]) >>
lay.dense_layer(hidden_dims=1)
)
if is_real:
target = tf.ones_like(prediction_logits)
else:
target = tf.zeros_like(prediction_logits)
# TODO: add accuracy
loss = (
prediction_logits >>
lay.sigmoid_cross_entropy_layer(target=target)
)
# TODO: return logits in case for WGAN and l2 GANs
return DiscriminatorTuple(rnn_final_state=rnn_final_state, prediction_logits=prediction_logits, loss=loss)
if __name__ == "__main__":
from data_loader import DATA_PATH
from train import opts
corpus = DATA_PATH["ptb"]
model = Model(corpus, **opts)
|
dialogue-engine/test/programytest/storage/asserts/store/assert_rdfs.py | cotobadesign/cotoba-agent-oss | 104 | 12726346 | <reponame>cotobadesign/cotoba-agent-oss
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
import os
import os.path
from programy.rdf.collection import RDFCollection
from programy.storage.entities.store import Store
class RDFStoreAsserts(unittest.TestCase):
def assert_rdf_storage(self, store):
store.empty()
store.add_rdf("ACTIVITY", "ACT", "hasPurpose", "to entertain by performing")
store.add_rdf("ACTIVITY", "ACT", "hasSize", "0")
store.add_rdf("ACTIVITY", "ACT", "hasSyllables", "1")
store.add_rdf("ACTIVITY", "ACT", "isa", "Activity0")
store.add_rdf("ACTIVITY", "ACT", "lifeArea", "Recreation")
store.commit()
rdf_collection = RDFCollection()
store.load(rdf_collection, "ACTIVITY")
self.assertTrue(rdf_collection.contains("ACTIVITY"))
self.assertTrue(rdf_collection.has_subject('ACT'))
self.assertTrue(rdf_collection.has_predicate('ACT', "hasPurpose"))
self.assertTrue(rdf_collection.has_object('ACT', "hasPurpose", "to entertain by performing"))
def assert_upload_from_text(self, store):
store.empty()
store.upload_from_text("ACTIVITY", """
ACT:hasPurpose:to entertain by performing
ACT:hasSize:0
ACT:hasSyllables:1
ACT:isa:Activity
ACT:lifeArea:Recreation
ADVENTURE:hasPurpose:to provide new experience
ADVENTURE:hasSize:0
ADVENTURE:hasSyllables:3
ADVENTURE:isa:Activity
ADVENTURE:lifeArea:Recreation
FISHING:hasPurpose:to hunt for fish
FISHING:hasSize:0
FISHING:hasSyllables:2
FISHING:isa:Activity
FISHING:lifeArea:Recreation
""")
rdf_collection = RDFCollection()
store.load(rdf_collection, "ACTIVITY")
self.assertTrue(rdf_collection.contains("ACTIVITY"))
self.assertTrue(rdf_collection.has_subject('ACT'))
self.assertTrue(rdf_collection.has_predicate('ACT', "hasPurpose"))
self.assertTrue(rdf_collection.has_object('ACT', "hasPurpose", "to entertain by performing"))
def assert_upload_from_text_file(self, store):
store.empty()
store.upload_from_file(os.path.dirname(__file__)+os.sep+"data"+os.sep+"rdfs"+os.sep+"text"+os.sep+"activity.rdf")
rdf_collection = RDFCollection()
store.load(rdf_collection, "ACTIVITY")
self.assertTrue(rdf_collection.contains("ACTIVITY"))
self.assertTrue(rdf_collection.has_subject('ACT'))
self.assertTrue(rdf_collection.has_predicate('ACT', "hasPurpose"))
self.assertTrue(rdf_collection.has_object('ACT', "hasPurpose", "to entertain by performing"))
def assert_upload_text_files_from_directory_no_subdir(self, store):
store.empty()
store.upload_from_directory(os.path.dirname(__file__)+os.sep+"data"+os.sep+"rdfs"+os.sep+"text", subdir=False)
rdf_collection = RDFCollection()
store.load(rdf_collection, "ACTIVITY")
self.assertTrue(rdf_collection.contains("ACTIVITY"))
self.assertTrue(rdf_collection.has_subject('ACT'))
self.assertTrue(rdf_collection.has_predicate('ACT', "hasPurpose"))
self.assertTrue(rdf_collection.has_object('ACT', "hasPurpose", "to entertain by performing"))
def assert_upload_from_csv_file(self, store):
store.empty()
store.upload_from_file(os.path.dirname(__file__)+os.sep+"data"+os.sep+"rdfs"+os.sep+"csv"+os.sep+"activity.csv", format=Store.CSV_FORMAT)
rdf_collection = RDFCollection()
store.load(rdf_collection, "ACTIVITY")
self.assertTrue(rdf_collection.contains("ACTIVITY"))
self.assertTrue(rdf_collection.has_subject('ACT'))
self.assertTrue(rdf_collection.has_predicate('ACT', "hasPurpose"))
self.assertTrue(rdf_collection.has_object('ACT', "hasPurpose", "to entertain by performing"))
def assert_upload_csv_files_from_directory_with_subdir(self, store):
store.empty()
store.upload_from_directory(os.path.dirname(__file__)+os.sep+"data"+os.sep+"rdfs"+os.sep+"csv", subdir=True, format=Store.CSV_FORMAT)
rdf_collection = RDFCollection()
store.load_all(rdf_collection)
self.assertTrue(rdf_collection.contains("ACTIVITY"))
self.assertTrue(rdf_collection.has_subject('ACT'))
self.assertTrue(rdf_collection.has_predicate('ACT', "hasPurpose"))
self.assertTrue(rdf_collection.has_object('ACT', "hasPurpose", "to entertain by performing"))
self.assertTrue(rdf_collection.contains("ANIMAL"))
self.assertTrue(rdf_collection.has_subject('ANT'))
self.assertTrue(rdf_collection.has_predicate('ANT', "hasPurpose"))
self.assertTrue(rdf_collection.has_object('ANT', "hasPurpose", "to make anthills"))
|
contrib/stack/alosStack/estimate_slc_offset.py | yuankailiu/isce2 | 1,133 | 12726373 | #!/usr/bin/env python3
#
# Author: <NAME>
# Copyright 2015-present, NASA-JPL/Caltech
#
import os
import glob
import datetime
import numpy as np
import isce, isceobj
import mroipac
from mroipac.ampcor.Ampcor import Ampcor
from isceobj.Alos2Proc.Alos2ProcPublic import topo
from isceobj.Alos2Proc.Alos2ProcPublic import geo2rdr
from isceobj.Alos2Proc.Alos2ProcPublic import waterBodyRadar
from isceobj.Alos2Proc.Alos2ProcPublic import reformatGeometricalOffset
from isceobj.Alos2Proc.Alos2ProcPublic import writeOffset
from isceobj.Alos2Proc.Alos2ProcPublic import cullOffsets
from isceobj.Alos2Proc.Alos2ProcPublic import computeOffsetFromOrbit
from StackPulic import loadTrack
from StackPulic import stackDateStatistics
from StackPulic import acquisitionModesAlos2
def cmdLineParse():
'''
command line parser.
'''
import sys
import argparse
parser = argparse.ArgumentParser(description='estimate offset between a pair of SLCs for a number of dates')
parser.add_argument('-idir', dest='idir', type=str, required=True,
help = 'input directory where data of each date (YYMMDD) is located. only folders are recognized')
parser.add_argument('-ref_date', dest='ref_date', type=str, required=True,
help = 'reference date. format: YYMMDD')
parser.add_argument('-sec_date', dest='sec_date', type=str, nargs='+', default=[],
                        help = 'a number of secondary dates separated by blanks. format: YYMMDD YYMMDD YYMMDD. If provided, only estimate offsets of these dates')
parser.add_argument('-wbd', dest='wbd', type=str, default=None,
help = 'water body used to determine number of offsets in range and azimuth')
parser.add_argument('-dem', dest='dem', type=str, default=None,
help = 'if water body is provided, dem file must also be provided')
parser.add_argument('-use_wbd_offset', dest='use_wbd_offset', action='store_true', default=False,
help='use water body to dertermine number of matching offsets')
parser.add_argument('-num_rg_offset', dest='num_rg_offset', type=int, nargs='+', action='append', default=[],
help = 'number of offsets in range. format (e.g. 2 frames, 3 swaths): -num_rg_offset 11 12 13 -num_rg_offset 14 15 16')
parser.add_argument('-num_az_offset', dest='num_az_offset', type=int, nargs='+', action='append', default=[],
help = 'number of offsets in azimuth. format (e.g. 2 frames, 3 swaths): -num_az_offset 11 12 13 -num_az_offset 14 15 16')
if len(sys.argv) <= 1:
print('')
parser.print_help()
sys.exit(1)
else:
return parser.parse_args()
if __name__ == '__main__':
inps = cmdLineParse()
#get user parameters from input
idir = inps.idir
dateReference = inps.ref_date
dateSecondary = inps.sec_date
wbd = inps.wbd
dem = inps.dem
useWbdForNumberOffsets = inps.use_wbd_offset
numberOfOffsetsRangeInput = inps.num_rg_offset
numberOfOffsetsAzimuthInput = inps.num_az_offset
if wbd is not None:
wbdFile = os.path.abspath(wbd)
else:
wbdFile = None
if dem is not None:
demFile = os.path.abspath(dem)
else:
demFile = None
#######################################################
spotlightModes, stripmapModes, scansarNominalModes, scansarWideModes, scansarModes = acquisitionModesAlos2()
warningMessage = ''
#get date statistics
dateDirs, dates, frames, swaths, dateIndexReference = stackDateStatistics(idir, dateReference)
ndate = len(dates)
nframe = len(frames)
nswath = len(swaths)
#load reference track
referenceTrack = loadTrack(dateDirs[dateIndexReference], dates[dateIndexReference])
dateSecondaryFirst = None
for idate in range(ndate):
if idate == dateIndexReference:
continue
if dateSecondary != []:
if dates[idate] not in dateSecondary:
continue
dateSecondaryFirst = dates[idate]
break
if dateSecondaryFirst is None:
raise Exception('no secondary date is to be processed\n')
#set number of matching points
numberOfOffsetsRangeUsed = [[None for j in range(nswath)] for i in range(nframe)]
numberOfOffsetsAzimuthUsed = [[None for j in range(nswath)] for i in range(nframe)]
for i, frameNumber in enumerate(frames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
for j, swathNumber in enumerate(range(swaths[0], swaths[-1] + 1)):
swathDir = 's{}'.format(swathNumber)
print('determine number of range/azimuth offsets frame {}, swath {}'.format(frameNumber, swathNumber))
referenceSwath = referenceTrack.frames[i].swaths[j]
#1. set initinial numbers
#in case there are long time span pairs that have bad coherence
ratio = np.sqrt(1.5)
if referenceTrack.operationMode in scansarModes:
numberOfOffsetsRange = int(10*ratio+0.5)
numberOfOffsetsAzimuth = int(40*ratio+0.5)
else:
numberOfOffsetsRange = int(20*ratio+0.5)
numberOfOffsetsAzimuth = int(20*ratio+0.5)
#2. change the initial numbers using water body
if useWbdForNumberOffsets and (wbdFile is not None) and (demFile is not None):
numberRangeLooks=100
numberAzimuthLooks=100
#compute land ratio using topo module
# latFile = 'lat_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# lonFile = 'lon_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# hgtFile = 'hgt_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# losFile = 'los_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# wbdRadarFile = 'wbd_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
latFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'lat.rdr')
lonFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'lon.rdr')
hgtFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'hgt.rdr')
losFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'los.rdr')
wbdRadarFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'wbd.rdr')
topo(referenceSwath, referenceTrack, demFile, latFile, lonFile, hgtFile, losFile=losFile,
incFile=None, mskFile=None,
numberRangeLooks=numberRangeLooks, numberAzimuthLooks=numberAzimuthLooks, multilookTimeOffset=False)
waterBodyRadar(latFile, lonFile, wbdFile, wbdRadarFile)
wbdImg = isceobj.createImage()
wbdImg.load(wbdRadarFile+'.xml')
width = wbdImg.width
length = wbdImg.length
wbd = np.fromfile(wbdRadarFile, dtype=np.byte).reshape(length, width)
landRatio = np.sum(wbd==0) / (length*width)
if (landRatio <= 0.00125):
print('\n\nWARNING: land too small for estimating slc offsets at frame {}, swath {}'.format(frameNumber, swathNumber))
print('proceed to use geometric offsets for forming interferogram')
print('but please consider not using this swath\n\n')
warningMessage += 'land too small for estimating slc offsets at frame {}, swath {}, use geometric offsets\n'.format(frameNumber, swathNumber)
numberOfOffsetsRange = 0
numberOfOffsetsAzimuth = 0
else:
#put the results on a grid with a specified interval
interval = 0.2
axisRatio = int(np.sqrt(landRatio)/interval)*interval + interval
if axisRatio > 1:
axisRatio = 1
numberOfOffsetsRange = int(numberOfOffsetsRange/axisRatio)
numberOfOffsetsAzimuth = int(numberOfOffsetsAzimuth/axisRatio)
else:
warningMessage += 'no water mask used to determine number of matching points. frame {} swath {}\n'.format(frameNumber, swathNumber)
#3. user's settings
if numberOfOffsetsRangeInput != []:
numberOfOffsetsRange = numberOfOffsetsRangeInput[i][j]
if numberOfOffsetsAzimuthInput != []:
numberOfOffsetsAzimuth = numberOfOffsetsAzimuthInput[i][j]
#4. save final results
numberOfOffsetsRangeUsed[i][j] = numberOfOffsetsRange
numberOfOffsetsAzimuthUsed[i][j] = numberOfOffsetsAzimuth
#estimate offsets
for idate in range(ndate):
if idate == dateIndexReference:
continue
if dateSecondary != []:
if dates[idate] not in dateSecondary:
continue
secondaryTrack = loadTrack(dateDirs[idate], dates[idate])
for i, frameNumber in enumerate(frames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
for j, swathNumber in enumerate(range(swaths[0], swaths[-1] + 1)):
swathDir = 's{}'.format(swathNumber)
print('estimating offset frame {}, swath {}'.format(frameNumber, swathNumber))
referenceDir = os.path.join(dateDirs[dateIndexReference], frameDir, swathDir)
secondaryDir = os.path.join(dateDirs[idate], frameDir, swathDir)
referenceSwath = referenceTrack.frames[i].swaths[j]
secondarySwath = secondaryTrack.frames[i].swaths[j]
#compute geometrical offsets
if (wbdFile is not None) and (demFile is not None) and (numberOfOffsetsRangeUsed[i][j] == 0) and (numberOfOffsetsAzimuthUsed[i][j] == 0):
                #compute geometrical offsets
# latFile = 'lat_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# lonFile = 'lon_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# hgtFile = 'hgt_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# losFile = 'los_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# rgOffsetFile = 'rg_offset_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# azOffsetFile = 'az_offset_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# wbdRadarFile = 'wbd_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
latFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'lat.rdr')
lonFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'lon.rdr')
hgtFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'hgt.rdr')
losFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'los.rdr')
#put them in current date directory
rgOffsetFile = os.path.join(idir, dates[idate], frameDir, swathDir, 'rg_offset.rdr')
azOffsetFile = os.path.join(idir, dates[idate], frameDir, swathDir, 'az_offset.rdr')
wbdRadarFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'wbd.rdr')
geo2rdr(secondarySwath, secondaryTrack, latFile, lonFile, hgtFile, rgOffsetFile, azOffsetFile, numberRangeLooks=numberRangeLooks, numberAzimuthLooks=numberAzimuthLooks, multilookTimeOffset=False)
reformatGeometricalOffset(rgOffsetFile, azOffsetFile, os.path.join(secondaryDir, 'cull.off'), rangeStep=numberRangeLooks, azimuthStep=numberAzimuthLooks, maximumNumberOfOffsets=2000)
os.remove(rgOffsetFile)
os.remove(rgOffsetFile+'.vrt')
os.remove(rgOffsetFile+'.xml')
os.remove(azOffsetFile)
os.remove(azOffsetFile+'.vrt')
os.remove(azOffsetFile+'.xml')
#estimate offsets using ampcor
else:
ampcor = Ampcor(name='insarapp_slcs_ampcor')
ampcor.configure()
mSLC = isceobj.createSlcImage()
mSLC.load(os.path.join(referenceDir, dates[dateIndexReference]+'.slc.xml'))
mSLC.filename = os.path.join(referenceDir, dates[dateIndexReference]+'.slc')
mSLC.extraFilename = os.path.join(referenceDir, dates[dateIndexReference]+'.slc.vrt')
mSLC.setAccessMode('read')
mSLC.createImage()
sSLC = isceobj.createSlcImage()
sSLC.load(os.path.join(secondaryDir, dates[idate]+'.slc.xml'))
sSLC.filename = os.path.join(secondaryDir, dates[idate]+'.slc')
sSLC.extraFilename = os.path.join(secondaryDir, dates[idate]+'.slc.vrt')
sSLC.setAccessMode('read')
sSLC.createImage()
ampcor.setImageDataType1('complex')
ampcor.setImageDataType2('complex')
ampcor.setReferenceSlcImage(mSLC)
ampcor.setSecondarySlcImage(sSLC)
#MATCH REGION
#compute an offset at image center to use
rgoff, azoff = computeOffsetFromOrbit(referenceSwath, referenceTrack, secondarySwath, secondaryTrack,
referenceSwath.numberOfSamples * 0.5,
referenceSwath.numberOfLines * 0.5)
                    #it seems that we cannot use 0, haven't looked into the problem
if rgoff == 0:
rgoff = 1
if azoff == 0:
azoff = 1
firstSample = 1
if rgoff < 0:
firstSample = int(35 - rgoff)
firstLine = 1
if azoff < 0:
firstLine = int(35 - azoff)
ampcor.setAcrossGrossOffset(rgoff)
ampcor.setDownGrossOffset(azoff)
ampcor.setFirstSampleAcross(firstSample)
ampcor.setLastSampleAcross(mSLC.width)
ampcor.setNumberLocationAcross(numberOfOffsetsRangeUsed[i][j])
ampcor.setFirstSampleDown(firstLine)
ampcor.setLastSampleDown(mSLC.length)
ampcor.setNumberLocationDown(numberOfOffsetsAzimuthUsed[i][j])
#MATCH PARAMETERS
#full-aperture mode
if referenceTrack.operationMode in scansarModes:
ampcor.setWindowSizeWidth(64)
ampcor.setWindowSizeHeight(512)
#note this is the half width/length of search area, number of resulting correlation samples: 32*2+1
ampcor.setSearchWindowSizeWidth(32)
ampcor.setSearchWindowSizeHeight(32)
#triggering full-aperture mode matching
ampcor.setWinsizeFilt(8)
ampcor.setOversamplingFactorFilt(64)
#regular mode
else:
ampcor.setWindowSizeWidth(64)
ampcor.setWindowSizeHeight(64)
ampcor.setSearchWindowSizeWidth(32)
ampcor.setSearchWindowSizeHeight(32)
#REST OF THE STUFF
ampcor.setAcrossLooks(1)
ampcor.setDownLooks(1)
ampcor.setOversamplingFactor(64)
ampcor.setZoomWindowSize(16)
#1. The following not set
#Matching Scale for Sample/Line Directions (-) = 1. 1.
#should add the following in Ampcor.py?
#if not set, in this case, Ampcor.py'value is also 1. 1.
#ampcor.setScaleFactorX(1.)
#ampcor.setScaleFactorY(1.)
#MATCH THRESHOLDS AND DEBUG DATA
#2. The following not set
#in roi_pac the value is set to 0 1
#in isce the value is set to 0.001 1000.0
#SNR and Covariance Thresholds (-) = {s1} {s2}
#should add the following in Ampcor?
#THIS SHOULD BE THE ONLY THING THAT IS DIFFERENT FROM THAT OF ROI_PAC
#ampcor.setThresholdSNR(0)
#ampcor.setThresholdCov(1)
ampcor.setDebugFlag(False)
ampcor.setDisplayFlag(False)
#in summary, only two things not set which are indicated by 'The following not set' above.
#run ampcor
ampcor.ampcor()
offsets = ampcor.getOffsetField()
ampcorOffsetFile = os.path.join(secondaryDir, 'ampcor.off')
writeOffset(offsets, ampcorOffsetFile)
#finalize image, and re-create it
#otherwise the file pointer is still at the end of the image
mSLC.finalizeImage()
sSLC.finalizeImage()
##########################################
#3. cull offsets
##########################################
refinedOffsets = cullOffsets(offsets)
if refinedOffsets == None:
print('******************************************************************')
print('WARNING: There are not enough offsets left, so we are forced to')
print(' use offset without culling. frame {}, swath {}'.format(frameNumber, swathNumber))
print('******************************************************************')
warningMessage += 'not enough offsets left, use offset without culling. frame {} swath {}'.format(frameNumber, swathNumber)
refinedOffsets = offsets
cullOffsetFile = os.path.join(secondaryDir, 'cull.off')
writeOffset(refinedOffsets, cullOffsetFile)
#os.chdir('../')
#os.chdir('../')
#delete geometry files
for i, frameNumber in enumerate(frames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
for j, swathNumber in enumerate(range(swaths[0], swaths[-1] + 1)):
swathDir = 's{}'.format(swathNumber)
if (wbdFile is not None) and (demFile is not None):
# latFile = 'lat_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# lonFile = 'lon_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# hgtFile = 'hgt_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# losFile = 'los_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
# wbdRadarFile = 'wbd_f{}_{}_s{}.rdr'.format(i+1, frameNumber, swathNumber)
latFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'lat.rdr')
lonFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'lon.rdr')
hgtFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'hgt.rdr')
losFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'los.rdr')
wbdRadarFile = os.path.join(idir, dateSecondaryFirst, frameDir, swathDir, 'wbd.rdr')
os.remove(latFile)
os.remove(latFile+'.vrt')
os.remove(latFile+'.xml')
os.remove(lonFile)
os.remove(lonFile+'.vrt')
os.remove(lonFile+'.xml')
os.remove(hgtFile)
os.remove(hgtFile+'.vrt')
os.remove(hgtFile+'.xml')
os.remove(losFile)
os.remove(losFile+'.vrt')
os.remove(losFile+'.xml')
os.remove(wbdRadarFile)
os.remove(wbdRadarFile+'.vrt')
os.remove(wbdRadarFile+'.xml')
numberOfOffsetsUsedTxt = '\nnumber of offsets in cross correlation:\n'
numberOfOffsetsUsedTxt += ' frame swath range azimuth\n'
numberOfOffsetsUsedTxt += '============================================\n'
for i, frameNumber in enumerate(frames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
for j, swathNumber in enumerate(range(swaths[0], swaths[-1] + 1)):
swathDir = 's{}'.format(swathNumber)
numberOfOffsetsUsedTxt += ' {} {} {} {}\n'.format(frameNumber, swathNumber, numberOfOffsetsRangeUsed[i][j], numberOfOffsetsAzimuthUsed[i][j])
print(numberOfOffsetsUsedTxt)
if warningMessage != '':
print('\n'+warningMessage+'\n')
|
ansible/roles/lib_gcloud/build/lib/vminstance.py | fahlmant/openshift-tools | 164 | 12726390 | <reponame>fahlmant/openshift-tools
# pylint: skip-file
# pylint: disable=too-many-instance-attributes
class VMInstance(GCPResource):
'''Object to represent a gcp instance'''
resource_type = "compute.v1.instance"
# pylint: disable=too-many-arguments
def __init__(self,
rname,
project,
zone,
machine_type,
metadata,
tags,
disks,
network_interfaces,
service_accounts=None,
):
'''constructor for gcp resource'''
super(VMInstance, self).__init__(rname, VMInstance.resource_type, project, zone)
self._machine_type = machine_type
self._service_accounts = service_accounts
self._machine_type_url = None
self._tags = tags
self._metadata = []
if metadata and isinstance(metadata, dict):
self._metadata = {'items': [{'key': key, 'value': value} for key, value in metadata.items()]}
elif metadata and isinstance(metadata, list):
self._metadata = [{'key': label['key'], 'value': label['value']} for label in metadata]
self._disks = disks
self._network_interfaces = network_interfaces
self._properties = None
@property
def service_accounts(self):
'''property for resource service accounts '''
return self._service_accounts
@property
def network_interfaces(self):
'''property for resource machine network_interfaces '''
return self._network_interfaces
@property
def machine_type(self):
'''property for resource machine type '''
return self._machine_type
@property
def machine_type_url(self):
'''property for resource machine type url'''
if self._machine_type_url is None:
self._machine_type_url = Utils.zonal_compute_url(self.project, self.zone, 'machineTypes', self.machine_type)
return self._machine_type_url
@property
def tags(self):
'''property for resource tags '''
return self._tags
@property
def metadata(self):
'''property for resource metadata'''
return self._metadata
@property
def disks(self):
'''property for resource disks'''
return self._disks
@property
def properties(self):
'''property for holding the properties'''
if self._properties is None:
self._properties = {'zone': self.zone,
'machineType': self.machine_type_url,
'metadata': self.metadata,
'tags': self.tags,
'disks': self.disks,
'networkInterfaces': self.network_interfaces,
}
if self.service_accounts:
self._properties['serviceAccounts'] = self.service_accounts
return self._properties
def to_resource(self):
'''return the resource representation'''
return {'name': self.name,
'type': VMInstance.resource_type,
'properties': self.properties,
}
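# A minimal construction sketch (illustrative values only, not taken from openshift-tools;
# the disks and network_interfaces payloads would normally be built by the surrounding
# deployment-manager code):
#
#   vm = VMInstance('example-vm', 'my-project', 'us-central1-a', 'n1-standard-1',
#                   metadata={'env': 'dev'}, tags={'items': ['http-server']},
#                   disks=[{'boot': True, 'autoDelete': True}],
#                   network_interfaces=[{'network': 'global/networks/default'}])
#   vm.to_resource()
#   # -> {'name': 'example-vm', 'type': 'compute.v1.instance', 'properties': {...}}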
|
feature_extract/bresenham_circle.py | chexueji/easy_slam_tutorial | 231 | 12726410 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 21 15:02:36 2019
Implementation of the Bresenham circle-drawing algorithm
Blog tutorial (in Chinese):
https://blog.csdn.net/varyshare/article/details/96724103
@author: 知乎@Ai酱
"""
import numpy as np
import matplotlib.pyplot as plt
img = np.zeros((105,105)) # create a 105x105 canvas
count = 0
def draw(x,y):
"""
Draw the point (x, y).
Note: (x, y) has to be transformed into array coordinates (the graphics coordinate system),
because the array origin (0, 0) is the top-left corner while the original coordinate
system has (0, 0) at the center, and array row indices increase downwards.
"""
img[-y+int(img.shape[0]/2),x+int(img.shape[1]/2)] = 1
pass
r_pixel = 50 # radius of the circle, in pixels
# initialization: draw the first point, starting from the rightmost point on the horizontal axis
(x,y) = (r_pixel,0)
"""
By definition,
P_k = d1 + d2
d1 = distance of the first candidate point for the next step from the arc (negative)
d2 = distance of the second candidate point for the next step from the arc (positive)
For efficiency, P_{k+1} is usually computed by a recurrence of the form P_{k+1} = P_k + increment
"""
P_k = -2*r_pixel + 3
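# A short sketch of where the constants below come from (standard midpoint/Bresenham circle
# reasoning, added here for reference; not part of the original tutorial). Evaluating the
# decision parameter P_k at the midpoint between the two candidate pixels gives:
#   P_0 = 3 - 2*r (the initial value above, -2*r_pixel + 3)
#   if P_k <  0: P_{k+1} = P_k + 4*y + 6 (keep x, step y -> y+1)
#   if P_k >= 0: P_{k+1} = P_k + 4*(y - x) + 10 (step x -> x-1 and y -> y+1)
# which are exactly the increments used in the loop below.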
# iterate until one eighth of the circular arc has been drawn
while x>=y:
# there are two candidate points for the next step; which one is chosen depends on whether P_k >= 0 or P_k < 0
if P_k>=0:# the outer candidate point is farther from the arc
P_k_next = P_k - 4*x + 4*y + 10
(x_next,y_next) = (x-1, y+1)
else:# the inner candidate point is farther from the arc
P_k_next = P_k + 4*y + 6
(x_next,y_next) = (x, y+1)
# use 8-way symmetry to draw the corresponding points in the other octants
draw(x,y)
draw(-x,y)
draw(x,-y)
draw(-x,-y)
draw(y,x)
draw(y,-x)
draw(-y,x)
draw(-y,-x)
# update the coordinates and P_k
(x,y) = (int(x_next),int(y_next))
P_k = P_k_next
pass
# display the image
plt.imshow(img)
|
goatools/gosubdag/plot/goea_results.py | flying-sheep/goatools | 477 | 12726467 | """Manages GO Term fill colors and bordercolors."""
__copyright__ = "Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved."
__author__ = "<NAME>"
import sys
import collections as cx
class GoeaResults(object):
"""Manages GOEA Results for plotting."""
kws_set = set(['id2symbol', 'study_items', 'items_p_line', 'pval_name'])
dflt_items_p_line = 5 # study items (e.g., genes) per line on GO Terms
fmtres = "{study_count} genes"
alpha2col = cx.OrderedDict([
# Enriched GOEA GO terms that are significant
(0.005, 'mistyrose'),
(0.010, 'moccasin'),
(0.050, 'lemonchiffon1'),
# GOEA GO terms that are not significant
(1.000, 'grey95'),
])
def __init__(self, goea_results, **kws):
# kws: goea_results or go2nt
assert goea_results, "NO GOEA RESULTS IN GoeaResults INPUTS"
# GOATOOLs results as objects (WAS: Kws goea_results go2nt)
self.go2res = {r.GO: r for r in goea_results}
self.is_goterm = hasattr(goea_results[0], "_fldsdefprt")
# GOATOOLs results as a list of namedtuples
self.pval_name = self._init_pval_name(**kws)
self.study_items = kws.get('study_items', None)
self.study_items_max = self._init_study_items_max()
self.items_p_line = kws['items_p_line'] if 'items_p_line' in kws else self.dflt_items_p_line
self.id2symbol = kws['id2symbol'] if 'id2symbol' in kws else {}
def prt_summary(self, prt=sys.stdout):
"""Print summary of GOEA plotting object."""
desc = "NtGoeaResults" if self.is_goterm else "namedtuple"
prt.write("{N} GOEA results from {O}. P-values stored in {P}.\n".format(
N=len(self.go2res), O=desc, P=self.pval_name))
def get_study_txt(self, goid):
"""Get GO text from GOEA study."""
if goid in self.go2res:
res = self.go2res[goid]
if res.study_items is not None:
return self._get_item_str(res)
else:
return self.fmtres.format(study_count=res.study_count)
def set_goid2color_pval(self, goid2color):
"""Fill missing colors based on p-value of an enriched GO term."""
alpha2col = self.alpha2col
if self.pval_name is not None:
pval_name = self.pval_name
for goid, res in self.go2res.items():
pval = getattr(res, pval_name, None)
if pval is not None:
for alpha, color in alpha2col.items():
if pval <= alpha and res.study_count != 0:
if goid not in goid2color:
goid2color[goid] = color
def get_goid2color_pval(self):
"""Return a go2color dict containing GO colors determined by P-value."""
go2color = {}
self.set_goid2color_pval(go2color)
color_dflt = self.alpha2col[1.000]
for goid in self.go2res:
if goid not in go2color:
go2color[goid] = color_dflt
return go2color
def _get_item_str(self, res):
"""Return genes in any of these formats:
1. 19264, 17319, 12520, 12043, 74131, 22163, 12575
2. Ptprc, Mif, Cd81, Bcl2, Sash3, Tnfrsf4, Cdkn1a
3. 7: Ptprc, Mif, Cd81, Bcl2, Sash3...
"""
ipl = self.items_p_line
prt_items = sorted([self._get_genestr(itemid) for itemid in res.study_items])
prt_multiline = [prt_items[i:i+ipl] for i in range(0, len(prt_items), ipl)]
num_items = len(prt_items)
if self.study_items_max is None:
genestr = "\n".join([", ".join(str(e) for e in sublist) for sublist in prt_multiline])
return "{N}) {GENES}".format(N=num_items, GENES=genestr)
else:
if num_items <= self.study_items_max:
gene_lines = [", ".join(str(e) for e in sublist) for sublist in prt_multiline]
genestr = "\n".join(gene_lines)
return genestr
else:
short_list = prt_items[:self.study_items_max]
short_mult = [short_list[i:i+ipl] for i in range(0, len(short_list), ipl)]
short_lines = [", ".join(str(e) for e in sublist) for sublist in short_mult]
short_str = "\n".join(short_lines)
return "".join(["{N} genes; ".format(N=num_items), short_str, "..."])
def _get_genestr(self, itemid):
"""Given a geneid, return the string geneid or a gene symbol."""
if itemid in self.id2symbol:
symbol = self.id2symbol[itemid]
if symbol is not None:
return symbol
if isinstance(itemid, int):
return str(itemid)
return itemid
def _init_pval_name(self, **kws):
"""Initialize pvalue attribute name."""
if 'pval_name' in kws:
return kws['pval_name']
# If go2res contains GO Terms
if self.is_goterm:
return "p_{M}".format(M=next(iter(self.go2res.values())).get_method_name())
# If go2res contains GO namedtuples
for fld in next(iter(self.go2res.values()))._fields:
if fld[:2] == 'p_' and fld != 'p_uncorrected':
return fld
def _init_study_items_max(self):
"""User can limit the number of genes printed in a GO term."""
if self.study_items is None:
return None
if self.study_items is True:
return None
if isinstance(self.study_items, int):
return self.study_items
return None
# Copyright (C) 2016-2017, <NAME>, <NAME>, All rights reserved.
|
tests/test_boringdisabled.py | acpaquette/deepstate | 684 | 12726536 | <filename>tests/test_boringdisabled.py
from __future__ import print_function
import logrun
import deepstate_base
class BoringDisabledTest(deepstate_base.DeepStateTestCase):
def run_deepstate(self, deepstate):
(r, output) = logrun.logrun([deepstate, "build/examples/BoringDisabled"],
"deepstate.out", 1800)
self.assertEqual(r, 0)
self.assertTrue("Passed: CharTest_BoringVerifyCheck" in output)
self.assertTrue("Failed: CharTest_VerifyCheck" in output)
|
examples/experiments/ best_naive_predictor_for_f1_score.py | timothyxp/ppscore | 765 | 12726567 | <filename>examples/experiments/ best_naive_predictor_for_f1_score.py
# %% [markdown]
# ## Determining the best naive predictor for the f1 score
# - If there are 2 classes that are skewed, then the most common value is often slightly better than the random guess
# - If there are 4 classes that are skewed, then the random value is often slightly better than the most common value
# - If the classes (2 or 4) are balanced, then the random guess is usually significantly better than the most common value.
#
# Summing up, random values are usually preferred over the most common value.
#
# However, the best baseline is the maximum of the f1_score of the most common value and random values.
# %%
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
# %%
df = pd.DataFrame(
{
"boolean_equal": np.random.choice(["yes", "no"], 1000),
"boolean_skewed": np.random.choice(["yes", "yes", "yes", "no"], 1000),
"multicat_equal": np.random.choice(["cat1", "cat2", "cat3", "cat4"], 1000),
"multicat_skewed": np.random.choice(
["cat1", "cat1", "cat1", "cat1", "cat2", "cat2", "cat3", "cat4"], 1000
),
}
)
# %%
def f1_score_most_common(series, value):
return f1_score(series, np.random.choice([value], 1000), average="weighted")
# %%
def f1_score_random(series):
return f1_score(series, series.sample(frac=1), average="weighted")
# %% [markdown]
# ### Boolean equal
# - Random is better than most common
# %%
f1_score_most_common(df["boolean_equal"], "yes")
# %%
f1_score_random(df["boolean_equal"])
# %% [markdown]
# ### Boolean skewed
# - Most common is usually better than random but they are in the same ball park
# %%
f1_score_most_common(df["boolean_skewed"], "yes")
# %%
f1_score_random(df["boolean_skewed"])
# %% [markdown]
# ### Multicat equal
# - Random is better than most common
# %%
f1_score_most_common(df["multicat_equal"], "cat1")
# %%
f1_score_random(df["multicat_equal"])
# %% [markdown]
# ### Multicat skewed
# - Random is usually better than most common but they are in the same ballpark
# %%
f1_score_most_common(df["multicat_skewed"], "cat1")
# %%
f1_score_random(df["multicat_skewed"])
|
safety/managers.py | ulule/django-safety | 157 | 12726569 | <reponame>ulule/django-safety
# -*- coding: utf-8 -*-
from django.db import models, transaction, IntegrityError
from django.utils.timezone import now
from . import app_settings
from . import utils
class PasswordChangeManager(models.Manager):
def get_or_create_for_user(self, user):
return self.get_or_create(user=user)
def is_required_for_user(self, user):
obj, created = self.get_or_create_for_user(user=user)
return obj.required
class SessionManager(models.Manager):
def active(self, user=None):
qs = self.filter(expiration_date__gt=now())
if user is not None:
qs = qs.filter(user=user)
return qs.order_by('-last_activity')
def create_session(self, request, user):
ip = utils.resolve(app_settings.IP_RESOLVER, request)
device = utils.resolve(app_settings.DEVICE_RESOLVER, request)
location = utils.resolve(app_settings.LOCATION_RESOLVER, request)
user_agent = request.META.get('HTTP_USER_AGENT', '')
user_agent = user_agent[:200] if user_agent else user_agent
try:
with transaction.atomic():
obj = self.create(
user=user,
session_key=request.session.session_key,
ip=ip,
user_agent=user_agent,
device=device,
location=location,
expiration_date=request.session.get_expiry_date(),
last_activity=now())
except IntegrityError:
obj = self.get(
user=user,
session_key=request.session.session_key)
obj.last_activity = now()
obj.save()
return obj
|
mjml/__init__.py | stenius/django-mjml | 199 | 12726597 | <filename>mjml/__init__.py
__version__ = '0.11.0'
default_app_config = 'mjml.apps.MJMLConfig'
|
examples/jina_example/app.py | procedure2012/RocketQA | 210 | 12726668 | import sys
import os
import webbrowser
from pathlib import Path
from jina import Document, Flow
def config():
os.environ.setdefault('JINA_USE_CUDA', 'False')
os.environ.setdefault('JINA_PORT_EXPOSE', '8886')
os.environ.setdefault('JINA_WORKSPACE', './workspace')
def index(file_name):
def load_marco(fn):
cnt = 0
with open(fn, 'r') as f:
for ln, line in enumerate(f):
try:
title, para = line.strip().split('\t')
doc = Document(
id=f'{cnt}',
uri=fn,
tags={'title': title, 'para': para})
cnt += 1
yield doc
except:
print(f'skip line {ln}')
continue
f = Flow().load_config('flows/index.yml')
with f:
f.post(on='/index', inputs=load_marco(file_name), show_progress=True, request_size=32)
def fillin_html():
source_fn = Path(__file__).parent.absolute() / 'static/index_template.html'
target_fn = Path(__file__).parent.absolute() / 'static/index.html'
with open(source_fn, 'r') as fp, open(target_fn, 'w') as fw:
t = fp.read()
t = t.replace('{% JINA_PORT_EXPOSE %}',
f'{os.environ.get("JINA_PORT_EXPOSE")}')
fw.write(t)
def query():
from distutils.dir_util import copy_tree
fillin_html()
copy_tree('static', 'workspace/static')
url_html_fn = Path(__file__).parent.absolute() / 'workspace/static/index.html'
url_html_path = f'file://{url_html_fn}'
f = Flow().load_config('flows/query.yml')
with f:
try:
webbrowser.open(url_html_path, new=2)
except:
pass
finally:
print(f'You should see a demo page opened in your browser; '
f'if not, you may open {url_html_path} manually')
f.block()
def query_cli():
def print_topk(resp):
for doc in resp.docs:
print(doc)
doc = Document(doc)
print(f'🤖 Answers:')
for m in doc.matches:
print(f'\t{m.tags["title"]}')
print(f'\t{m.tags["para"]}')
print(f'-----')
f = Flow().load_config('flows/query.yml')
with f:
f.protocol = 'grpc'
print(f'🤖 Hi there, please ask me questions related to the indexed Documents.\n'
'For example, "Who is <NAME>\'s brother?"\n')
while True:
text = input('Question: (type `\q` to quit)')
if text == '\q' or not text:
return
f.post(on='/search', inputs=[Document(content=text), ], on_done=print_topk)
def main(task):
config()
if task == 'index':
if Path('./workspace').exists():
print('./workspace exists, please delete it if you want to reindex')
data_fn = sys.argv[2] if len(sys.argv) >= 3 else 'toy_data/test.tsv'
print(f'indexing {data_fn}')
index(data_fn)
elif task == 'query':
query()
elif task == 'query_cli':
query_cli()
if __name__ == '__main__':
task = sys.argv[1]
main(task)
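# Typical invocations for this example (assuming the toy data shipped with the repo):
#   python app.py index toy_data/test.tsv   # build the index (this file is also the default)
#   python app.py query                     # serve the browser demo page
#   python app.py query_cli                 # interactive command-line search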
|
__scraping__/nhle.com - requests, JSON/main.py | furas/python-code | 140 | 12726691 | <reponame>furas/python-code<gh_stars>100-1000
# author: Bartlomiej "furas" Burek (https://blog.furas.pl)
# date: 2021.12.01
#
# title: Scrape for Table with Limits
# url: https://stackoverflow.com/questions/70179298/scrape-for-table-with-limits/70180875#70180875
# [Scrape for Table with Limits](https://stackoverflow.com/questions/70179298/scrape-for-table-with-limits/70180875#70180875)
import requests
import pandas as pd
# --- before loop ---
url = 'https://api.nhle.com/stats/rest/en/team/daysbetweengames'
payload = {
'isAggregate': 'false',
'isGame': 'true',
'start': 0,
'limit': 100,
'sort': '[{"property":"teamFullName","direction":"ASC"},{"property":"daysRest","direction":"DESC"},{"property":"teamId","direction":"ASC"}]',
'factCayenneExp': 'gamesPlayed>=1',
'cayenneExp': 'gameDate<="2021-11-30 23:59:59" and gameDate>="2021-10-12" and gameTypeId=2',
}
df = pd.DataFrame()
# --- loop ---
for start in range(0, 1000, 100):
print('--- start:', start, '---')
payload['start'] = start
response = requests.get(url, params=payload)
data = response.json()
df = df.append(data['data'], ignore_index=True)
# --- after loop ---
print(df)
df.to_excel('Master File.xlsx', sheet_name='Info')
print(df.iloc[0])
print(df.iloc[100])
|
text_extensions_for_pandas/array/arrow_conversion.py | ZachEichen/text-extensions-for-pandas | 193 | 12726699 | #
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# arrow_conversion.py
#
# Part of text_extensions_for_pandas
#
# Provide Arrow compatible classes for serializing to pyarrow.
#
from distutils.version import LooseVersion
import numpy as np
import pyarrow as pa
from text_extensions_for_pandas.array.span import SpanArray
from text_extensions_for_pandas.array.token_span import TokenSpanArray, _EMPTY_SPAN_ARRAY_SINGLETON
from text_extensions_for_pandas.array.tensor import TensorArray
from text_extensions_for_pandas.array.string_table import StringTable
class ArrowSpanType(pa.PyExtensionType):
"""
PyArrow extension type definition for conversions to/from Span columns
"""
BEGINS_NAME = "span_begins"
ENDS_NAME = "span_ends"
TARGET_TEXT_DICT_NAME = "target_text"
def __init__(self, index_dtype, target_text_dict_dtype):
"""
Create an instance of a Span data type with given index type and
target text dictionary type. The dictionary type will hold target text ids
that map to a dictionary of document target texts.
:param index_dtype: type for the begin, end index arrays
:param target_text_dict_dtype: type for the target text dictionary array
"""
assert pa.types.is_integer(index_dtype)
assert pa.types.is_dictionary(target_text_dict_dtype)
fields = [
pa.field(self.BEGINS_NAME, index_dtype),
pa.field(self.ENDS_NAME, index_dtype),
pa.field(self.TARGET_TEXT_DICT_NAME, target_text_dict_dtype)
]
pa.PyExtensionType.__init__(self, pa.struct(fields))
def __reduce__(self):
index_dtype = self.storage_type[self.BEGINS_NAME].type
target_text_dict_dtype = self.storage_type[self.TARGET_TEXT_DICT_NAME].type
return ArrowSpanType, (index_dtype, target_text_dict_dtype)
class ArrowTokenSpanType(pa.PyExtensionType):
"""
PyArrow extension type definition for conversions to/from TokenSpan columns
"""
BEGINS_NAME = "token_begins"
ENDS_NAME = "token_ends"
TOKENS_NAME = "tokens"
def __init__(self, index_dtype, token_dict_dtype):
"""
Create an instance of a TokenSpan data type with given index type and
target text that will be stored in Field metadata.
:param index_dtype: type for the begin, end index arrays
:param token_dict_dtype: type for the tokens dictionary array
"""
assert pa.types.is_integer(index_dtype)
assert pa.types.is_dictionary(token_dict_dtype)
fields = [
pa.field(self.BEGINS_NAME, index_dtype),
pa.field(self.ENDS_NAME, index_dtype),
pa.field(self.TOKENS_NAME, token_dict_dtype),
]
pa.PyExtensionType.__init__(self, pa.struct(fields))
def __reduce__(self):
index_dtype = self.storage_type[self.BEGINS_NAME].type
token_dict_dtype = self.storage_type[self.TOKENS_NAME].type
return ArrowTokenSpanType, (index_dtype, token_dict_dtype)
def span_to_arrow(char_span: SpanArray) -> pa.ExtensionArray:
"""
Convert a SpanArray to a pyarrow.ExtensionArray with a type
of ArrowSpanType and struct as the storage type. The resulting
extension array can be serialized and transferred with standard
Arrow protocols.
:param char_span: A SpanArray to be converted
:return: pyarrow.ExtensionArray containing Span data
"""
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
raise NotImplementedError("Arrow serialization for SpanArray is not supported with "
"PyArrow versions < 2.0.0")
# Create array for begins, ends
begins_array = pa.array(char_span.begin)
ends_array = pa.array(char_span.end)
# Create a dictionary array from StringTable used in this span
dictionary = pa.array([char_span._string_table.unbox(s)
for s in char_span._string_table.things])
target_text_dict_array = pa.DictionaryArray.from_arrays(char_span._text_ids, dictionary)
typ = ArrowSpanType(begins_array.type, target_text_dict_array.type)
fields = list(typ.storage_type)
storage = pa.StructArray.from_arrays([begins_array, ends_array, target_text_dict_array], fields=fields)
return pa.ExtensionArray.from_storage(typ, storage)
def arrow_to_span(extension_array: pa.ExtensionArray) -> SpanArray:
"""
Convert a pyarrow.ExtensionArray with type ArrowSpanType to
a SpanArray.
..NOTE: Only supported with PyArrow >= 2.0.0
:param extension_array: pyarrow.ExtensionArray with type ArrowSpanType
:return: SpanArray
"""
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
raise NotImplementedError("Arrow serialization for SpanArray is not supported with "
"PyArrow versions < 2.0.0")
if isinstance(extension_array, pa.ChunkedArray):
if extension_array.num_chunks > 1:
raise ValueError("Only pyarrow.Array with a single chunk is supported")
extension_array = extension_array.chunk(0)
# NOTE: workaround for bug in parquet reading
if pa.types.is_struct(extension_array.type):
index_dtype = extension_array.field(ArrowSpanType.BEGINS_NAME).type
target_text_dict_dtype = extension_array.field(ArrowSpanType.TARGET_TEXT_DICT_NAME).type
extension_array = pa.ExtensionArray.from_storage(
ArrowSpanType(index_dtype, target_text_dict_dtype),
extension_array)
assert pa.types.is_struct(extension_array.storage.type)
# Create target text StringTable and text_ids from dictionary array
target_text_dict_array = extension_array.storage.field(ArrowSpanType.TARGET_TEXT_DICT_NAME)
table_texts = target_text_dict_array.dictionary.to_pylist()
string_table = StringTable.from_things(table_texts)
text_ids = target_text_dict_array.indices.to_numpy()
# Get the begins/ends pyarrow arrays
begins_array = extension_array.storage.field(ArrowSpanType.BEGINS_NAME)
ends_array = extension_array.storage.field(ArrowSpanType.ENDS_NAME)
# Zero-copy convert arrays to numpy
begins = begins_array.to_numpy()
ends = ends_array.to_numpy()
return SpanArray((string_table, text_ids), begins, ends)
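# A minimal round-trip sketch (hypothetical usage; `span_arr` is assumed to be a SpanArray
# built elsewhere in this package, e.g. by a tokenizer):
#
#   arrow_arr = span_to_arrow(span_arr)             # SpanArray -> pa.ExtensionArray
#   table = pa.table({"span": arrow_arr})           # serializable with standard Arrow IPC/Parquet
#   restored = arrow_to_span(table.column("span"))  # back to a SpanArray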
def token_span_to_arrow(token_span: TokenSpanArray) -> pa.ExtensionArray:
"""
Convert a TokenSpanArray to a pyarrow.ExtensionArray with a type
of ArrowTokenSpanType and struct as the storage type. The resulting
extension array can be serialized and transferred with standard
Arrow protocols.
:param token_span: A TokenSpanArray to be converted
:return: pyarrow.ExtensionArray containing TokenSpan data
"""
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
raise NotImplementedError("Arrow serialization for TokenSpanArray is not supported with "
"PyArrow versions < 2.0.0")
# Create arrays for begins/ends
token_begins_array = pa.array(token_span.begin_token)
token_ends_array = pa.array(token_span.end_token)
# Filter out any empty SpanArrays
non_null_tokens = token_span.tokens[~token_span.isna()]
assert len(non_null_tokens) > 0
# Get either single document as a list or use a list of all if multiple docs
if all([token is non_null_tokens[0] for token in non_null_tokens]):
tokens_arrays = [non_null_tokens[0]]
tokens_indices = pa.array([0] * len(token_span.tokens), mask=token_span.isna())
else:
raise NotImplementedError("TokenSpan Multi-doc serialization not yet implemented due to "
"ArrowNotImplementedError: Concat with dictionary unification NYI")
tokens_arrays = non_null_tokens
tokens_indices = np.zeros_like(token_span.tokens)
tokens_indices[~token_span.isna()] = range(len(tokens_arrays))
tokens_indices = pa.array(tokens_indices, mask=token_span.isna())
# Convert each token SpanArray to Arrow and get as raw storage
arrow_tokens_arrays = [span_to_arrow(sa).storage for sa in tokens_arrays]
# Create a list array with each element is an ArrowSpanArray
# TODO: pyarrow.lib.ArrowNotImplementedError: ('Sequence converter for type dictionary<values=string, indices=int8, ordered=0> not implemented', 'Conversion failed for column ts1 with type TokenSpanDtype')
#arrow_tokens_arrays_array = pa.array(arrow_tokens_arrays, type=pa.list_(arrow_tokens_arrays[0].type))
offsets = [0] + [len(a) for a in arrow_tokens_arrays]
values = pa.concat_arrays(arrow_tokens_arrays) # TODO: can't concat extension arrays?
arrow_tokens_arrays_array = pa.ListArray.from_arrays(offsets, values)
# Create a dictionary array mapping each token SpanArray index used to the list of ArrowSpanArrays
tokens_dict_array = pa.DictionaryArray.from_arrays(tokens_indices, arrow_tokens_arrays_array)
typ = ArrowTokenSpanType(token_begins_array.type, tokens_dict_array.type)
fields = list(typ.storage_type)
storage = pa.StructArray.from_arrays([token_begins_array, token_ends_array, tokens_dict_array], fields=fields)
return pa.ExtensionArray.from_storage(typ, storage)
def arrow_to_token_span(extension_array: pa.ExtensionArray) -> TokenSpanArray:
"""
Convert a pyarrow.ExtensionArray with type ArrowTokenSpanType to
a TokenSpanArray.
:param extension_array: pyarrow.ExtensionArray with type ArrowTokenSpanType
:return: TokenSpanArray
"""
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
raise NotImplementedError("Arrow serialization for TokenSpanArray is not supported with "
"PyArrow versions < 2.0.0")
if isinstance(extension_array, pa.ChunkedArray):
if extension_array.num_chunks > 1:
raise ValueError("Only pyarrow.Array with a single chunk is supported")
extension_array = extension_array.chunk(0)
assert pa.types.is_struct(extension_array.storage.type)
# Get the begins/ends pyarrow arrays
token_begins_array = extension_array.storage.field(ArrowTokenSpanType.BEGINS_NAME)
token_ends_array = extension_array.storage.field(ArrowTokenSpanType.ENDS_NAME)
# Get the tokens as a dictionary array where indices map to a list of ArrowSpanArrays
tokens_dict_array = extension_array.storage.field(ArrowTokenSpanType.TOKENS_NAME)
tokens_indices = tokens_dict_array.indices
arrow_tokens_arrays_array = tokens_dict_array.dictionary
# Breakup the list of ArrowSpanArrays and convert back to individual SpanArrays
tokens_arrays = []
span_type = None
for i in range(1, len(arrow_tokens_arrays_array.offsets)):
start = arrow_tokens_arrays_array.offsets[i - 1].as_py()
stop = arrow_tokens_arrays_array.offsets[i].as_py()
arrow_tokens_array = arrow_tokens_arrays_array.values[start:stop]
# Make an instance of ArrowSpanType
if span_type is None:
begins_array = arrow_tokens_array.field(ArrowSpanType.BEGINS_NAME)
target_text_dict_array = arrow_tokens_array.field(ArrowSpanType.TARGET_TEXT_DICT_NAME)
span_type = ArrowSpanType(begins_array.type, target_text_dict_array.type)
# Re-make the Arrow extension type to convert back to a SpanArray
tokens_array = arrow_to_span(pa.ExtensionArray.from_storage(span_type, arrow_tokens_array))
tokens_arrays.append(tokens_array)
# Map the token indices to the actual token SpanArray for each element in the TokenSpanArray
tokens = [_EMPTY_SPAN_ARRAY_SINGLETON if i is None else tokens_arrays[i]
for i in tokens_indices.to_pylist()]
# Zero-copy convert arrays to numpy
token_begins = token_begins_array.to_numpy()
token_ends = token_ends_array.to_numpy()
return TokenSpanArray(tokens, token_begins, token_ends)
class ArrowTensorType(pa.PyExtensionType):
"""
pyarrow ExtensionType definition for TensorDtype
:param element_shape: Fixed shape for each tensor element of the array, the
outer dimension is the number of elements, or length,
of the array.
"""
def __init__(self, element_shape, pyarrow_dtype):
self._element_shape = element_shape
pa.PyExtensionType.__init__(self, pa.list_(pyarrow_dtype))
def __reduce__(self):
return ArrowTensorType, (self._element_shape, self.storage_type.value_type)
@property
def shape(self):
return self._element_shape
def __arrow_ext_class__(self):
return ArrowTensorArray
class ArrowTensorArray(pa.ExtensionArray):
"""
A batch of tensors with fixed shape.
"""
def __init__(self):
raise TypeError("Do not call ArrowTensorBatch constructor directly, "
"use one of the `ArrowTensorBatch.from_*` functions "
"instead.")
@staticmethod
def from_numpy(obj, batch_size=None):
"""
Convert a list of numpy.ndarrays with equal shapes or as single
numpy.ndarray with outer-dim as batch size to a pyarrow.Array
"""
if isinstance(obj, (list, tuple)):
if batch_size is not None:
def list_gen():
for i in range(0, len(obj), batch_size):
slc = obj[i:i + batch_size]
yield ArrowTensorArray.from_numpy(slc, batch_size=None)
return list_gen()
elif np.isscalar(obj[0]):
return pa.array(obj)
elif isinstance(obj[0], np.ndarray):
# continue with batched ndarray
obj = np.stack(obj, axis=0)
if isinstance(obj, dict):
names = list(obj.keys())
arrs = [ArrowTensorArray.from_numpy(obj[k], batch_size=batch_size)
for k in names]
batch = pa.RecordBatch.from_arrays(arrs, names)
return pa.Table.from_batches([batch])
elif isinstance(obj, np.ndarray):
# currently require contiguous ndarray
if not obj.flags.c_contiguous:
obj = np.ascontiguousarray(obj)
pa_dtype = pa.from_numpy_dtype(obj.dtype)
batch_size = obj.shape[0]
element_shape = obj.shape[1:]
total_num_elements = obj.size
num_elements = 1 if len(obj.shape) == 1 else np.prod(element_shape)
child_buf = pa.py_buffer(obj)
child_array = pa.Array.from_buffers(pa_dtype, total_num_elements, [None, child_buf])
offset_buf = pa.py_buffer(np.int32([i * num_elements for i in range(batch_size + 1)]))
storage = pa.Array.from_buffers(pa.list_(pa_dtype), batch_size,
[None, offset_buf], children=[child_array])
typ = ArrowTensorType(element_shape, pa_dtype)
return pa.ExtensionArray.from_storage(typ, storage)
elif np.isscalar(obj):
return pa.array([obj])
else:
def iter_gen():
if batch_size is None:
for d in obj:
yield ArrowTensorArray.from_numpy(d, batch_size=batch_size)
else:
batch = []
for o in obj:
batch.append(o)
if len(batch) == batch_size:
# merge dict
if isinstance(batch[0], dict):
d = {k: [v] for k, v in batch[0].items()}
for i in range(1, len(batch)):
for k, v in batch[i].items():
d[k].append(v)
for k in d.keys():
d[k] = np.stack(d[k], axis=0)
batch = d
yield ArrowTensorArray.from_numpy(batch, batch_size=None)
batch = []
return iter_gen()
def to_numpy(self):
shape = (len(self),) + self.type.shape
buf = self.storage.buffers()[3]
storage_list_type = self.storage.type
ext_dtype = storage_list_type.value_type.to_pandas_dtype()
return np.ndarray(shape, buffer=buf, dtype=ext_dtype)
def arrow_to_tensor_array(extension_array: pa.ExtensionArray) -> TensorArray:
"""
Convert a pyarrow.ExtensionArray with type ArrowTensorType to a
TensorArray.
:param extension_array: pyarrow.ExtensionArray with type ArrowTensorType
:return: TensorArray
"""
if isinstance(extension_array, pa.ChunkedArray):
if extension_array.num_chunks > 1:
# TODO: look into removing concat and constructing from list w/ shape
values = np.concatenate([chunk.to_numpy()
for chunk in extension_array.iterchunks()])
else:
values = extension_array.chunk(0).to_numpy()
else:
values = extension_array.to_numpy()
return TensorArray(values)
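# A small usage sketch (assumes a contiguous numeric batch; shapes are illustrative):
#
#   batch = np.arange(24, dtype=np.float32).reshape(4, 2, 3)
#   arrow_arr = ArrowTensorArray.from_numpy(batch)   # ArrowTensorType with element shape (2, 3)
#   tensor_arr = arrow_to_tensor_array(arrow_arr)    # back to a TensorArray of length 4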
|
src/encode_lib_log_parser.py | motorny/chip-seq-pipeline2 | 261 | 12726761 | #!/usr/bin/env python
"""
ENCODE QC log parser wrapper which converts a log file into a dict
Author: <NAME> (<EMAIL>)
"""
from collections import OrderedDict
MAP_KEY_DESC_FRAC_MITO_QC = {
'non_mito_reads': 'Rn = Number of Non-mitochondrial Reads',
'mito_reads': 'Rm = Number of Mitochondrial Reads',
'frac_mito_reads': 'Rm/(Rn+Rm) = Frac. of mitochondrial reads'
}
def parse_frac_mito_qc(txt):
result = OrderedDict()
with open(txt, 'r') as fp:
for line in fp.read().strip('\n').split('\n'):
k, v = line.split('\t')
if k.startswith('frac_'):
result[k] = float(v)
else:
result[k] = int(v)
return result
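# Example (hypothetical log contents; the real file is produced upstream in the pipeline):
# the frac_mito log is expected to hold tab-separated key/value lines such as
#   non_mito_reads<TAB>1000000
#   mito_reads<TAB>50000
#   frac_mito_reads<TAB>0.0476
# for which parse_frac_mito_qc() returns
#   OrderedDict([('non_mito_reads', 1000000), ('mito_reads', 50000), ('frac_mito_reads', 0.0476)])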
MAP_KEY_DESC_FLAGSTAT_QC = {
'total_reads': 'Total Reads',
'total_reads_qc_failed': 'Total Reads (QC-failed)',
'duplicate_reads': 'Duplicate Reads',
'duplicate_reads_qc_failed': 'Duplicate Reads (QC-failed)',
'mapped_reads': 'Mapped Reads',
'mapped_reads_qc_failed': 'Mapped Reads (QC-failed)',
'pct_mapped_reads': '% Mapped Reads',
'paired_reads': 'Paired Reads',
'paired_reads_qc_failed': 'Paired Reads (QC-failed)',
'read1': 'Read1',
'read1_qc_failed': 'Read1 (QC-failed)',
'read2': 'Read2',
'read2_qc_failed': 'Read2 (QC-failed)',
'properly_paired_reads': 'Properly Paired Reads',
'properly_paired_reads_qc_failed': 'Properly Paired Reads (QC-failed)',
'pct_properly_paired_reads': '% Properly Paired Reads',
'with_itself': 'With itself',
'with_itself_qc_failed': 'With itself (QC-failed)',
'singletons': 'Singletons',
'singletons_qc_failed': 'Singletons (QC-failed)',
'pct_singletons': '% Singleton',
'diff_chroms': 'Diff. Chroms',
'diff_chroms_qc_failed': 'Diff. Chroms (QC-failed)',
}
def parse_flagstat_qc(txt):
result = OrderedDict()
if not txt:
return result
total = ''
total_qc_failed = ''
duplicates = ''
duplicates_qc_failed = ''
mapped = ''
mapped_qc_failed = ''
mapped_pct = ''
paired = ''
paired_qc_failed = ''
read1 = ''
read1_qc_failed = ''
read2 = ''
read2_qc_failed = ''
paired_properly = ''
paired_properly_qc_failed = ''
paired_properly_pct = ''
with_itself = ''
with_itself_qc_failed = ''
singletons = ''
singletons_qc_failed = ''
singletons_pct = ''
diff_chroms = ''
diff_chroms_qc_failed = ''
delimiter_pass_fail = ' + '
with open(txt, 'r') as f:
for line in f:
if ' total ' in line:
if ' in total ' in line:
tmp1 = line.split(' in total ')
else:
tmp1 = line.split(' total ')
line1 = tmp1[0]
tmp1 = line1.split(delimiter_pass_fail)
total = tmp1[0]
total_qc_failed = tmp1[1]
if ' duplicates' in line:
tmp2 = line.split(' duplicates')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
duplicates = tmp2[0]
duplicates_qc_failed = tmp2[1]
if ' mapped (' in line:
tmp3 = line.split(' mapped (')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
mapped = tmp3_1[0]
mapped_qc_failed = tmp3_1[1]
line3_2 = tmp3[1]
tmp3_2 = line3_2.split(':')
mapped_pct = tmp3_2[0] # .replace('%','')
if ' paired in sequencing' in line:
tmp2 = line.split(' paired in sequencing')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
paired = tmp2[0]
paired_qc_failed = tmp2[1]
if ' read1' in line:
tmp2 = line.split(' read1')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
read1 = tmp2[0]
read1_qc_failed = tmp2[1]
if ' read2' in line:
tmp2 = line.split(' read2')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
read2 = tmp2[0]
read2_qc_failed = tmp2[1]
if ' properly paired (' in line:
tmp3 = line.split(' properly paired (')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
paired_properly = tmp3_1[0]
paired_properly_qc_failed = tmp3_1[1]
line3_2 = tmp3[1]
tmp3_2 = line3_2.split(':')
paired_properly_pct = tmp3_2[0] # .replace('%','')
if ' with itself and mate mapped' in line:
tmp3 = line.split(' with itself and mate mapped')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
with_itself = tmp3_1[0]
with_itself_qc_failed = tmp3_1[1]
if ' singletons (' in line:
tmp3 = line.split(' singletons (')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
singletons = tmp3_1[0]
singletons_qc_failed = tmp3_1[1]
line3_2 = tmp3[1]
tmp3_2 = line3_2.split(':')
singletons_pct = tmp3_2[0] # .replace('%','')
if ' with mate mapped to a different chr' in line:
tmp3 = line.split(' with mate mapped to a different chr')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
diff_chroms = tmp3_1[0]
diff_chroms_qc_failed = tmp3_1[1]
if total:
result['total_reads'] = int(total)
if total_qc_failed:
result['total_reads_qc_failed'] = int(total_qc_failed)
if duplicates:
result['duplicate_reads'] = int(duplicates)
if duplicates_qc_failed:
result['duplicate_reads_qc_failed'] = int(duplicates_qc_failed)
if mapped:
result['mapped_reads'] = int(mapped)
if mapped_qc_failed:
result['mapped_reads_qc_failed'] = int(mapped_qc_failed)
if mapped_pct:
if 'nan' not in mapped_pct and 'N/A' not in mapped_pct \
and 'NA' not in mapped_pct:
if '%' in mapped_pct:
mapped_pct = mapped_pct.replace('%', '')
result['pct_mapped_reads'] = float(mapped_pct)
else:
result['pct_mapped_reads'] = 100.0 * float(mapped_pct)
else:
result['pct_mapped_reads'] = 0.0
if paired:
result['paired_reads'] = int(paired)
if paired_qc_failed:
result['paired_reads_qc_failed'] = int(paired_qc_failed)
if read1:
result['read1'] = int(read1)
if read1_qc_failed:
result['read1_qc_failed'] = int(read1_qc_failed)
if read2:
result['read2'] = int(read2)
if read2_qc_failed:
result['read2_qc_failed'] = int(read2_qc_failed)
if paired_properly:
result['properly_paired_reads'] = int(paired_properly)
if paired_properly_qc_failed:
result['properly_paired_reads_qc_failed'] = int(
paired_properly_qc_failed)
if paired_properly_pct:
if 'nan' not in paired_properly_pct and \
'N/A' not in paired_properly_pct \
and 'NA' not in paired_properly_pct:
if '%' in paired_properly_pct:
paired_properly_pct = paired_properly_pct.replace('%', '')
result['pct_properly_paired_reads'] = float(
paired_properly_pct)
else:
result['pct_properly_paired_reads'] = 100.0 * \
float(paired_properly_pct)
else:
result['pct_properly_paired_reads'] = 0.0
if with_itself:
result['with_itself'] = int(with_itself)
if with_itself_qc_failed:
result['with_itself_qc_failed'] = int(with_itself_qc_failed)
if singletons:
result['singletons'] = int(singletons)
if singletons_qc_failed:
result['singletons_qc_failed'] = int(singletons_qc_failed)
if singletons_pct:
if 'nan' not in singletons_pct and 'N/A' not in singletons_pct \
and 'NA' not in singletons_pct:
if '%' in singletons_pct:
singletons_pct = singletons_pct.replace('%', '')
result['pct_singletons'] = float(singletons_pct)
else:
result['pct_singletons'] = 100.0 * float(singletons_pct)
else:
result['pct_singletons'] = 0.0
if diff_chroms:
result['diff_chroms'] = int(diff_chroms)
if diff_chroms_qc_failed:
result['diff_chroms_qc_failed'] = int(diff_chroms_qc_failed)
return result
MAP_KEY_DESC_DUP_QC = {
'unpaired_reads': 'Unpaired Reads',
'paired_reads': 'Paired Reads',
'unmapped_reads': 'Unmapped Reads',
'unpaired_duplicate_reads': 'Unpaired Duplicate Reads',
'paired_duplicate_reads': 'Paired Duplicate Reads',
'paired_optical_duplicate_reads': 'Paired Optical Duplicate Reads',
'pct_duplicate_reads': '% Duplicate Reads',
}
def parse_dup_qc(txt):
result = OrderedDict()
if not txt:
return result
paired_reads = ''
unpaired_reads = ''
unmapped_reads = ''
unpaired_dupes = ''
paired_dupes = ''
paired_opt_dupes = ''
dupes_pct = ''
picard_log_found = False
# picard markdup
with open(txt, 'r') as f:
header = '' # if 'UNPAIRED_READS_EXAMINED' in header
content = ''
for line in f:
if header:
content = line.replace(',', '.')
picard_log_found = True
break
if 'UNPAIRED_READS_EXAMINED' in line:
header = line
if picard_log_found:
header_items = header.split('\t')
content_items = content.split('\t')
m = dict(zip(header_items, content_items))
unpaired_reads = m['UNPAIRED_READS_EXAMINED']
paired_reads = m['READ_PAIRS_EXAMINED']
unmapped_reads = m['UNMAPPED_READS']
unpaired_dupes = m['UNPAIRED_READ_DUPLICATES']
paired_dupes = m['READ_PAIR_DUPLICATES']
paired_opt_dupes = m['READ_PAIR_OPTICAL_DUPLICATES']
if 'PERCENT_DUPLICATION' in m:
dupes_pct = m['PERCENT_DUPLICATION']
else:
dupes_pct = '0'
else:
# sambamba markdup
with open(txt, 'r') as f:
for line in f:
if ' end pairs' in line:
tmp1 = line.strip().split(' ')
paired_reads = tmp1[1]
if ' single ends ' in line:
tmp1 = line.strip().split(' ')
unpaired_reads = tmp1[1]
unmapped_reads = tmp1[6]
if 'found ' in line:
tmp1 = line.strip().split(' ')
if paired_reads == '0':
unpaired_dupes = tmp1[1] # SE
paired_dupes = 0
else:
unpaired_dupes = 0
paired_dupes = str(int(tmp1[1])/2) # PE
if paired_reads == '0': # SE
dupes_pct = '{0:.2f}'.format(
float(unpaired_dupes)/float(unpaired_reads))
elif paired_reads:
dupes_pct = '{0:.2f}'.format(
float(paired_dupes)/float(paired_reads))
if unpaired_reads:
result['unpaired_reads'] = int(unpaired_reads)
if paired_reads:
result['paired_reads'] = int(paired_reads)
if unmapped_reads:
result['unmapped_reads'] = int(unmapped_reads)
if unpaired_dupes:
result['unpaired_duplicate_reads'] = int(unpaired_dupes)
if paired_dupes:
result['paired_duplicate_reads'] = int(paired_dupes)
if paired_opt_dupes:
result['paired_optical_duplicate_reads'] = int(paired_opt_dupes)
if dupes_pct:
result['pct_duplicate_reads'] = float(dupes_pct)*100.0
return result
MAP_KEY_DESC_LIB_COMPLEXITY_QC = {
'total_fragments': 'Total Fragments',
'distinct_fragments': 'Distinct Fragments',
'positions_with_one_read': 'Positions with One Read',
'positions_with_two_reads': 'Positions with Two Reads',
'NRF': 'NRF = Distinct/Total',
'PBC1': 'PBC1 = OneRead/Distinct',
'PBC2': 'PBC2 = OneRead/TwoRead'
}
def parse_lib_complexity_qc(txt):
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as f:
for line in f:
arr = line.strip().split('\t')
break
result['total_fragments'] = to_int(arr[0])
result['distinct_fragments'] = to_int(arr[1])
result['positions_with_one_read'] = to_int(arr[2])
result['positions_with_two_reads'] = to_int(arr[3])
result['NRF'] = to_float(arr[4])
result['PBC1'] = to_float(arr[5])
result['PBC2'] = to_float(arr[6])
return result
MAP_KEY_DESC_XCOR_SCORE = {
'subsampled_reads': 'Number of Subsampled Reads',
'estimated_fragment_len': 'Estimated Fragment Length',
'corr_estimated_fragment_len':
'Cross-correlation at Estimated Fragment Length',
'phantom_peak': 'Phantom Peak',
'corr_phantom_peak': 'Cross-correlation at Phantom Peak',
'argmin_corr': 'Argmin of Cross-correlation',
'min_corr': 'Minimum of Cross-correlation',
'NSC': 'NSC (Normalized Strand Cross-correlation coeff.)',
'RSC': 'RSC (Relative Strand Cross-correlation coeff.)',
}
def parse_xcor_score(txt):
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as f:
arr = f.readlines()[0].strip().split('\t')
result['subsampled_reads'] = int(arr[1])
result['estimated_fragment_len'] = int(arr[2])
result['corr_estimated_fragment_len'] = float(arr[3])
result['phantom_peak'] = int(arr[4])
result['corr_phantom_peak'] = float(arr[5])
result['argmin_corr'] = int(arr[6])
result['min_corr'] = float(arr[7])
result['NSC'] = float(arr[8])
result['RSC'] = float(arr[9])
return result
MAP_KEY_DESC_JSD_QC = {
'pct_genome_enrich': '% Genome Enriched',
'auc': 'AUC',
'ch_div': 'CHANCE Divergence',
'elbow_pt': 'Elbow Point',
'jsd': 'JS Distance',
'syn_auc': 'Synthetic AUC',
'syn_elbow_pt': 'Synthetic Elbow Point',
'syn_jsd': 'Synthetic JS Distance',
'syn_x_intercept': 'Synthetic X-intercept',
'x_intercept': 'X-intercept',
'diff_enrich': 'Diff. Enrichment',
}
def parse_jsd_qc(txt):
"""Works for JSD log from deepTools >= 3.0
https://github.com/deeptools/deepTools/blob/master/deeptools/plotFingerprint.py#L454
"""
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as f:
arr = f.readlines()[0].strip().split('\t')
result['auc'] = float(arr[0])
result['syn_auc'] = float(arr[1])
result['x_intercept'] = float(arr[2])
result['syn_x_intercept'] = float(arr[3])
result['elbow_pt'] = float(arr[4])
result['syn_elbow_pt'] = float(arr[5])
if len(arr) > 7:
# with --JSDSample (control) only
result['jsd'] = float(arr[6])
result['syn_jsd'] = float(arr[7])
result['pct_genome_enrich'] = float(arr[8])
result['diff_enrich'] = float(arr[9])
result['ch_div'] = float(arr[10])
else:
result['syn_jsd'] = float(arr[6])
return result
MAP_KEY_DESC_REPRODUCIBILITY_QC = {
'Np': 'Np',
'Nt': 'Nt',
'N1': 'N1',
'N2': 'N2',
'N3': 'N3',
'N4': 'N4',
'N5': 'N5',
'N6': 'N6',
'N7': 'N7',
'N8': 'N8',
'N9': 'N9',
'N10': 'N10',
'N_opt': 'N optimal',
'N_consv': 'N conservative',
'opt_set': 'Optimal Set',
'consv_set': 'Conservative Set',
'rescue_ratio': 'Rescue Ratio',
'self_consistency_ratio': 'Self Consistency Ratio',
'reproducibility': 'Reproducibility Test',
}
def parse_reproducibility_qc(txt):
if not txt:
return OrderedDict()
with open(txt, 'r') as f:
lines = f.readlines()
header = lines[0].strip()
content = lines[1].strip()
result = OrderedDict(
zip(header.split('\t'), content.split('\t')))
for key in result:
if key.startswith('N'):
result[key] = int(result[key])
if key.endswith('_ratio'):
result[key] = float(result[key])
return result
MAP_KEY_DESC_FRIP_QC = {
'frip': 'Fraction of Reads in Peaks',
}
def parse_frip_qc(txt):
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as f:
frip = f.readlines()[0].strip()
result['frip'] = float(frip)
return result
MAP_KEY_DESC_ANNOT_ENRICH_QC = {
'fri_dhs': 'Fraction of Reads in universal DHS regions',
'fri_blacklist': 'Fraction of Reads in blacklist regions',
'fri_prom': 'Fraction of Reads in promoter regions',
'fri_enh': 'Fraction of Reads in enhancer regions',
}
def parse_annot_enrich_qc(txt):
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as fp:
lines = fp.read().strip('\n').split('\n')
for line in lines:
key, reads, frac = line.split('\t')
frac = to_float(frac)
if key == 'fraction_of_reads_in_universal_DHS_regions':
result['fri_dhs'] = frac
elif key == 'fraction_of_reads_in_blacklist_regions':
result['fri_blacklist'] = frac
elif key == 'fraction_of_reads_in_promoter_regions':
result['fri_prom'] = frac
elif key == 'fraction_of_reads_in_enhancer_regions':
result['fri_enh'] = frac
else:
raise ValueError(
'Wrong line in annot_enrich QC file')
return result
MAP_KEY_DESC_PICARD_EST_LIB_SIZE_QC = {
'picard_est_lib_size': 'Estimated library size by Picard tools',
}
def parse_picard_est_lib_size_qc(txt):
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as f:
val = f.readlines()[0].strip()
result['picard_est_lib_size'] = float(val)
return result
MAP_KEY_DESC_TSS_ENRICH_QC = {
'tss_enrich': 'TSS enrichment',
}
def parse_tss_enrich_qc(txt):
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as f:
val = f.readlines()[0].strip()
result['tss_enrich'] = float(val)
return result
MAP_KEY_DESC_NUCLEOSOMAL_QC = {
'frac_reads_in_nfr': 'Fraction of reads in NFR',
'frac_reads_in_nfr_qc_pass': 'Fraction of reads in NFR (QC pass)',
'frac_reads_in_nfr_qc_reason': 'Fraction of reads in NFR (QC reason)',
'nfr_over_mono_nuc_reads': 'NFR / mono-nuc reads',
'nfr_over_mono_nuc_reads_qc_pass': 'NFR / mono-nuc reads (QC pass)',
'nfr_over_mono_nuc_reads_qc_reason': 'NFR / mono-nuc reads (QC reason)',
'nfr_peak_exists': 'Presence of NFR peak',
'mono_nuc_peak_exists': 'Presence of Mono-Nuc peak',
'di_nuc_peak_exists': 'Presence of Di-Nuc peak',
}
def parse_nucleosomal_qc(txt):
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as fp:
lines = fp.read().strip('\n').split('\n')
for line in lines:
arr = line.split('\t')
key = arr[0]
if key == 'Fraction of reads in NFR':
result['frac_reads_in_nfr'] = to_float(arr[2])
result['frac_reads_in_nfr_qc_pass'] = to_bool(arr[1])
result['frac_reads_in_nfr_qc_reason'] = arr[3]
elif key == 'NFR / mono-nuc reads':
result['nfr_over_mono_nuc_reads'] = to_float(arr[2])
result['nfr_over_mono_nuc_reads_qc_pass'] = to_bool(arr[1])
result['nfr_over_mono_nuc_reads_qc_reason'] = arr[3]
elif key == 'Presence of NFR peak':
result['nfr_peak_exists'] = to_bool(arr[1])
elif key == 'Presence of Mono-Nuc peak':
result['mono_nuc_peak_exists'] = to_bool(arr[1])
elif key == 'Presence of Di-Nuc peak':
result['di_nuc_peak_exists'] = to_bool(arr[1])
else:
raise ValueError(
'Wrong line in nucleosomal QC file')
return result
MAP_KEY_DESC_PEAK_REGION_SIZE_QC = {
'min_size': 'Min size',
'25_pct': '25 percentile',
'50_pct': '50 percentile (median)',
'75_pct': '75 percentile',
'max_size': 'Max size',
'mean': 'Mean',
}
def parse_peak_region_size_qc(txt):
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as fp:
lines = fp.read().strip('\n').split('\n')
for line in lines:
key, val = line.split('\t')
if key == 'Min size':
result['min_size'] = to_float(val)
elif key == '25 percentile':
result['25_pct'] = to_float(val)
elif key == '50 percentile (median)':
result['50_pct'] = to_float(val)
elif key == '75 percentile':
result['75_pct'] = to_float(val)
elif key == 'Max size':
result['max_size'] = to_float(val)
elif key == 'Mean':
result['mean'] = to_float(val)
else:
raise ValueError(
'Wrong line in peak region size log file')
return result
MAP_KEY_DESC_NUM_PEAK_QC = {
'num_peaks': 'Number of peaks',
}
def parse_num_peak_qc(txt):
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as f:
val = f.readlines()[0].strip()
result['num_peaks'] = int(val)
return result
def to_int(var):
try:
return int(var)
except ValueError:
return None
def to_float(var):
try:
return float(var)
except ValueError:
return None
def to_bool(var):
return var.lower() in ('true', 't', 'ok', 'yes', '1')
|
tests/extensions/redash-dummy/redash_dummy/extension.py | zero1number/redash | 20,680 | 12726812 | <reponame>zero1number/redash<gh_stars>1000+
module_attribute = "hello!"
def extension(app):
"""This extension will work"""
return "extension loaded"
def assertive_extension(app):
"""This extension won't work"""
assert False
|
packages/vaex-astro/vaex/astro/astropy_table.py | sethvargo/vaex | 337 | 12726853 | <filename>packages/vaex-astro/vaex/astro/astropy_table.py
import astropy.table
import numpy as np
from vaex.dataset import DatasetArrays
from vaex.dataset_misc import _try_unit
class DatasetAstropyTable(DatasetArrays):
def __init__(self, filename=None, format=None, table=None, **kwargs):
self.ucds = {}
self.units = {}
columns = {}
if table is None:
self.filename = filename
self.format = format
self.read_table(**kwargs)
else:
self.description = table.meta.get("description")
self.table = table
for i in range(len(self.table.dtype)):
name = self.table.dtype.names[i]
column = self.table[name]
type = self.table.dtype[i]
if type.kind in "fiuSU": # only store float and int
masked_array = self.table[name].data
if "ucd" in column._meta:
self.ucds[name] = column._meta["ucd"]
if column.unit:
unit = _try_unit(column.unit)
if unit:
self.units[name] = unit
if column.description:
self.descriptions[name] = column.description
if hasattr(masked_array, "mask"):
if type.kind in ["f"]:
masked_array.data[masked_array.mask] = np.nan
if type.kind in ["i"]:
masked_array.data[masked_array.mask] = 0
columns[name] = self.table[name].data
if type.kind in "SU":
columns[name] = self.table[name].data
super().__init__(columns)
def read_table(self, **kwargs):
self.table = astropy.table.Table.read(self.filename, format=self.format, **kwargs)
|
src/oci/apm_synthetics/models/monitor_configuration.py | Manny27nyc/oci-python-sdk | 249 | 12726868 | <filename>src/oci/apm_synthetics/models/monitor_configuration.py
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class MonitorConfiguration(object):
"""
Details of monitor configuration.
"""
#: A constant which can be used with the config_type property of a MonitorConfiguration.
#: This constant has a value of "BROWSER_CONFIG"
CONFIG_TYPE_BROWSER_CONFIG = "BROWSER_CONFIG"
#: A constant which can be used with the config_type property of a MonitorConfiguration.
#: This constant has a value of "SCRIPTED_BROWSER_CONFIG"
CONFIG_TYPE_SCRIPTED_BROWSER_CONFIG = "SCRIPTED_BROWSER_CONFIG"
#: A constant which can be used with the config_type property of a MonitorConfiguration.
#: This constant has a value of "REST_CONFIG"
CONFIG_TYPE_REST_CONFIG = "REST_CONFIG"
#: A constant which can be used with the config_type property of a MonitorConfiguration.
#: This constant has a value of "SCRIPTED_REST_CONFIG"
CONFIG_TYPE_SCRIPTED_REST_CONFIG = "SCRIPTED_REST_CONFIG"
def __init__(self, **kwargs):
"""
Initializes a new MonitorConfiguration object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
to a service operation then you should favor using a subclass over the base class:
* :class:`~oci.apm_synthetics.models.ScriptedRestMonitorConfiguration`
* :class:`~oci.apm_synthetics.models.ScriptedBrowserMonitorConfiguration`
* :class:`~oci.apm_synthetics.models.RestMonitorConfiguration`
* :class:`~oci.apm_synthetics.models.BrowserMonitorConfiguration`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param config_type:
The value to assign to the config_type property of this MonitorConfiguration.
Allowed values for this property are: "BROWSER_CONFIG", "SCRIPTED_BROWSER_CONFIG", "REST_CONFIG", "SCRIPTED_REST_CONFIG", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type config_type: str
:param is_failure_retried:
The value to assign to the is_failure_retried property of this MonitorConfiguration.
:type is_failure_retried: bool
"""
self.swagger_types = {
'config_type': 'str',
'is_failure_retried': 'bool'
}
self.attribute_map = {
'config_type': 'configType',
'is_failure_retried': 'isFailureRetried'
}
self._config_type = None
self._is_failure_retried = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['configType']
if type == 'SCRIPTED_REST_CONFIG':
return 'ScriptedRestMonitorConfiguration'
if type == 'SCRIPTED_BROWSER_CONFIG':
return 'ScriptedBrowserMonitorConfiguration'
if type == 'REST_CONFIG':
return 'RestMonitorConfiguration'
if type == 'BROWSER_CONFIG':
return 'BrowserMonitorConfiguration'
else:
return 'MonitorConfiguration'
@property
def config_type(self):
"""
Gets the config_type of this MonitorConfiguration.
Type of configuration.
Allowed values for this property are: "BROWSER_CONFIG", "SCRIPTED_BROWSER_CONFIG", "REST_CONFIG", "SCRIPTED_REST_CONFIG", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The config_type of this MonitorConfiguration.
:rtype: str
"""
return self._config_type
@config_type.setter
def config_type(self, config_type):
"""
Sets the config_type of this MonitorConfiguration.
Type of configuration.
:param config_type: The config_type of this MonitorConfiguration.
:type: str
"""
allowed_values = ["BROWSER_CONFIG", "SCRIPTED_BROWSER_CONFIG", "REST_CONFIG", "SCRIPTED_REST_CONFIG"]
if not value_allowed_none_or_none_sentinel(config_type, allowed_values):
config_type = 'UNKNOWN_ENUM_VALUE'
self._config_type = config_type
@property
def is_failure_retried(self):
"""
Gets the is_failure_retried of this MonitorConfiguration.
If isFailureRetried is enabled, then a failed call will be retried.
:return: The is_failure_retried of this MonitorConfiguration.
:rtype: bool
"""
return self._is_failure_retried
@is_failure_retried.setter
def is_failure_retried(self, is_failure_retried):
"""
Sets the is_failure_retried of this MonitorConfiguration.
If isFailureRetried is enabled, then a failed call will be retried.
:param is_failure_retried: The is_failure_retried of this MonitorConfiguration.
:type: bool
"""
self._is_failure_retried = is_failure_retried
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
src/nap/rpc/client.py | timgates42/django-nap | 114 | 12726892 | <filename>src/nap/rpc/client.py
import json
import requests
class RPCProxy:
def __init__(self, client, name):
self.client = client
self.name = name
def __call__(self, **kwargs):
resp = self.client.session.post(
self.client.endpoint,
data=json.dumps(kwargs),
headers={
'X-Rpc-Action': self.name,
'Content-Type': 'application/json',
},
)
return resp.json()
class RPCClient:
def __init__(self, endpoint):
self.endpoint = endpoint
self.session = requests.Session()
def __getattr__(self, key):
return RPCProxy(self, key)
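# A minimal usage sketch (hypothetical endpoint; any attribute access becomes the RPC action name):
#
#   client = RPCClient('https://example.com/rpc/')
#   result = client.create_user(name='alice')
#   # -> POST https://example.com/rpc/ with header X-Rpc-Action: create_user and a JSON body
#   #    {"name": "alice"}; the decoded JSON response is returned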
|
test/test.py | stonewell/pymterm | 102 | 12726929 | <filename>test/test.py
# -*- coding: utf8 -*-
import os
os.environ['PYGAME_FREETYPE'] = '1'
import pygame
import pygame.freetype
pygame.init()
#font = pygame.freetype.SysFont('Menlo Regular', 13)
font = pygame.freetype.Font('c:\\github\\pymterm\\data\\fonts\\wqy-microhei-mono.ttf', 13)
#font = pygame.font.Font('c:\\github\\pymterm\\data\\fonts\\wqy-microhei-mono.ttf', 13)
#font = pygame.font.Font('/home/stone/Work/personal/pymterm/data/fonts/wqy-microhei-mono.ttf', 13)
#font.ucs4 = True #should be useless. defaults to true
print(font.get_sized_height(), font.get_sized_ascender(), font.get_sized_descender(), font.get_rect('ABCDabcd'))
print(font.get_rect('g').left, font.get_rect('g').top)
print(font.get_metrics('g'), font.get_metrics('s'), font.get_metrics('l'), font.get_metrics('ls'), font.get_metrics('lg'), font.get_metrics(' '))
print(font.get_rect('l'), font.get_rect('ls'), font.get_rect('s'), font.get_rect('lg'), font.get_rect('g'), font.get_rect(' '))
print(font.render(' ', (0,0,0,0))[1])
surf = font.render(u'黒 ♧', (255, 0, 0, 1))[0]
pygame.image.save(surf, 'image.png')
print('image saved')
|
RecoBTag/Skimming/python/btagMC_QCD_380_470_cfi.py | ckamtsikis/cmssw | 852 | 12726988 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
btagMC_QCD_380_470 = cms.EDFilter("BTagSkimMC",
mcProcess = cms.string('QCD'),
pthat_min = cms.double(380.0),
verbose = cms.untracked.bool(False),
pthat_max = cms.double(470.0)
)
|
fum/tests.py | jsavikko/futurice-ldap-user-manager | 111 | 12726998 | <reponame>jsavikko/futurice-ldap-user-manager
import sshpubkeys
import unittest
class UtilTestCase(unittest.TestCase):
def test_ssh_key_bits_and_fingerprint(self):
with self.assertRaises(sshpubkeys.InvalidKeyException):
sshpubkeys.SSHKey('an invalid key string')
valid_ssh_key = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3uta/x/kAwbs2G7AOUQtRG7l1hjEws4mrvnTZmwICoGNi+TUwxerZgMbBBID7Kpza/ZSUqXpKX5gppRW9zECBsbJ+2D0ch/oVSZ408aUE6ePNzJilLA/2wtRct/bkHDZOVI+iwEEr1IunjceF+ZQxnylUv44C6SgZvrDj+38hz8z1Vf4BtW5jGOhHkddTadU7Nn4jQR3aFXMoheuu/vHYD2OyDJj/r6vh9x5ey8zFmwsGDtFCCzzLgcfPYfOdDxFIWhsopebnH3QHVcs/E0KqhocsEdFDRvcFgsDCKwmtHyZVAOKym2Pz9TfnEdGeb+eKrleZVsApFrGtSIfcf4pH user@host'
ssh_key = sshpubkeys.SSHKey(valid_ssh_key)
self.assertEqual(ssh_key.bits, 2048)
self.assertEqual(ssh_key.hash(),
'73:e7:0c:60:7b:d2:7b:df:81:2e:c2:57:54:53:81:91')
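# Small addition so the module can be run directly, assuming the standard
# unittest runner is acceptable here.
if __name__ == '__main__':
    unittest.main()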
|
Chapter01/01_reading_image.py | debojyoti007/OpenCV | 105 | 12727010 | <gh_stars>100-1000
import cv2
img = cv2.imread('./images/input.jpg')
cv2.imshow('Input image', img)
cv2.waitKey() |
zeus/web/hooks/base.py | conrad-kronos/zeus | 221 | 12727025 | from flask import current_app, jsonify, request, Response
from flask.views import View
from sqlalchemy.orm import joinedload
from zeus import auth
from zeus.api.resources.base import ApiHelpers
from zeus.config import nplusone
from zeus.constants import Permission
from zeus.exceptions import ApiError
from zeus.models import Hook
class BaseHook(View, ApiHelpers):
public = False
methods = ["GET", "POST", "PUT", "DELETE"]
def dispatch_request(self, hook_id, signature=None, *args, **kwargs) -> Response:
current_app.logger.info("received webhook id=%s", hook_id)
with nplusone.ignore("eager_load"):
hook = (
Hook.query.unrestricted_unsafe()
.options(joinedload("repository"))
.get(hook_id)
)
if not hook:
return self.respond({"message": "hook not found"}, 404)
if not self.public and not hook.is_valid_signature(signature):
current_app.logger.warn("invalid webhook signature id=%s", hook_id)
return self.respond({"message": "hook not found"}, 404)
try:
method = getattr(self, request.method.lower())
except AttributeError:
current_app.logger.warn(
"invalid webhook method id=%s, method=%s", hook_id, request.method
)
return self.respond({"message": "resource not found"}, 405)
auth.set_current_tenant(
auth.RepositoryTenant(
repository_id=hook.repository_id, permission=Permission.admin
)
)
try:
resp = method(hook, *args, **kwargs)
except ApiError as exc:
return self.respond(exc.json or {}, exc.code or 500)
if isinstance(resp, Response):
return resp
return self.respond(resp)
def respond(self, context: dict = {}, status: int = 200) -> Response:
resp = jsonify(context)
resp.status_code = status
return resp
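# Illustrative sketch only (not part of zeus): a hypothetical concrete hook
# showing how dispatch_request() maps the HTTP verb onto a handler method.
# The class name and payload are made up for demonstration.
class PingHook(BaseHook):
    public = True
    def get(self, hook) -> dict:
        # ``hook`` is the Hook row already resolved by dispatch_request().
        return {"ok": True, "hook": str(hook.id)}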
|
demo/dygraph/quant/optimizer.py | ZichaoGuo/PaddleSlim | 926 | 12727044 | <filename>demo/dygraph/quant/optimizer.py
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
def piecewise_decay(net, device_num, args):
step = int(
math.ceil(float(args.total_images) / (args.batch_size * device_num)))
bd = [step * e for e in args.step_epochs]
lr = [args.lr * (0.1**i) for i in range(len(bd) + 1)]
learning_rate = paddle.optimizer.lr.PiecewiseDecay(
boundaries=bd, values=lr, verbose=False)
optimizer = paddle.optimizer.Momentum(
parameters=net.parameters(),
learning_rate=learning_rate,
momentum=args.momentum_rate,
weight_decay=paddle.regularizer.L2Decay(args.l2_decay))
return optimizer, learning_rate
def cosine_decay(net, device_num, args):
step = int(
math.ceil(float(args.total_images) / (args.batch_size * device_num)))
learning_rate = paddle.optimizer.lr.CosineAnnealingDecay(
learning_rate=args.lr, T_max=step * args.num_epochs, verbose=False)
optimizer = paddle.optimizer.Momentum(
parameters=net.parameters(),
learning_rate=learning_rate,
momentum=args.momentum_rate,
weight_decay=paddle.regularizer.L2Decay(args.l2_decay))
return optimizer, learning_rate
def create_optimizer(net, device_num, args):
if args.lr_strategy == "piecewise_decay":
return piecewise_decay(net, device_num, args)
elif args.lr_strategy == "cosine_decay":
return cosine_decay(net, device_num, args)
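# Rough usage sketch (an addition): ``demo_args`` is a stand-in namespace with
# the fields the functions above read; the values are arbitrary examples.
if __name__ == '__main__':
    import argparse
    from paddle.vision.models import resnet50
    demo_args = argparse.Namespace(
        total_images=1281167, batch_size=256, lr=0.1, lr_strategy='cosine_decay',
        num_epochs=120, step_epochs=[30, 60, 90], momentum_rate=0.9, l2_decay=1e-4)
    opt, lr_sched = create_optimizer(resnet50(), device_num=1, args=demo_args)
    print(type(opt).__name__, type(lr_sched).__name__)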
|
autoPyTorch/core/autonet_classes/autonet_image_classification_multiple_datasets.py | mens-artis/Auto-PyTorch | 1,657 | 12727094 | <gh_stars>1000+
from autoPyTorch.core.autonet_classes.autonet_image_classification import AutoNetImageClassification
class AutoNetImageClassificationMultipleDatasets(AutoNetImageClassification):
preset_folder_name = "image_classification_multiple_datasets"
@classmethod
def get_default_pipeline(cls):
from autoPyTorch.pipeline.base.pipeline import Pipeline
from autoPyTorch.pipeline.nodes.image.optimization_algorithm_no_timelimit import OptimizationAlgorithmNoTimeLimit
from autoPyTorch.pipeline.nodes.optimizer_selector import OptimizerSelector
from autoPyTorch.pipeline.nodes.log_functions_selector import LogFunctionsSelector
from autoPyTorch.pipeline.nodes.metric_selector import MetricSelector
from autoPyTorch.pipeline.nodes.image.simple_scheduler_selector import SimpleLearningrateSchedulerSelector
from autoPyTorch.pipeline.nodes.image.cross_validation_indices import CrossValidationIndices
from autoPyTorch.pipeline.nodes.image.autonet_settings_no_shuffle import AutoNetSettingsNoShuffle
from autoPyTorch.pipeline.nodes.image.network_selector_datasetinfo import NetworkSelectorDatasetInfo
from autoPyTorch.pipeline.nodes.image.loss_module_selector_indices import LossModuleSelectorIndices
from autoPyTorch.pipeline.nodes.image.image_augmentation import ImageAugmentation
from autoPyTorch.pipeline.nodes.image.create_image_dataloader import CreateImageDataLoader
from autoPyTorch.pipeline.nodes.image.create_dataset_info import CreateDatasetInfo
from autoPyTorch.pipeline.nodes.image.simple_train_node import SimpleTrainNode
from autoPyTorch.pipeline.nodes.image.multiple_datasets import MultipleDatasets
from autoPyTorch.pipeline.nodes.image.image_dataset_reader import ImageDatasetReader
# build the pipeline
pipeline = Pipeline([
AutoNetSettingsNoShuffle(),
OptimizationAlgorithmNoTimeLimit([
MultipleDatasets([
ImageDatasetReader(),
CreateDatasetInfo(),
CrossValidationIndices([
NetworkSelectorDatasetInfo(),
OptimizerSelector(),
SimpleLearningrateSchedulerSelector(),
LogFunctionsSelector(),
MetricSelector(),
LossModuleSelectorIndices(),
ImageAugmentation(),
CreateImageDataLoader(),
SimpleTrainNode()
])
])
])
])
cls._apply_default_pipeline_settings(pipeline)
return pipeline
|
Wrapping/Python/vtkmodules/test/ErrorObserver.py | cclauss/VTK | 1,755 | 12727103 | <gh_stars>1000+
from vtkmodules.vtkCommonCore import vtkCommand
class vtkErrorObserver(object):
def __init__(self):
self.CallDataType = 'string0'
self.reset()
def __call__(self, caller, event, data):
if event == 'ErrorEvent':
self._error_message = data
elif event == 'WarningEvent':
self._warning_message = data
def _check(self, seen, actual, expect, what):
if seen:
if actual.find(expect) == -1:
msg = 'ERROR: %s message does not contain "%s" got \n"%s"' \
% (what, expect, self.error_message)
raise RuntimeError(msg)
else:
what = what.lower()
msg = 'ERROR: Failed to catch any %s. ' \
'Expected the %s message to contain "%s"' \
% (what, what, expect)
raise RuntimeError(msg)
self.reset()
def check_error(self, expect):
self._check(self.saw_error, self.error_message, expect, 'Error')
def check_warning(self, expect):
self._check(self.saw_warning, self.warning_message, expect, 'Warning')
def reset(self):
self._error_message = None
self._warning_message = None
@property
def saw_error(self):
return self._error_message is not None
@property
def error_message(self):
return self._error_message
@property
def saw_warning(self):
return self._warning_message is not None
@property
def warning_message(self):
return self._warning_message
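# Minimal usage sketch (an addition, not from VTK): the reader class, file name
# and expected message substring are assumptions chosen for illustration.
if __name__ == '__main__':
    from vtkmodules.vtkIOXML import vtkXMLPolyDataReader
    observer = vtkErrorObserver()
    reader = vtkXMLPolyDataReader()
    reader.AddObserver('ErrorEvent', observer)
    reader.SetFileName('no-such-file.vtp')
    reader.Update()
    observer.check_error('Error opening file')  # assumed error substring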
|
cort/coreference/multigraph/features.py | leonardoboliveira/cort | 141 | 12727104 | <gh_stars>100-1000
import re
from cort.core import external_data
from cort.core import spans
from cort.core import util
__author__ = 'smartschat'
def not_singleton(anaphor, antecedent):
singleton_data = external_data.SingletonMentions.get_instance()
anaphor = " ".join(anaphor.attributes["tokens"])
antecedent = " ".join(antecedent.attributes["tokens"])
if (anaphor in singleton_data.singletons and
singleton_data.singletons[anaphor] >= 25):
return True
if (antecedent in singleton_data.singletons and
singleton_data.singletons[antecedent] >= 25):
return True
def pronoun_parallelism(anaphor, antecedent):
return (anaphor.attributes["type"] == "PRO"
and (anaphor.attributes["citation_form"]
in ["he", "she", "it", "they"])
and (antecedent.attributes["type"] != "PRO"
or (antecedent.attributes["citation_form"]
in ["he", "she", "it", "they"]))
and (antecedent.attributes["grammatical_function"] ==
anaphor.attributes["grammatical_function"])
and (antecedent.attributes["grammatical_function"]
in ["SUBJECT", "OBJECT"]))
def antecedent_is_subject(anaphor, antecedent):
return (anaphor.attributes["type"] == "PRO"
and (anaphor.attributes["citation_form"]
in ["he", "she", "it", "they"])
and (antecedent.attributes["type"] != "PRO"
or (antecedent.attributes["citation_form"]
in ["he", "she", "it", "they"]))
and antecedent.attributes["grammatical_function"] == "SUBJECT")
def antecedent_is_object(anaphor, antecedent):
return (anaphor.attributes["type"] == "PRO"
and (anaphor.attributes["citation_form"]
in ["he", "she", "it", "they"])
and (antecedent.attributes["type"] != "PRO"
or (antecedent.attributes["citation_form"]
in ["he", "she", "it", "they"]))
and antecedent.attributes["grammatical_function"] == "OBJECT")
def anaphor_pronoun(anaphor, antecedent):
return (anaphor.attributes["type"] == "PRO"
and (anaphor.attributes["citation_form"]
in ["he", "she", "it", "they"])
and (antecedent.attributes["type"] != "PRO"
or (antecedent.attributes["citation_form"]
in ["he", "she", "it", "they"])))
def lexical(anaphor, antecedent):
lexical_data = external_data.LexicalData.get_instance()
if ((anaphor.attributes["type"] == "NAM"
and antecedent.attributes["type"] == "NAM")
or (anaphor.attributes["type"] == "NOM"
and anaphor.attributes["fine_type"] == "DEF"
and antecedent.attributes["type"] in ["NAM", "NOM"])):
return lexical_data.look_up(anaphor, antecedent)
def non_pronominal_string_match(anaphor, antecedent):
if anaphor.attributes["type"] in ["PRO", "DEM", "VRB"]:
return False
elif antecedent.attributes["type"] in ["PRO", "DEM", "VRB"]:
return False
else:
return (" ".join(util.clean_via_pos(anaphor.attributes["tokens"],
anaphor.attributes["pos"])).lower()
== " ".join(util.clean_via_pos(
antecedent.attributes["tokens"],
antecedent.attributes["pos"])).lower())
def head_match(anaphor, antecedent):
if anaphor.attributes["type"] in ["PRO", "DEM", "VRB"]:
return False
elif antecedent.attributes["type"] in ["PRO", "DEM", "VRB"]:
return False
elif (anaphor.attributes["semantic_class"] == "NUMERIC" or
antecedent.attributes["semantic_class"] == "NUMERIC"):
return False
else:
return (anaphor.attributes["head"] != ["and"] and
(" ".join(anaphor.attributes["head"]).lower()
== " ".join(antecedent.attributes["head"]).lower()))
def substring(anaphor, antecedent):
if anaphor.attributes["type"] in ["PRO", "DEM", "VRB"]:
return False
elif antecedent.attributes["type"] != "NAM":
return False
elif (anaphor.attributes["semantic_class"] == "NUMERIC" or
antecedent.attributes["semantic_class"] == "NUMERIC"):
return False
elif anaphor.attributes["head"] == ["and"]:
return False
else:
cleaned = util.clean_via_pos(
anaphor.attributes["tokens"],
anaphor.attributes["pos"])
return (" ".join(cleaned).lower()
in " ".join(antecedent.attributes["tokens"]).lower())
def pronoun_same_canonical_form(anaphor, antecedent):
return (anaphor.attributes["type"] == "PRO"
and antecedent.attributes["type"] == "PRO"
and (anaphor.attributes["citation_form"] ==
antecedent.attributes["citation_form"]))
def speaker(anaphor, antecedent):
speaker_anaphor = anaphor.attributes["speaker"]
speaker_antecedent = antecedent.attributes["speaker"]
if speaker_anaphor == "-" and speaker_antecedent == "-":
return False
else:
if (anaphor.attributes["type"] == "PRO"
and antecedent.attributes["type"] == "PRO"):
if (anaphor.attributes["citation_form"] == "i"
and antecedent.attributes["citation_form"] == "i"):
return speaker_anaphor == speaker_antecedent
elif ((anaphor.attributes["citation_form"] == "i"
and antecedent.attributes["citation_form"] == "you")
or (anaphor.attributes["citation_form"] == "you"
and antecedent.attributes["citation_form"] == "i")):
return (nothing_between(anaphor, antecedent)
and speaker_anaphor != speaker_antecedent)
elif (anaphor.attributes["type"] == "PRO"
or antecedent.attributes["type"] == "PRO"):
if (anaphor.attributes["type"] == "PRO"
and anaphor.attributes["citation_form"] == "i"):
return (speaker_anaphor.replace("_", " ").lower() in
[" ".join(antecedent.attributes["tokens"]).lower(),
" ".join(antecedent.attributes["head"]).lower()])
elif (antecedent.attributes["type"] == "PRO"
and antecedent.attributes["citation_form"] == "i"):
return (speaker_antecedent.replace("_", " ").lower() in
[" ".join(anaphor.attributes["tokens"]).lower(),
" ".join(anaphor.attributes["head"]).lower()])
def nothing_between(anaphor, antecedent):
if not anaphor.document:
return True
if anaphor.span < antecedent.span:
start = anaphor.span.begin
end = antecedent.span.end
else:
start = antecedent.span.begin
end = anaphor.span.end
speakers = anaphor.document.speakers[start:end+1]
allowed_speakers = [speakers[0], speakers[-1]]
for particular_speaker in speakers:
if particular_speaker not in allowed_speakers:
return False
return True
def not_anaphoric(anaphor, antecedent):
return not (anaphor.attributes["type"] in ["NAM", "PRO"]
or (anaphor.attributes["type"] == "NOM"
and anaphor.attributes["fine_type"] == "DEF"))
def not_speaker(anaphor, antecedent):
speaker_anaphor = anaphor.attributes["speaker"]
speaker_antecedent = antecedent.attributes["speaker"]
if speaker_anaphor == "-" or speaker_antecedent == "-":
return False
else:
if (anaphor.attributes["type"] == "PRO"
and antecedent.attributes["type"] == "PRO"):
if ((anaphor.attributes["citation_form"] == "i"
and antecedent.attributes["citation_form"] == "i")
or (anaphor.attributes["citation_form"] == "we"
and antecedent.attributes["citation_form"] == "we")
or (anaphor.attributes["citation_form"] == "you"
and antecedent.attributes["citation_form"] == "you")):
return speaker_anaphor != speaker_antecedent
elif ((anaphor.attributes["citation_form"] == "i"
and antecedent.attributes["citation_form"] == "you")
or (anaphor.attributes["citation_form"] == "you"
and antecedent.attributes["citation_form"] == "i")):
return speaker_anaphor == speaker_antecedent
def not_pronoun_distance(anaphor, antecedent):
return (anaphor.attributes["type"] == "PRO"
and anaphor.attributes["citation_form"] == "it"
and (anaphor.attributes["sentence_id"]
- antecedent.attributes["sentence_id"] > 1))
def not_embedding(anaphor, antecedent):
return (antecedent.span.embeds(anaphor.span)
and (anaphor.attributes["fine_type"]
not in ["REFL", "POSS", "POSS_ADJ"]))
def not_compatible(anaphor, antecedent):
if (" ".join(util.clean_via_pos(anaphor.attributes["tokens"],
anaphor.attributes["pos"])).lower() ==
" ".join(util.clean_via_pos(antecedent.attributes["tokens"],
antecedent.attributes["pos"])).lower()):
return False
gender = (anaphor.attributes["gender"] == "UNKNOWN"
or antecedent.attributes["gender"] == "UNKNOWN"
or anaphor.attributes["gender"]
== antecedent.attributes["gender"])
number = (anaphor.attributes["number"] == "UNKNOWN"
or antecedent.attributes["number"] == "UNKNOWN"
or anaphor.attributes["number"]
== antecedent.attributes["number"])
semantic_class = (anaphor.attributes["semantic_class"] == "UNKNOWN"
or antecedent.attributes["semantic_class"] == "UNKNOWN"
or anaphor.attributes["semantic_class"]
== antecedent.attributes["semantic_class"])
return not (gender and number and semantic_class)
def not_modifier(anaphor, antecedent):
if (anaphor.attributes["type"] == "NAM"
and antecedent.attributes["type"] == "NAM"):
return False
elif (anaphor.attributes["type"] in ["PRO", "DEM", "VRB"]
or antecedent.attributes["type"] in ["PRO", "DEM", "VRB"]):
return False
else:
return not get_modifier(anaphor).issubset(get_modifier(antecedent))
def get_modifier(mention):
head_span_in_mention = spans.Span(
mention.attributes["head_span"].begin - mention.span.begin,
mention.attributes["head_span"].end - mention.span.begin)
modifiers = set()
for index, (token, pos) in enumerate(
zip(mention.attributes["tokens"], mention.attributes["pos"])):
if (token.lower() not in ["the", "this", "that", "those", "these",
"a", "an"]
and pos not in ["POS", "IN"]
and (index < head_span_in_mention.begin
or index > head_span_in_mention.end)):
modifiers.add(token.lower())
return modifiers
def alias(anaphor, antecedent):
if (anaphor.attributes["type"] != "NAM"
or antecedent.attributes["type"] != "NAM"):
return False
elif (" ".join(anaphor.attributes["head"]).lower()
== " ".join(antecedent.attributes["head"]).lower()):
return False
else:
anaphor_cleaned_tokens = anaphor.attributes["head"]
antecedent_cleaned_tokens = antecedent.attributes["head"]
category = get_category_for_alias(
anaphor.attributes["ner"][anaphor.attributes["head_index"]],
antecedent.attributes["ner"][antecedent.attributes["head_index"]])
if category == "PERSON":
return person_alias(anaphor_cleaned_tokens,
antecedent_cleaned_tokens)
elif category == "LOC":
return loc_alias(anaphor_cleaned_tokens, antecedent_cleaned_tokens)
elif category == "ORG":
return org_alias(anaphor_cleaned_tokens, antecedent_cleaned_tokens)
else:
return False
def get_category_for_alias(anaphor_ner, antecedent_ner):
if anaphor_ner == "PERSON" and antecedent_ner == "PERSON":
return "PERSON"
elif re.match(r"LOC", anaphor_ner) and re.match(r"LOC", antecedent_ner):
return "LOC"
elif re.match(r"ORG", anaphor_ner) and re.match(r"(ORG)", antecedent_ner):
return "ORG"
def loc_alias(anaphor_cleaned_tokens, antecedent_cleaned_tokens):
return (starts_with(anaphor_cleaned_tokens, antecedent_cleaned_tokens)
or is_abbreviation(anaphor_cleaned_tokens,
antecedent_cleaned_tokens))
def org_alias(anaphor_cleaned_tokens, antecedent_cleaned_tokens):
return (starts_with(anaphor_cleaned_tokens, antecedent_cleaned_tokens)
or is_abbreviation(anaphor_cleaned_tokens,
antecedent_cleaned_tokens))
def person_alias(anaphor_cleaned_tokens, antecedent_cleaned_tokens):
if len(anaphor_cleaned_tokens) == 1 or len(antecedent_cleaned_tokens) == 1:
return (anaphor_cleaned_tokens[0] == antecedent_cleaned_tokens[0]
or anaphor_cleaned_tokens[-1] == antecedent_cleaned_tokens[-1])
elif (len(anaphor_cleaned_tokens) == 2
and anaphor_cleaned_tokens[0].lower() in ["mr", "ms", "mr.", "ms."]
or len(antecedent_cleaned_tokens) == 2
and antecedent_cleaned_tokens[0].lower() in ["mr", "ms", "mr.",
"ms."]):
return anaphor_cleaned_tokens[-1] == antecedent_cleaned_tokens[-1]
elif (anaphor_cleaned_tokens[0] == antecedent_cleaned_tokens[0]
and anaphor_cleaned_tokens[-1] == antecedent_cleaned_tokens[-1]):
return True
elif len(anaphor_cleaned_tokens) > 1 and len(antecedent_cleaned_tokens) > 1:
return (anaphor_cleaned_tokens[-1] == antecedent_cleaned_tokens[-1]
and anaphor_cleaned_tokens[-2] == antecedent_cleaned_tokens[-2])
return False
def starts_with(anaphor_cleaned_tokens, antecedent_cleaned_tokens):
for ana_token, ante_token in zip(anaphor_cleaned_tokens,
antecedent_cleaned_tokens):
if ana_token != ante_token:
return False
return True
def is_abbreviation(anaphor_cleaned_tokens, antecedent_cleaned_tokens):
if (" ".join(anaphor_cleaned_tokens).replace(".", "")
== " ".join(antecedent_cleaned_tokens).replace(".", "")):
return True
else:
if len(anaphor_cleaned_tokens) > len(antecedent_cleaned_tokens):
return (" ".join(antecedent_cleaned_tokens)
in set(get_acronyms(anaphor_cleaned_tokens)))
else:
return (" ".join(anaphor_cleaned_tokens)
in set(get_acronyms(antecedent_cleaned_tokens)))
def get_acronyms(cleaned_tokens):
company_designator = r'assoc|bros|co|coop|corp|devel|inc|llc|ltd\.?'
tokens_without_designator = [token for token in cleaned_tokens
if not re.match(company_designator,
token.lower())]
return (" ".join(tokens_without_designator),
"".join([token[0] for token in tokens_without_designator
if token[0].isupper()]),
".".join([token[0] for token in tokens_without_designator
if token[0].isupper()])+".")
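# Small illustrative check (added here, not part of cort): the three acronym
# variants produced for a name that ends with a company designator.
if __name__ == '__main__':
    print(get_acronyms(["International", "Business", "Machines", "Corp."]))
    # expected: ('International Business Machines', 'IBM', 'I.B.M.')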
|
server.py | tongplw/SentimentAnalysis | 298 | 12727139 | <filename>server.py
from flask import Flask, jsonify, request
from flask_cors import CORS
import torch
from transformers import AutoTokenizer, AutoConfig
from modeling import BertForSentimentClassification, AlbertForSentimentClassification, DistilBertForSentimentClassification
from arguments import args
app = Flask(__name__)
app.config.from_object(__name__)
# Enable CORS
CORS(app, resources={r'/*': {'origins': '*'}})
def classify_sentiment(sentence):
with torch.no_grad():
tokens = tokenizer.tokenize(sentence)
tokens = ['[CLS]'] + tokens + ['[SEP]']
tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
seq = torch.tensor(tokens_ids)
seq = seq.unsqueeze(0)
attn_mask = (seq != 0).long()
logit = model(seq, attn_mask)
prob = torch.sigmoid(logit.unsqueeze(-1))
prob = prob.item()
soft_prob = prob > 0.5
if soft_prob == 1:
return 'Positive', int(prob*100)
else:
return 'Negative', int(100-prob*100)
@app.route('/', methods=['GET'])
def sentiment():
if request.method == 'GET':
text = request.args['text']
sentiment, probability = classify_sentiment(text)
return jsonify({'sentiment': sentiment, 'probability': probability})
if __name__ == '__main__':
if args.model_name_or_path is None:
args.model_name_or_path = 'barissayil/bert-sentiment-analysis-sst'
#Configuration for the desired transformer model
config = AutoConfig.from_pretrained(args.model_name_or_path)
#Create the model with the desired transformer model
if config.model_type == 'bert':
model = BertForSentimentClassification.from_pretrained(args.model_name_or_path)
elif config.model_type == 'albert':
model = AlbertForSentimentClassification.from_pretrained(args.model_name_or_path)
elif config.model_type == 'distilbert':
model = DistilBertForSentimentClassification.from_pretrained(args.model_name_or_path)
else:
raise ValueError('This transformer model is not supported yet.')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.eval()
#Initialize the tokenizer for the desired transformer model
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
#Run the Flask App
app.run() |
glumpy/app/parser.py | Frekby/glumpy | 1,074 | 12727166 | # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
"""
Default argument parser for any glumpy program.
"""
import argparse
import glumpy.defaults
# Default parser
__parser__ = None
def get_default():
""" Get the default parser. """
global __parser__
if __parser__ is None:
__parser__ = argparse.ArgumentParser()
set_default_options(__parser__)
return __parser__
def get_options():
""" Parse and retrun command line options. """
options, unknown = get_default().parse_known_args()
return options
def set_default_options(parser):
""" Set parser default options. """
# Backend option
parser.add_argument("--backend", "-b",
default = glumpy.defaults.backend(),
choices = ('glfw', 'sdl2', 'qt5', 'pyside',
'pyglet', 'sdl', 'freeglut', 'osxglut'),
help="Backend to use, one of ")
# Record
parser.add_argument("--record",
action='store_true',
help='Record a movie (default is "movie.mp4")')
# Interactive mode
parser.add_argument("--interactive", "-i",
action='store_true',
help="Interactive mode")
# Framerate option
parser.add_argument("--framerate", "-f",
default=60,
type=int,
help="Framerate in frames/second")
# Display framerate option
parser.add_argument("--display-fps",
action='store_true',
help="Display framerate in the console")
# Framerate option
parser.add_argument("--debug", "-d",
action='store_true',
help="Verbose debug mode")
# Window size
parser.add_argument("--size", "-s",
default = "",
type=str,
help="Window size")
# Window position
parser.add_argument("--position", "-p",
default = "",
type=str,
help="Window position")
# Single buffer
parser.add_argument("--single-buffer",
action='store_true',
help="Single buffer mode")
# Stereo mode
parser.add_argument("--stereo",
action='store_true',
help="Stereo mode")
# vertical synchronization
parser.add_argument("--vsync",
default=False,
type=bool,
help="Enable/disable vertical synchronization")
# sRGB mode
parser.add_argument("--srgb",
action='store_true',
help="sRGB mode (gamma correction)")
# Depth buffer size
parser.add_argument("--depth-size",
default=16,
type=int,
help="Depth buffer size")
# Stencil buffer size
parser.add_argument("--stencil-size",
default=0,
type=int,
help="Stencil buffer size")
# GL API
parser.add_argument("--gl-api",
default="GL",
choices=["GL","ES"],
help="GL API")
# GL profile
parser.add_argument("--gl-profile",
default="none",
choices=["none","core", "compatibility"],
help="GL context profile (only relevant for GL > 3.0)")
# GL version
parser.add_argument("--gl-version",
default="2.1",
help="GL version")
|
src/ddpg/actor_net.py | jimkon/Deep-Reinforcement-Learning-in-Large-Discrete-Action-Spaces | 154 | 12727188 | import numpy as np
import tensorflow as tf
import math
LEARNING_RATE = 0.0001
BATCH_SIZE = 64
TAU = 0.001
class ActorNet:
""" Actor Network Model of DDPG Algorithm """
def __init__(self, num_states, num_actions):
self.g = tf.Graph()
with self.g.as_default():
self.sess = tf.InteractiveSession()
# actor network model parameters:
self.W1_a, self.B1_a, self.W2_a, self.B2_a, self.W3_a, self.B3_a,\
self.actor_state_in, self.actor_model = self.create_actor_net(
num_states, num_actions)
# target actor network model parameters:
self.t_W1_a, self.t_B1_a, self.t_W2_a, self.t_B2_a, self.t_W3_a, self.t_B3_a,\
self.t_actor_state_in, self.t_actor_model = self.create_actor_net(
num_states, num_actions)
# cost of actor network:
# gets input from action_gradient computed in critic network file
self.q_gradient_input = tf.placeholder("float", [None, num_actions])
self.actor_parameters = [self.W1_a, self.B1_a,
self.W2_a, self.B2_a, self.W3_a, self.B3_a]
self.parameters_gradients = tf.gradients(
self.actor_model, self.actor_parameters, -self.q_gradient_input) # /BATCH_SIZE)
self.optimizer = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(
zip(self.parameters_gradients, self.actor_parameters))
# initialize all tensor variable parameters:
self.sess.run(tf.global_variables_initializer())
self.update_target_actor_op = [
self.t_W1_a.assign(TAU * self.W1_a + (1 - TAU) * self.t_W1_a),
self.t_B1_a.assign(TAU * self.B1_a + (1 - TAU) * self.t_B1_a),
self.t_W2_a.assign(TAU * self.W2_a + (1 - TAU) * self.t_W2_a),
self.t_B2_a.assign(TAU * self.B2_a + (1 - TAU) * self.t_B2_a),
self.t_W3_a.assign(TAU * self.W3_a + (1 - TAU) * self.t_W3_a),
self.t_B3_a.assign(TAU * self.B3_a + (1 - TAU) * self.t_B3_a)]
            # To make sure actor and target have the same initial parameters, copy the parameters:
# copy target parameters
self.sess.run([
self.t_W1_a.assign(self.W1_a),
self.t_B1_a.assign(self.B1_a),
self.t_W2_a.assign(self.W2_a),
self.t_B2_a.assign(self.B2_a),
self.t_W3_a.assign(self.W3_a),
self.t_B3_a.assign(self.B3_a)])
def create_actor_net(self, num_states=4, num_actions=1):
""" Network that takes states and return action """
N_HIDDEN_1 = 400
N_HIDDEN_2 = 300
actor_state_in = tf.placeholder("float", [None, num_states])
W1_a = tf.Variable(tf.random_uniform(
[num_states, N_HIDDEN_1], -1 / math.sqrt(num_states), 1 / math.sqrt(num_states)))
B1_a = tf.Variable(tf.random_uniform(
[N_HIDDEN_1], -1 / math.sqrt(num_states), 1 / math.sqrt(num_states)))
W2_a = tf.Variable(tf.random_uniform(
[N_HIDDEN_1, N_HIDDEN_2], -1 / math.sqrt(N_HIDDEN_1), 1 / math.sqrt(N_HIDDEN_1)))
B2_a = tf.Variable(tf.random_uniform(
[N_HIDDEN_2], -1 / math.sqrt(N_HIDDEN_1), 1 / math.sqrt(N_HIDDEN_1)))
W3_a = tf.Variable(tf.random_uniform([N_HIDDEN_2, num_actions], -0.003, 0.003))
B3_a = tf.Variable(tf.random_uniform([num_actions], -0.003, 0.003))
H1_a = tf.nn.softplus(tf.matmul(actor_state_in, W1_a) + B1_a)
H2_a = tf.nn.tanh(tf.matmul(H1_a, W2_a) + B2_a)
actor_model = tf.matmul(H2_a, W3_a) + B3_a
return W1_a, B1_a, W2_a, B2_a, W3_a, B3_a, actor_state_in, actor_model
def evaluate_actor(self, state_t):
return self.sess.run(self.actor_model, feed_dict={self.actor_state_in: state_t})
def evaluate_target_actor(self, state_t_1):
return self.sess.run(self.t_actor_model, feed_dict={self.t_actor_state_in: state_t_1})
def train_actor(self, actor_state_in, q_gradient_input):
self.sess.run(self.optimizer, feed_dict={
self.actor_state_in: actor_state_in, self.q_gradient_input: q_gradient_input})
def update_target_actor(self):
self.sess.run(self.update_target_actor_op)
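# Rough usage sketch (an addition; the state/action sizes are arbitrary):
# build the actor, query an action for a zero state and sync the target net.
if __name__ == '__main__':
    actor = ActorNet(num_states=4, num_actions=1)
    state = np.zeros((1, 4), dtype=np.float32)
    print(actor.evaluate_actor(state))
    actor.update_target_actor()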
|
templates/powershell/ps_external_ip.py | ohoph/Ebowla | 738 | 12727190 | buildcode="""
function Get-ExternalIP(){
$extern_ip_mask = @()
while ($response.IPAddress -eq $null){
$response = Resolve-DnsName -Name myip.opendns.com -Server resolver1.opendns.com
Start-Sleep -s 1
}
$octet1, $octet2, $octet3, $octet4 = $response.IPAddress.Split(".")
$extern_ip_mask += $response.IPAddress
$extern_ip_mask += [string]$octet1 + "." + [string]$octet2 + "." + [string]$octet3 + ".0"
$extern_ip_mask += [string]$octet1 + "." + [string]$octet2 + ".0.0"
$extern_ip_mask += [string]$octet1 + ".0.0.0"
return $extern_ip_mask
}
"""
callcode="""
$key_combos += ,(Get-ExternalIP)
""" |
transformers4rec/data/testing/tabular_data/dataset.py | Jwmc999/Transformers4Rec | 415 | 12727205 | import pathlib
from transformers4rec.data.dataset import ParquetDataset
tabular_testing_data: ParquetDataset = ParquetDataset(pathlib.Path(__file__).parent)
|
src/ai-examples/azext_ai_examples/_help.py | Mannan2812/azure-cli-extensions | 207 | 12727229 | <filename>src/ai-examples/azext_ai_examples/_help.py
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
helps['ai-examples'] = """
type: group
short-summary: Add AI powered examples to help content.
"""
helps['ai-examples check-connection'] = """
type: command
short-summary: Check if the client can connect to the AI example service.
"""
|
hover/core/representation/manifold.py | haochuanwei/hover | 251 | 12727234 | <reponame>haochuanwei/hover
"""
Manifold similarity measures for any collection of sequences of vectors.
Can be useful for improved interpretability of neural nets.
"""
from .reduction import DimensionalityReducer
from tqdm import tqdm
from scipy.spatial import procrustes
from hover.core import Loggable
DEFAULT_UMAP_PARAMS = {
"n_components": 2,
"n_neighbors": 30,
"min_dist": 0.1,
"metric": "euclidean",
"random_state": 0,
"transform_seed": 0,
}
class LayerwiseManifold(Loggable):
"""
Takes a sequence of arrays (each row of the array is a vector) and does the following:
(1) unfold vectors into lower dimensions, typically 2D or 3D;
(2) for every array:
run Procrustes analysis for fitting to the previous array. The first array is fitted to itself.
"""
def __init__(self, seq_arr):
"""
:param seq_arr: sequence of arrays to fit the manifold with.
:type seq_arr: list of numpy.ndarrays.
"""
self.arrays = seq_arr[:]
self.validate()
self.standardize()
def validate(self):
"""
Sanity check of array dimensions.
"""
assert (
len(self.arrays) > 1
), "Need at least two arrays to compute layerwise manifold."
self.n_vecs = self.arrays[0].shape[0]
for _arr in self.arrays:
assert _arr.shape[0] == self.n_vecs
self._good("Validated dimensions of input arrays")
def standardize(self):
"""
Standardize each array to the Procrustes form where
- tr(A^T A) = 1
- A.mean(axis=0) = 0
"""
def transform(arr):
matrix, _, _ = procrustes(arr, arr)
return matrix
self.arrays = [transform(_arr) for _arr in self.arrays]
self._good("Standardized input arrays")
def unfold(self, method="umap", reducer_kwargs=None):
"""
        Compute lower-dimensional manifolds using UMAP (or ivis).
:param method: the dimensionality reduction method to use.
:type method: str
"""
assert method in {"umap", "ivis"}
if method == "umap":
reducer_kwargs = reducer_kwargs or DEFAULT_UMAP_PARAMS
else:
reducer_kwargs = reducer_kwargs or dict()
self.manifolds = []
self._info(f"Running {method}...")
for _arr in tqdm(self.arrays, total=len(self.arrays)):
_reducer = DimensionalityReducer(_arr)
_manifold = _reducer.fit_transform(method, **reducer_kwargs)
self.manifolds.append(_manifold)
self._good("Successfully unfolded arrays into manifolds")
def procrustes(self, arrays=None):
"""
Run Procrustes analysis, optionally on a specified list of arrays.
"""
if arrays is None:
arrays = self.manifolds
disparities = []
fit_arrays = []
# fit each array to its fitted predecessor
for i, _arr in enumerate(arrays):
if i == 0:
# fit the first array to itself
_, _matrix, _disparity = procrustes(_arr, _arr)
else:
_, _matrix, _disparity = procrustes(fit_arrays[i - 1], _arr)
disparities.append(_disparity)
fit_arrays.append(_matrix)
self._good("Successfully carried out Procrustes analysis")
return fit_arrays, disparities
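# Minimal usage sketch (an addition): three random "layers" of 100 vectors
# each; sizes and the choice of UMAP are arbitrary and only show call order.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.RandomState(0)
    layers = [rng.rand(100, 8) for _ in range(3)]
    manifold = LayerwiseManifold(layers)
    manifold.unfold(method="umap")
    _, disparities = manifold.procrustes()
    print(disparities)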
|
Operators/ExampleFaceParsingOperator/FaceParsingOperator.py | Caius-Lu/Savior | 108 | 12727236 | from abc import ABC
import cv2
import numpy as np
from Operators.DummyAlgorithmWithModel import DummyAlgorithmWithModel
from Utils.GeometryUtils import center_pad_image_with_specific_base, \
resize_with_long_side, force_convert_image_to_bgr, correct_face_orientation
from Utils.InferenceHelpers import TritonInferenceHelper
class FaceParsingOperator(DummyAlgorithmWithModel, ABC):
name = 'FaceParsing'
__version__ = 'v1.0.20210323'
def __init__(self, _inference_config, _is_test):
super().__init__(_inference_config, _is_test)
class GeneralFaceParsing(FaceParsingOperator):
"""
    Gets the face parsing regions. Accuracy is low outside the face area
    itself, e.g. for earrings and glasses.
    """
    name = 'BiSeNet-based semantic segmentation of faces in natural scenes'
__version__ = 'v1.0.20210323'
def __init__(self, _inference_config, _is_test):
"""
        Meaning of each index:
        0 background
        1 skin
        2 right eyebrow
        3 left eyebrow
        4 right eye
        5 left eye
        6 glasses
        7 right ear
        8 left ear
        9 earring
        10 nose
        11 mouth cavity
        12 upper lip
        13 lower lip
        14 neck
        15
        16 clothes
        17 hair
        18 hat
"""
super().__init__(_inference_config, _is_test)
        # The model itself has no size limit, but for efficiency every image is resized to 512
self.candidate_image_size = (512, 512)
def get_inference_helper(self):
if self.inference_config['name'] == 'triton':
inference_helper = TritonInferenceHelper('FaceParsing',
self.inference_config['triton_url'],
self.inference_config['triton_port'],
'FaceParsing', 1)
            inference_helper.add_image_input('INPUT__0', (512, 512, 3), 'RGB image used for detection',
([103.53, 116.28, 123.675], [57.375, 57.12, 58.395]))
            inference_helper.add_output('OUTPUT__0', (19, 512, 512), 'region map for each class')
self.inference_helper = inference_helper
else:
raise NotImplementedError(
f"{self.inference_config['name']} helper for face parsing not implement")
def execute(self, _image, _landmark_info=None):
to_return_result = {
'semantic_segmentation': np.zeros((_image.shape[1], _image.shape[0]), dtype=np.uint8),
}
if _landmark_info is not None:
corrected_face_image, rotate_back_function = correct_face_orientation(_image, _landmark_info)
else:
corrected_face_image = _image
def _rotate_back_function(_image):
return _image
rotate_back_function = _rotate_back_function
original_h, original_w = corrected_face_image.shape[:2]
resized_image = resize_with_long_side(corrected_face_image, 512)
resized_h, resized_w = resized_image.shape[:2]
padded_image, (width_pad_ratio, height_pad_ratio) = center_pad_image_with_specific_base(
resized_image,
_width_base=512,
_height_base=512,
_output_pad_ratio=True
)
candidate_image = cv2.cvtColor(force_convert_image_to_bgr(padded_image), cv2.COLOR_BGR2RGB)
candidate_h, candidate_w = candidate_image.shape[:2]
if isinstance(self.inference_helper, TritonInferenceHelper):
result = self.inference_helper.infer(_need_tensor_check=False, INPUT__0=candidate_image.astype(np.float32))
semantic_index = result['OUTPUT__0'].squeeze()
else:
raise NotImplementedError(
f"{self.inference_helper.type_name} helper for face parsing not implement")
left_width_pad = int(width_pad_ratio * candidate_w)
top_height_pad = int(height_pad_ratio * candidate_h)
        # Remove the padding
semantic_index_without_pad = semantic_index[
top_height_pad:top_height_pad + resized_h,
left_width_pad:left_width_pad + resized_w
]
        # Undo the resize
resize_back_semantic_index = cv2.resize(semantic_index_without_pad, (original_w, original_h),
interpolation=cv2.INTER_NEAREST)
        # Restore the original image orientation
original_orientation_semantic_index = rotate_back_function(resize_back_semantic_index)
to_return_result['semantic_segmentation'] = original_orientation_semantic_index
return to_return_result
if __name__ == '__main__':
from argparse import ArgumentParser
from Utils.AnnotationTools import annotate_segmentation
from Operators.ExampleFaceDetectOperator import GeneralUltraLightFaceDetect
from Operators.ExampleFaceAlignmentOperator import GeneralLandmark106p
from Utils.GeometryUtils import get_rotated_box_roi_from_image
ag = ArgumentParser('Face Parsing Example')
    ag.add_argument('-i', '--image_path', dest='image_path', type=str, required=True, help='path to a local image')
ag.add_argument('-u', '--triton_url', dest='triton_url', type=str, required=True, help='triton url')
    ag.add_argument('-p', '--triton_port', dest='triton_port', type=int, default=8001, help='triton grpc port')
args = ag.parse_args()
    # Assume the image contains exactly one face
img = cv2.imread(args.image_path)
face_parsing_handler = GeneralFaceParsing({
'name': 'triton',
'triton_url': args.triton_url,
'triton_port': args.triton_port
}, True)
ultra_light_face_detect_handler = GeneralUltraLightFaceDetect({
'name': 'triton',
'triton_url': args.triton_url,
'triton_port': args.triton_port
}, True, 0.7, 0.5)
landmark106p_detect_handler = GeneralLandmark106p({
'name': 'triton',
'triton_url': args.triton_url,
'triton_port': args.triton_port
}, True)
face_bbox = ultra_light_face_detect_handler.execute(img)['locations'][0]['box_info']
cropped_image = get_rotated_box_roi_from_image(img, face_bbox, 1.35)
landmark_info = landmark106p_detect_handler.execute(cropped_image)
landmark106p_with_bbox_result_image = cropped_image.copy()
landmark106p_with_bbox_result_all_points = [(x, y) for x, y in
zip(landmark_info['x_locations'],
landmark_info['y_locations'])
]
face_parsing_with_bbox_result = face_parsing_handler.execute(cropped_image, landmark_info)
face_parsing_with_bbox_result_image = cropped_image.copy()
face_parsing_with_bbox_result_image = annotate_segmentation(
face_parsing_with_bbox_result_image,
face_parsing_with_bbox_result['semantic_segmentation']
)
cv2.imshow(f'face_parsing_with_bbox_result_image', face_parsing_with_bbox_result_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
lib/python/pack2/parser.py | leozz37/makani | 1,178 | 12727238 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for Pack2 definition files."""
import re
from makani.lib.python.pack2 import metadata
from ply import lex
from ply import yacc
# PLY's token and parser rule naming conflicts with our style.
# pylint: disable=invalid-name
class Formatter(object):
"""Pack2 code formatter.
This class is fed tokens from the lexer and produces a conically formatted
version of the code.
"""
# Maps each block type to its line terminating character.
_new_line_char_map = {
'bitfield8': ',',
'bitfield16': ',',
'bitfield32': ',',
'enum8': ',',
'enum16': ',',
'enum32': ',',
'header': ';',
'param': ';',
'scaled8': ',',
'scaled16': ',',
'scaled32': ',',
'specialize': ';',
'struct': ';',
}
# Tokens that eat whitespace after them.
_whitespace_eaters = set([
'[',
'(',
])
def __init__(self):
self.preamble = ''
self.formatted = ''
self.first_line = True
self.new_line = ''
self.block_level = 0
self.indent = ' '
self.new_line_char = None
self.eat_whitespace = False
self.extra_new_line = False
self.prev_token = None
self.eat_new_line = False
self.ignore_line = False
def _NextNewLine(self, token):
"""Return the nominal line break for the next statement."""
if self.block_level == 0 and token == ';':
self.eat_new_line = True
return '\n\n'
if token == '}':
self.eat_new_line = True
return '\n\n'
elif token == '{':
self.eat_new_line = True
return '\n'
elif token == self.new_line_char:
return '\n'
else:
return None
def _ExtraNewLineAllowed(self, token):
"""Determine if an extra new line is allowed.
Args:
token: The token currently being added.
Returns:
True if an extra new line is allowed before the current statement.
"""
if self.block_level < 1:
return False
if token == '}':
return False
if self.eat_new_line:
return False
return True
def _BlockIndent(self):
indent = ''
for _ in range(0, self.block_level):
indent += self.indent
return indent
def _NewLine(self, token):
"""Calculate line break.
Calculates the appropriate line break sequence to proceed the current
token being added.
Args:
token: The token currently being added.
Returns:
A string containing the appropriate line break sequence.
"""
if self.extra_new_line and self._ExtraNewLineAllowed(token):
# Single blank lines are allowed within blocks to allow for logical
# grouping of fields/values.
new_line = '\n\n'
else:
new_line = self.new_line
self.extra_new_line = False
self.eat_new_line = False
return new_line + self._BlockIndent()
def AddToken(self, token, whitespace):
"""Add a token to the formatter.
Args:
token: The token to add.
whitespace: The nominal whitespace to add before the token.
"""
# Ignore include lines. These will be prepended later in alphabetical
# order.
if not self.prev_token or self.prev_token == '\n':
if token == 'include':
self.ignore_line = True
if self.ignore_line:
return
if self.new_line:
self.formatted += self._NewLine(token)
elif not self.first_line and not self.eat_whitespace:
self.formatted += whitespace
self.formatted += str(token)
self.new_line = self._NextNewLine(token)
self.first_line = False
self.eat_whitespace = token in self._whitespace_eaters
if token in self._new_line_char_map:
self.new_line_char = self._new_line_char_map[token]
self.prev_token = token
def AddComment(self, comment):
"""Add comment to the formatter."""
# Special case comments at the top of files as they are allowed to come
# before include directive.
if self.first_line:
self.preamble += '// ' + str(comment) + '\n'
return
if self.prev_token == '\n' or self.first_line:
# A comment following a new line should be on it's own line.
self.formatted += self._NewLine('')
else:
# Otherwise it should be exactly two spaces after the end of line.
self.formatted += ' '
self.formatted += '// ' + str(comment)
self.new_line = '\n'
def ProcessNewLine(self, count):
self.prev_token = '\n'
self.extra_new_line = count > 1
self.ignore_line = False
def EnterBlock(self):
self.block_level += 1
def ExitBlock(self):
self.block_level -= 1
class ParseError(Exception):
def __init__(self, value, errors):
super(self.__class__, self).__init__(value)
self.errors = errors
self.value = value
def __str__(self):
string = self.value + '\n'
for e in self.errors:
string += e
return string
class Lexer(object):
"""Lexer for Pack2 definition files."""
def __init__(self, error_func):
self.error_func = error_func
self.formatter = Formatter()
def Build(self, **kwargs):
# Building the lexer is separate from __init__() because the PLY
# docs warn against calling lex() from __init__
self.lexer = lex.lex(object=self, **kwargs)
keywords = [
'BITFIELD8',
'BITFIELD16',
'BITFIELD32',
'ENUM8',
'ENUM16',
'ENUM32',
'HEADER',
'INCLUDE',
'PARAM',
'SCALED8',
'SCALED16',
'SCALED32',
'SPECIALIZE',
'STRING',
'STRUCT',
]
keyword_map = {keyword.lower(): keyword for keyword in keywords}
tokens = keywords + [
'ID',
'COLON',
'COMMA',
'EQUAL',
'SEMICOLON',
'LCURLY',
'RCURLY',
'LPAREN',
'RPAREN',
'LSQUARE',
'RSQUARE',
'FLOAT_LITERAL',
'HEX_LITERAL',
'BIN_LITERAL',
'NEG_DEC_LITERAL',
'DEC_LITERAL',
'STRING_LITERAL',
]
# Ignored characters
t_ignore = ' \t'
# Tokens
#
# PLY makes use of docstrings in token functions to specify the token regex.
  # Furthermore it uses raw strings because, according to the manual, "they are the
# most convenient way to write regular expression strings."
#
# pylint: disable=g-docstring-quotes,g-short-docstring-punctuation
# The order of the functions here reflects the order in which the
# lexer matches tokens.
def t_FLOAT_LITERAL(self, t):
r'[-+]?[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?'
self.formatter.AddToken(t.value, ' ')
t.value = float(t.value)
return t
def t_HEX_LITERAL(self, t):
r'0x[0-9A-Fa-f]+'
self.formatter.AddToken(t.value, ' ')
t.value = int(t.value[2:], 16)
return t
def t_BIN_LITERAL(self, t):
r'0b[01]+'
self.formatter.AddToken(t.value, ' ')
t.value = int(t.value[2:], 2)
return t
def t_NEG_DEC_LITERAL(self, t):
r'-(0|[1-9][0-9]*)'
self.formatter.AddToken(t.value, ' ')
t.value = int(t.value, 10)
return t
def t_DEC_LITERAL(self, t):
r'\+?0|[1-9][0-9]*'
self.formatter.AddToken(t.value, ' ')
t.value = int(t.value, 10)
return t
def t_STRING_LITERAL(self, t):
r'"[^"]*"'
self.formatter.AddToken(t.value, ' ')
t.value = t.value[1:-1] # Remove quotes.
return t
def t_ID(self, t):
r'[a-zA-Z_][a-zA-Z0-9_]*'
self.formatter.AddToken(t.value, ' ')
t.type = self.keyword_map.get(t.value, 'ID')
return t
def t_comment(self, t):
r'//[ \t]*(?P<comment_text>.*)'
self.formatter.AddComment(t.lexer.lexmatch.group('comment_text'))
def t_newline(self, t):
r'\n+'
self.formatter.ProcessNewLine(t.value.count('\n'))
t.lexer.lineno += t.value.count('\n')
def t_COLON(self, t):
r':' # pylint: disable=invalid-name
self.formatter.AddToken(t.value, '')
return t
def t_COMMA(self, t):
r',' # pylint: disable=invalid-name
self.formatter.AddToken(t.value, '')
return t
def t_EQUAL(self, t):
r'=' # pylint: disable=invalid-name
self.formatter.AddToken(t.value, ' ')
return t
def t_SEMICOLON(self, t):
r';' # pylint: disable=invalid-name
self.formatter.AddToken(t.value, '')
return t
def t_LCURLY(self, t):
r'\{' # pylint: disable=invalid-name
self.formatter.AddToken(t.value, ' ')
self.formatter.EnterBlock()
return t
def t_RCURLY(self, t):
r'\}' # pylint: disable=invalid-name
self.formatter.ExitBlock()
self.formatter.AddToken(t.value, '')
return t
def t_LPAREN(self, t):
r'\(' # pylint: disable=invalid-name
self.formatter.AddToken(t.value, '')
self.formatter.EnterBlock()
return t
def t_RPAREN(self, t):
r'\)' # pylint: disable=invalid-name
self.formatter.ExitBlock()
self.formatter.AddToken(t.value, '')
return t
def t_LSQUARE(self, t):
r'\[' # pylint: disable=invalid-name
self.formatter.AddToken(t.value, '')
return t
def t_RSQUARE(self, t):
r'\]' # pylint: disable=invalid-name
self.formatter.AddToken(t.value, '')
return t
def t_error(self, t):
self.error_func('%d: Illegal character \'%s\'' % (t.lineno, t.value[0]))
t.lexer.skip(1)
# pylint: enable=g-docstring-quotes,g-short-docstring-punctuation
class _DefaultFileLoader(object):
def __init__(self):
pass
def ReadFile(self, file_name):
with open(file_name, 'r') as f:
contents = f.read()
return contents
class Parser(object):
"""Parser for Pack2 definition files."""
def __init__(self, file_loader=None, loaded_files=None):
self.lexer = Lexer(error_func=self._RecordError)
self.lexer.Build()
self.tokens = self.lexer.tokens
if loaded_files:
self.loaded_files = loaded_files
else:
self.loaded_files = set()
if file_loader is None:
self.file_loader = _DefaultFileLoader()
else:
self.file_loader = file_loader
self.include_re = re.compile(r'(.*)\.p2$')
# TODO: Investigate generating tables at build time and
# packaging them with the library.
self.parser = yacc.yacc(module=self, debug=False, write_tables=False)
def Parse(self, string):
"""Parse a Pack2 definition string."""
self.valid = True
self.metadata = metadata.Metadata()
self.errors = []
try:
self.parser.parse(string, tracking=True)
except IndexError as e:
# Due to a bug in PLY, an index error is caused if we raise a syntax
# error. If we've previously raised a syntax error, ignore it so that
# we can raise a ParseError instead.
if self.valid:
raise e
if not self.valid:
raise ParseError('Parse Error', self.errors)
return self.metadata
def GetFormattedSource(self):
preamble = self.lexer.formatter.preamble
if self.metadata.includes:
if preamble:
preamble += '\n'
for inc in sorted(self.metadata.includes):
preamble += ('include "%s.p2";\n' % inc)
preamble += '\n'
return preamble + self.lexer.formatter.formatted + '\n'
def _RecordError(self, string):
self.valid = False
self.errors.append(string)
def _RaiseError(self, string):
self._RecordError(string)
raise SyntaxError(string)
def HandleWidthType(self, base_name, p):
"""Common handing for types that have 8, 16, and 32 bit widths.
Grammar for type should be of the follow:
type_def : type_keyword ID LCURLY type_body RCURLY
Args:
base_name: The type's base name (eg. 'enum', 'bitfield', or 'scaled'.)
p: The PLY parser arguments from the production rule.
Returns:
A dict containing 'type', 'name', 'body', and 'width'.
"""
info = {
'type': p[1],
'name': p[2],
'body': p[4],
}
if info['type'] == base_name + '8':
info['width'] = 1
elif info['type'] == base_name + '16':
info['width'] = 2
elif info['type'] == base_name + '32':
info['width'] = 4
else:
self._RaiseError('%d: invalid %s type %s.\n'
% (p.lineno(1), base_name, info['type']))
return info
def ResolveType(self, type_name, lineno=-1):
if type_name not in self.metadata.type_map:
self._RaiseError('%d: Type \'%s\' unknown.\n' % (lineno, type_name))
raise SyntaxError
return self.metadata.type_map[type_name]
# PLY makes use of docstrings in production function to specify the grammar.
# These do not conform to the google style for doc strings.
#
# pylint: disable=g-short-docstring-punctuation
# pylint: disable=g-doc-args
# pylint: disable=g-no-space-after-docstring-summary
def p_file(self, p):
"""file : bitfield_def file
| enum_def file
| header_def file
| include_def file
| param_def file
| scaled_def file
| specialize_def file
| struct_def file
|
"""
def p_include_def(self, p):
"""include_def : INCLUDE STRING_LITERAL SEMICOLON"""
file_name = p[2]
match = self.include_re.match(file_name)
if not match:
self._RaiseError('%d: %s is not named like a p2 file.' % (p.lineno(2),
file_name))
path = match.group(1)
if file_name in self.loaded_files:
return
self.loaded_files.add(file_name)
contents = self.file_loader.ReadFile(file_name)
parser = Parser(file_loader=self.file_loader,
loaded_files=self.loaded_files)
meta = parser.Parse(contents)
try:
self.metadata.AddInclude(path, meta)
except ValueError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
def p_struct_def(self, p):
"""struct_def : STRUCT ID LCURLY struct_body RCURLY"""
name = p[2]
body = p[4]
try:
self.metadata.AddType(metadata.StructType(name, body))
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
def p_enum_def(self, p):
"""enum_def : enum_keyword ID LCURLY enum_body RCURLY"""
try:
info = self.HandleWidthType('enum', p)
enum = metadata.EnumType(info['name'], info['width'], info['body'])
self.metadata.AddType(enum)
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
except ValueError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
def p_enum8_keyword(self, p):
"""enum_keyword : ENUM8
| ENUM16
| ENUM32
"""
p[0] = p[1]
def p_bitfield_def(self, p):
"""bitfield_def : bitfield_keyword ID LCURLY bitfield_body RCURLY"""
try:
info = self.HandleWidthType('bitfield', p)
bitfield = metadata.BitfieldType(info['name'],
info['width'],
info['body'])
self.metadata.AddType(bitfield)
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
except ValueError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
def p_bitfield8_keyword(self, p):
"""bitfield_keyword : BITFIELD8
| BITFIELD16
| BITFIELD32
"""
p[0] = p[1]
def p_scaled_def(self, p):
"""scaled_def : scaled_keyword ID LCURLY scaled_body RCURLY"""
try:
info = self.HandleWidthType('scaled', p)
if 'scale' not in info['body']:
self._RaiseError('%d: Scaled type %s does not contain scale property.'
%(p.lineno(2), info['name']))
if 'offset' not in info['body']:
self._RaiseError('%d: Scaled type %s does not contain offset property.'
%(p.lineno(2), info['name']))
scale = info['body']['scale']
offset = info['body']['offset']
scaled = metadata.ScaledType(info['name'], info['width'],
offset=offset, scale=scale)
self.metadata.AddType(scaled)
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
except ValueError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
def p_scaled8_keyword(self, p):
"""scaled_keyword : SCALED8
| SCALED16
| SCALED32
"""
p[0] = p[1]
def p_param_def(self, p):
"""param_def : PARAM ID LCURLY struct_body RCURLY"""
name = p[2]
body = p[4]
try:
param = metadata.Param(name, body)
self.metadata.AddType(param)
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
def p_header_def(self, p):
"""header_def : HEADER ID LCURLY struct_body RCURLY"""
name = p[2]
body = p[4]
try:
header = metadata.Header(name, body)
self.metadata.AddType(header)
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
# pylint: disable=line-too-long
def p_speclialize_def(self, p):
"""specialize_def : SPECIALIZE LPAREN ID RPAREN ID LCURLY struct_body RCURLY"""
# pylint: enable=line-too-long
parent_name = p[3]
name = p[5]
body = p[7]
if parent_name not in self.metadata.type_map:
self._RaiseError('%d: Unknown parent type %s.\n'
% (p.lineno(2), parent_name))
parent_type = self.metadata.type_map[parent_name]
try:
new_type = parent_type.Specialize(name, body)
self.metadata.AddType(new_type)
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
except ValueError as e:
self._RaiseError('%d: %s\n' % (p.lineno(2), e))
def p_struct_body(self, p):
"""struct_body : struct_body field_def
| field_def
"""
try:
if len(p) == 2:
line = p.lineno(1)
body = metadata.StructBody()
body.AddField(p[1])
elif len(p) == 3:
line = p.lineno(2)
body = p[1]
body.AddField(p[2])
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (line, e))
p[0] = body
def p_field_def(self, p):
"""field_def : ID ID SEMICOLON"""
type_name = p[1]
name = p[2]
field_type = self.ResolveType(type_name, p.lineno(1))
p[0] = metadata.StructField(field_type, name)
def p_string_field_def(self, p):
"""field_def : STRING LSQUARE unsigned_literal RSQUARE ID SEMICOLON"""
length = p[3]
name = p[5]
type_obj = metadata.StringType(length)
p[0] = metadata.StructField(type_obj, name)
def p_array_field_def(self, p):
"""field_def : ID ID LSQUARE unsigned_literal RSQUARE SEMICOLON"""
type_name = p[1]
name = p[2]
extent = p[4]
field_type = self.ResolveType(type_name, p.lineno(1))
p[0] = metadata.StructField(field_type, name, extent)
def p_enum_body(self, p):
"""enum_body : enum_body enum_value
| enum_value
"""
try:
if len(p) == 2:
line = p.lineno(1)
value = p[1]
body = metadata.EnumBody()
elif len(p) == 3:
line = p.lineno(2)
value = p[2]
body = p[1]
body.AddValue(value[0], value[1])
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (line, e))
p[0] = body
def p_enum_value(self, p):
"""enum_value : ID EQUAL signed_literal COMMA"""
p[0] = (p[1], p[3])
def p_bitfield_body(self, p):
"""bitfield_body : bitfield_body bitfield_value
| bitfield_value
"""
try:
if len(p) == 2:
line = p.lineno(1)
value = p[1]
body = metadata.BitfieldBody()
elif len(p) == 3:
line = p.lineno(2)
value = p[2]
body = p[1]
body.AddFlag(value[0], value[1])
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (line, e))
p[0] = body
def p_scaled_body(self, p):
"""scaled_body : scaled_body scaled_property
| scaled_property
"""
try:
if len(p) == 2:
line = p.lineno(1)
value = p[1]
body = {}
elif len(p) == 3:
line = p.lineno(2)
value = p[2]
body = p[1]
if value[0] in body:
self._RaiseError('%d: Scaled property %s repeated.' % (line, value[0]))
body[value[0]] = value[1]
except SyntaxError as e:
self._RaiseError('%d: %s\n' % (line, e))
p[0] = body
def p_scaled_property(self, p):
"""scaled_property : ID EQUAL FLOAT_LITERAL COMMA
| ID EQUAL signed_literal COMMA
"""
name = p[1]
value = p[3]
if name != 'scale' and name != 'offset':
self._RaiseError('%d: Unknown scaled property %s.' % (p.lineno(1), name))
p[0] = (name, value)
def p_bitfield_value(self, p):
"""bitfield_value : unsigned_literal COLON ID COMMA"""
p[0] = (p[3], p[1])
def p_unsigned_literal(self, p):
"""unsigned_literal : HEX_LITERAL
| DEC_LITERAL
| BIN_LITERAL
"""
p[0] = p[1]
def p_signed_literal(self, p):
"""signed_literal : unsigned_literal
| NEG_DEC_LITERAL
"""
p[0] = p[1]
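  # Illustrative input accepted by the grammar rules above (a sketch: the type
  # and field names are invented, and the exact keyword spelling is decided by
  # the lexer, which is not part of this excerpt):
  #   bitfield8 Flags { 0:ENABLED, 1:VERBOSE, }
  #   scaled16 Temperature { scale = 0.5, offset = -40, }
  #   param Settings { Flags flags; Temperature temp; }
  #   header Packet { string[16] name; Temperature temps[4]; }
  #   specialize(Settings) FastSettings { Flags flags; Temperature temp; }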
def p_error(self, p):
self.valid = False
self.errors.append('%d: Syntax error at \'%s\'\n' % (p.lineno, p.value))
# pylint: enable=g-short-docstring-punctuation
  # pylint: enable=g-no-space-after-docstring-summary
# pylint: enable=invalid-name
|
scripts/python/break-contigs-vs-truth.py | benjeffery/mccortex | 104 | 12727271 | #!/usr/bin/env python
from __future__ import print_function
try: input = raw_input
except NameError: pass
# usage: python break-contigs-vs-truth.py <k> [input.txt]
# input.txt:
# <master> :string
# <query1> :string
# <query2> :string
# ...
#
# Output:
# <contig-id> <contig-substr> <ref-start> <ref-strand>
# ...
#
# For each query string, report all maximal substring alignments between it and
# the master string. Alignments are of length >= k and can include single base
# mismatches as long as they are flanked by k matching bases.
#
# Next we print coverage of ref using maximal alignments to contigs.
# Finally we report NG50 and number of assembly errors.
#
# Time is on average NlogN where N is number of query bases. Worst case N^2
# e.g. aaaaaaaaaaaa vs aaaaaaaaaaaa
# Memory usage is linear with length of the master string and number of maximal
# substring matches.
#
# <NAME> 2016-09-10
# MIT License
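#
# Illustrative invocation (a sketch; file names and sequences are made up):
#   printf 'ACGATTACAGATTACAGATTACA\n' > ref.txt
#   printf 'GATTACAGATTACA\nACGATTAC\n' > contigs.txt
#   python break-contigs-vs-truth.py 5 contigs.txt < ref.txt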
import fileinput
import pyRBT
import sys
from collections import defaultdict
class Alignment:
def __init__(self,seqid,start1,start2,length):
self.seqid = seqid
self.start1,self.start2,self.length = start1,start2,length
def __cmp__(x,y):
if x.seqid != y.seqid: return x.seqid - y.seqid
xoff,yoff = x.start1-x.start2,y.start1-y.start2
if xoff != yoff: return xoff - yoff
if x.start1 >= y.start1+y.length: return 1
if y.start1 >= x.start1+x.length: return -1
return 0 # one is within the other
  def __gt__(x,y): return x.__cmp__(y) > 0
def __ge__(x,y): return x.__cmp__(y) >= 0
def __eq__(x,y): return x.__cmp__(y) == 0
def __ne__(x,y): return x.__cmp__(y) != 0
def __le__(x,y): return x.__cmp__(y) <= 0
def __lt__(x,y): return x.__cmp__(y) < 0
def __str__(self):
return ("Alignment(id:"+str(self.seqid)+"," +
"starts:("+str(self.start1)+","+str(self.start2)+")," +
"len:"+str(self.length)+")")
def dna_reverse_complement(s):
h = {'a':'t','c':'g','g':'c','t':'a','A':'T','C':'G','G':'C','T':'A'}
t = [ h.get(c,c) for c in s ]
return ''.join(t[::-1])
# Build a hash of (upper-cased) k-mer -> list of (seqid, position) occurrences
def build_occ_hash(occ,k,seq,seqid):
for i in range(len(seq)-k+1):
occ[seq[i:i+k].upper()].append((seqid,i))
# Extend a match as far as we have an exact match or a single mismatch flanked
# by M exact matches on either side. Seeded from an initial matching kmer.
def extend_match_kmer_match(a,b,s1,e1,s2,e2,M):
while True:
while s1 > 0 and s2 > 0 and a[s1-1].upper() == b[s2-1].upper(): s1,s2 = s1-1,s2-1
if s1 > M and s2 > M and a[s1-M-1:s1-1].upper() == b[s2-M-1:s2-1].upper():
s1 -= M+1; s2 -= M+1
else: break
while True:
while e1 < len(a) and e2 < len(b) and a[e1].upper() == b[e2].upper(): e1,e2 = e1+1,e2+1
if e1+M < len(a) and e2+M < len(b) and a[e1+1:e1+1+M].upper() == b[e2+1:e2+1+M].upper():
e1 += M+1; e2 += M+1
else: break
return (s1,e1,s2,e2)
def count_str_matches(a,b):
return sum([ i.upper() == j.upper() for i,j in zip(a,b) ])
# Extend a match as far as we have an exact match or N matches within M bases
def extend_match_kmer_match2(a,b,s1,e1,s2,e2,M,N):
while True:
while s1 > 0 and s2 > 0 and a[s1-1].upper() == b[s2-1].upper(): s1,s2 = s1-1,s2-1
if s1 > M and s2 > M and count_str_matches(a[s1-1-M:s1-1], b[s2-1-M:s2-1]) >= N:
s1 -= M+1; s2 -= M+1
else: break
while True:
while e1 < len(a) and e2 < len(b) and a[e1].upper() == b[e2].upper(): e1,e2 = e1+1,e2+1
if e1+M < len(a) and e2+M < len(b) and count_str_matches(a[e1+1:e1+1+M], b[e2+1:e2+1+M]) >= N:
e1 += M+1; e2 += M+1
else: break
return (s1,e1,s2,e2)
# Segment tree
# Query if there is an uncovered region in a set of intervals
class GapSegNode:
def __init__(self,start,end,l=None,r=None):
(self.start,self.end,self.l,self.r,self.regs) = (start,end,l,r,[])
def add_interval(self,reg):
if reg[0] >= self.end or reg[1] <= self.start: return # does not overlap
if reg[0] <= self.start and reg[1] >= self.end: # contained
self.regs.append(reg)
else:
if self.l is not None: self.l.add_interval(reg)
if self.r is not None: self.r.add_interval(reg)
def gap_in_interval(self,reg):
if reg[0] >= self.end or reg[1] <= self.start: return False # does not overlap
if len(self.regs) > 0: return False # this node is covered
if self.l is None and self.r is None: return True # leaf node
return ((self.l is not None and self.l.gap_in_interval(reg)) or
(self.r is not None and self.r.gap_in_interval(reg)))
def __str__(self):
return "GapSegNode("+str(self.start)+","+str(self.end)+")"
@staticmethod
def build_tree(start,end):
# build tree bottom up
assert start < end
l = [ GapSegNode(i,i+1) for i in range(start,end) ]
while len(l) > 1:
N = 2*int(len(l)//2)
m = [ GapSegNode(l[i].start,l[i+1].end,l[i],l[i+1]) for i in range(0,N,2) ]
if len(l)%2 == 1: m.append(GapSegNode(l[-1].start, l[-1].end, l[-1]))
l = m
return l[0]
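# Illustrative use of GapSegNode (values invented): cover [0,10) in two steps
# and query for remaining gaps.
#   t = GapSegNode.build_tree(0, 10)
#   t.add_interval((0, 4))
#   t.gap_in_interval((0, 10))  # True: positions 4..9 are still uncovered
#   t.add_interval((4, 10))
#   t.gap_in_interval((0, 10))  # False: fully covered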
# `l` is a list of alignments of sequence `seq` against the ref
# for each kmer in seq, keep the longest substring in l that covers it
# discard all substrings that are not kept
def reduce_alignments(seq,l):
# sort alignments by length (longest to shortest)
l.sort(key=lambda x: -x.length)
# iterate over alignments, only taking those that cover uncovered kmers
gst = GapSegNode.build_tree(0,len(seq))
keep = []
for aln in l:
reg = (aln.start2, aln.start2+aln.length)
if gst.gap_in_interval(reg):
keep.append(aln)
gst.add_interval(reg)
return keep
# l is a list of (start,length) contig alignments
# removes substrings from l (BEWARE: they get deleted!)
# returns contig_index,contig_length
def ng50_from_coverage(l,reflen):
# sort by start (ascending) and length (descending) to remove substrings
# on the ref
l.sort(key=lambda x: (x[0],-x[1]))
j = end = 0
for x in l:
if x[0]+x[1] > end:
end = x[0]+x[1]
l[j] = x
j += 1
del(l[j:])
# sort by length (descending), start position (ascending)
l.sort(key=lambda x: (-x[1],x[0]))
halflen = reflen//2
lensum = n = 0
while lensum < halflen:
if n+1 == len(l):
print("Warning: haven't assembled half of ref, NG50 is underestimate",
file=sys.stderr)
break
lensum += l[n][1]
n += 1
return (n,l[n][1])
# for a given alignment, get left hand position on + strand
def convert_ref_strandpos(aln,reflen):
return aln.start1 if aln.seqid&1 == 0 else reflen-(aln.start1+aln.length)
def main(k,path):
master = input().strip()
masterrc = dna_reverse_complement(master)
print(master)
print(masterrc)
occ = defaultdict(list) # [ (strand,pos) ... ]
build_occ_hash(occ,k,master,0)
build_occ_hash(occ,k,masterrc,1)
n_asm_errors = n_no_matches = n_output = 0
m_cov = []
strands = ["+","-"]
print("# Matching contigs sections")
print("# contig-id contig-substr ref-start ref-strand")
# Iterate over queries
qi = 0
for q in fileinput.input(path):
q = q.strip()
rbt = pyRBT.pyRBT()
l = []
for i in range(len(q)-k+1):
kmer = q[i:i+k].upper()
for (seqid,pos) in occ[kmer]:
# create alignment
aln = Alignment(seqid,pos,i,k)
if aln not in rbt:
# extend alignment
s1,s2 = pos,i
m = master if seqid==0 else masterrc
(s1,e1,s2,e2) = extend_match_kmer_match(m,q,s1,s1+k,s2,s2+k,5)
# (s1,e1,s2,e2) = extend_match_kmer_match2(m,q,s1,s1+k,s2,s2+k,10,6)
aln.start1,aln.start2,aln.length = s1,s2,e1-s1
# store and print
rbt.insert(aln)
l.append(aln)
for x in l:
s = convert_ref_strandpos(x,len(master))
m_cov.append((s,x.length,qi,x.seqid&1,x.start2))
l = reduce_alignments(q,l)
for x in l:
s = convert_ref_strandpos(x,len(master))
print(qi,q[x.start2:x.start2+x.length],s,strands[x.seqid&1])
n_asm_errors += max(0,len(l)-1)
n_no_matches += (len(l) == 0)
n_output += len(l)
qi += 1
(_,ng50) = ng50_from_coverage(m_cov,len(master))
print() # empty line separates output, now print ref matches
print("# Ref positions assembled (longest to shortest)")
print("# start positions are 0-based and indicate the left hand position")
print("# ref-start length contig-id contig-start contig-strand")
m_cov.sort() # sort by start, length (both ascending)
for x in m_cov: print(x[0],x[1],x[2],x[4],strands[x[3]])
print("contigs_read:",qi,file=sys.stderr)
print("contigs_printed:",n_output,file=sys.stderr)
print("assembly_errors:",n_asm_errors,file=sys.stderr)
print("nomatch_contigs:",n_no_matches,file=sys.stderr)
# number of unique segments of the reference that were assembled
print("num_uniq_ref_segs:",len(m_cov),file=sys.stderr)
print("reflen:",len(master),file=sys.stderr)
print("NG50:",ng50,file=sys.stderr)
def usage(err=None):
if err is not None: print(err,file=sys.stderr)
print("python break-contigs-vs-truth.py <k> [contigs.txt]",file=sys.stderr)
print(" Reads ref genome from STDIN as a single line",file=sys.stderr)
print(" If contigs.txt not passed, reads from STDIN after reading ref",file=sys.stderr)
exit(-1)
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3: usage()
try: k = int(sys.argv[1])
except ValueError: usage("Error: invalid kmer value '"+sys.argv[1]+"'")
path = sys.argv[2] if len(sys.argv) > 2 else "-"
print("k:",k,"path:",path,file=sys.stderr)
main(k,path)
|
server/common/config/__init__.py | andrewsu/cellxgene | 403 | 12727302 | DEFAULT_SERVER_PORT = 5005
BIG_FILE_SIZE_THRESHOLD = 100 * 2 ** 20 # 100MB
|
bin/license_finder_pip.py | nitram509/LicenseFinder | 1,039 | 12727305 | #!/usr/bin/env python
import json
import sys
try:
from pip._internal.req import parse_requirements
except ImportError:
from pip.req import parse_requirements
try:
# since pip 19.3
from pip._internal.network.session import PipSession
except ImportError:
try:
# since pip 10
from pip._internal.download import PipSession
except ImportError:
from pip.download import PipSession
from pip._vendor import pkg_resources
from pip._vendor.six import print_
reqs = []
for req in parse_requirements(sys.argv[1], session=PipSession()):
try:
if req.req is not None and (req.markers is None or req.markers.evaluate()):
reqs.append(pkg_resources.Requirement.parse(str(req.req)))
except AttributeError:
# Since pip 20.1 (pip now takes care of markers at the resolve step)
if req.requirement is not None:
reqs.append(pkg_resources.Requirement.parse(str(req.requirement)))
transform = lambda dist: {
'name': dist.project_name,
'version': dist.version,
'location': dist.location,
'dependencies': list(map(lambda dependency: dependency.project_name, dist.requires())),
}
packages = [transform(dist) for dist in pkg_resources.working_set.resolve(reqs)]
print_(json.dumps(packages))
|
modules/intelligence-gathering/asm.py | decidedlygray/ptf | 4,391 | 12727335 | #!/usr/bin/env python
#######################################
# Installation module for AttackSurfaceMapper
#######################################
DESCRIPTION="This module will install/update Attack Surface Mapper (ASM) by Andreas Georgiou - a tool that aims to automate the recon process"
AUTHOR="<NAME>"
INSTALL_TYPE="GIT"
REPOSITORY_LOCATION="https://github.com/superhedgy/AttackSurfaceMapper.git"
INSTALL_LOCATION="ASM"
DEBIAN="python3,pip"
AFTER_COMMANDS="cd {INSTALL_LOCATION},python3 -m pip install --no-cache-dir -r requirements.txt"
|
py/agentflow/meta_options/control_flow/examples/simple_insertion.py | wx-b/dm_robotics | 128 | 12727336 | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""An Control-Flow example which builds up a simple insertion experiment."""
from typing import Optional
from absl import app
import dm_env
from dm_robotics.agentflow import core
from dm_robotics.agentflow import subtask
from dm_robotics.agentflow.meta_options.control_flow import cond
from dm_robotics.agentflow.meta_options.control_flow import loop_ops
from dm_robotics.agentflow.meta_options.control_flow import sequence
from dm_robotics.agentflow.meta_options.control_flow.examples import common
from dm_robotics.agentflow.rendering import graphviz_renderer
def near_socket(unused_timestep: dm_env.TimeStep,
unused_result: Optional[core.OptionResult]) -> bool:
return False
def last_option_successful(unused_timestep: dm_env.TimeStep,
result: core.OptionResult):
return result.termination_reason == core.TerminationType.SUCCESS
def build():
"""Builds example graph."""
env = common.DummyEnv()
# Define a subtask that exposes the desired RL-environment view on `base_task`
my_subtask = common.DummySubTask(env.observation_spec(), 'Insertion SubTask')
# Define a regular RL agent against this task-spec.
my_policy = common.DummyPolicy(my_subtask.action_spec(),
my_subtask.observation_spec(), 'My Policy')
# Compose the policy and subtask to form an Option module.
learned_insert_option = subtask.SubTaskOption(
my_subtask, my_policy, name='Learned Insertion')
reach_option = common.DummyOption(env.action_spec(), env.observation_spec(),
'Reach for Socket')
scripted_reset = common.DummyOption(env.action_spec(), env.observation_spec(),
'Scripted Reset')
extract_option = common.DummyOption(env.action_spec(), env.observation_spec(),
'Extract')
recovery_option = common.DummyOption(env.action_spec(),
env.observation_spec(), 'Recover')
# Use some AgentFlow operators to embed the agent in a bigger agent.
  # First use the Cond op to run the learned agent if sufficiently close.
reach_or_insert_op = cond.Cond(
cond=near_socket,
true_branch=learned_insert_option,
false_branch=reach_option,
name='Reach or Insert')
# Loop the insert-or-reach option 5 times.
reach_and_insert_5x = loop_ops.Repeat(
5, reach_or_insert_op, name='Retry Loop')
loop_body = sequence.Sequence([
scripted_reset,
reach_and_insert_5x,
cond.Cond(
cond=last_option_successful,
true_branch=extract_option,
false_branch=recovery_option,
name='post-insert')
])
main_loop = loop_ops.While(lambda _: True, loop_body)
graphviz_renderer.open_viewer(main_loop)
return main_loop
def main(unused_argv):
build()
if __name__ == '__main__':
app.run(main)
|
maro/cli/data_pipeline/vm_scheduling.py | yangboz/maro | 598 | 12727384 | import gzip
import os
import shutil
import time
from csv import reader, writer
from typing import List
import aria2p
import pandas as pd
from yaml import safe_load
from maro.cli.data_pipeline.base import DataPipeline, DataTopology
from maro.cli.data_pipeline.utils import StaticParameter, convert, download_file
from maro.utils.exception.cli_exception import CommandError
from maro.utils.logger import CliLogger
logger = CliLogger(name=__name__)
class VmSchedulingPipeline(DataPipeline):
"""Generate vm_scheduling Pipeline data and other necessary files for the specified topology.
The files will be generated in ~/.maro/data/vm_scheduling.
"""
_download_file_name = "AzurePublicDatasetLinksV2.txt"
_vm_table_file_name = "vmtable.csv.gz"
_raw_vm_table_file_name = "vmtable_raw.csv"
_clean_file_name = "vmtable.csv"
_build_file_name = "vmtable.bin"
_meta_file_name = "vmtable.yml"
# VM category includes three types, converting to 0, 1, 2.
_category_map = {'Delay-insensitive': 0, 'Interactive': 1, 'Unknown': 2}
def __init__(self, topology: str, source: str, sample: int, seed: int, is_temp: bool = False):
super().__init__(scenario="vm_scheduling", topology=topology, source=source, is_temp=is_temp)
self._sample = sample
self._seed = seed
self._download_folder = os.path.join(self._data_root, self._scenario, ".source", ".download")
self._raw_folder = os.path.join(self._data_root, self._scenario, ".source", ".raw")
self._download_file = os.path.join(self._download_folder, self._download_file_name)
self._vm_table_file = os.path.join(self._download_folder, self._vm_table_file_name)
self._raw_vm_table_file = os.path.join(self._raw_folder, self._raw_vm_table_file_name)
self._cpu_readings_file_name_list = []
self._clean_cpu_readings_file_name_list = []
self.aria2 = aria2p.API(
aria2p.Client(
host="http://localhost",
port=6800,
secret=""
)
)
self._download_file_list = []
def download(self, is_force: bool = False):
self._new_folder_list.append(self._download_folder)
os.makedirs(self._download_folder, exist_ok=True)
self._new_file_list.append(self._download_file)
if (not is_force) and os.path.exists(self._download_file):
logger.info_green("File already exists, skipping download.")
else:
logger.info_green(f"Downloading data from {self._source} to {self._download_file}.")
try:
download_file(source=self._source, destination=self._download_file)
except Exception as e:
logger.warning_yellow(f"Failed to download from {self._source} to {self._download_file}.")
raise CommandError("generate", f"Download error: {e}.")
# Download text with all urls.
if os.path.exists(self._download_file):
# Download vm_table and cpu_readings
self._aria2p_download(is_force=is_force)
else:
logger.warning(f"Not found downloaded source file: {self._download_file}.")
def _aria2p_download(self, is_force: bool = False) -> List[list]:
"""Read from the text file which contains urls and use aria2p to download.
Args:
is_force (bool): Is force or not.
"""
logger.info_green("Downloading vmtable and cpu readings.")
# Download parts of cpu reading files.
num_files = 195
# Open the txt file which contains all the required urls.
with open(self._download_file, mode="r", encoding="utf-8") as urls:
for remote_url in urls.read().splitlines():
# Get the file name.
file_name = remote_url.split('/')[-1]
# Two kinds of required files "vmtable" and "vm_cpu_readings-" start with vm.
if file_name.startswith("vmtable"):
if (not is_force) and os.path.exists(self._vm_table_file):
logger.info_green(f"{self._vm_table_file} already exists, skipping download.")
else:
logger.info_green(f"Downloading vmtable from {remote_url} to {self._vm_table_file}.")
self.aria2.add_uris(uris=[remote_url], options={'dir': self._download_folder})
elif file_name.startswith("vm_cpu_readings") and num_files > 0:
num_files -= 1
cpu_readings_file = os.path.join(self._download_folder, file_name)
self._cpu_readings_file_name_list.append(file_name)
if (not is_force) and os.path.exists(cpu_readings_file):
logger.info_green(f"{cpu_readings_file} already exists, skipping download.")
else:
logger.info_green(f"Downloading cpu_readings from {remote_url} to {cpu_readings_file}.")
self.aria2.add_uris(uris=[remote_url], options={'dir': f"{self._download_folder}"})
self._check_all_download_completed()
def _check_all_download_completed(self):
"""Check all download tasks are completed and remove the ".aria2" files."""
while 1:
downloads = self.aria2.get_downloads()
if len(downloads) == 0:
logger.info_green("Doesn't exist any pending file.")
break
if all([download.is_complete for download in downloads]):
# Remove temp .aria2 files.
self.aria2.remove(downloads)
logger.info_green("Download finished.")
break
for download in downloads:
logger.info_green(f"{download.name}, {download.status}, {download.progress:.2f}%")
logger.info_green("-" * 60)
time.sleep(10)
def _unzip_file(self, original_file_name: str, raw_file_name: str):
original_file = os.path.join(self._download_folder, original_file_name)
if os.path.exists(original_file):
raw_file = os.path.join(self._raw_folder, raw_file_name)
if os.path.exists(raw_file):
logger.info_green(f"{raw_file} already exists, skipping unzip.")
else:
# Unzip gz file.
with gzip.open(original_file, mode="rb") as f_in:
logger.info_green(
f"Unzip {original_file} to {raw_file}."
)
with open(raw_file, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
else:
logger.warning(f"Not found downloaded source file: {original_file}.")
def clean(self):
"""Unzip the csv file and process it for building binary file."""
super().clean()
self._new_folder_list.append(self._raw_folder)
os.makedirs(self._raw_folder, exist_ok=True)
logger.info_green("Cleaning VM data.")
# Unzip vmtable.
self._unzip_file(original_file_name=self._vm_table_file_name, raw_file_name=self._raw_vm_table_file_name)
# Unzip cpu readings.
for cpu_readings_file_name in self._cpu_readings_file_name_list:
raw_file_name = cpu_readings_file_name.split(".")[0] + "_raw.csv"
self._clean_cpu_readings_file_name_list.append(cpu_readings_file_name[:-3])
self._unzip_file(original_file_name=cpu_readings_file_name, raw_file_name=raw_file_name)
# Preprocess.
self._preprocess()
def _generate_id_map(self, old_id):
num = len(old_id)
new_id_list = [i for i in range(1, num + 1)]
id_map = dict(zip(old_id, new_id_list))
return id_map
def _process_vm_table(self, raw_vm_table_file: str):
"""Process vmtable file."""
headers = [
'vmid', 'subscriptionid', 'deploymentid', 'vmcreated', 'vmdeleted', 'maxcpu', 'avgcpu', 'p95maxcpu',
'vmcategory', 'vmcorecountbucket', 'vmmemorybucket'
]
required_headers = [
'vmid', 'subscriptionid', 'deploymentid', 'vmcreated', 'vmdeleted', 'vmcategory',
'vmcorecountbucket', 'vmmemorybucket'
]
vm_table = pd.read_csv(raw_vm_table_file, header=None, index_col=False, names=headers)
vm_table = vm_table.loc[:, required_headers]
# Convert to tick by dividing by 300 (5 minutes).
vm_table['vmcreated'] = pd.to_numeric(vm_table['vmcreated'], errors="coerce", downcast="integer") // 300
vm_table['vmdeleted'] = pd.to_numeric(vm_table['vmdeleted'], errors="coerce", downcast="integer") // 300
# The lifetime of the VM is deleted time - created time + 1 (tick).
vm_table['lifetime'] = vm_table['vmdeleted'] - vm_table['vmcreated'] + 1
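        # Worked example (illustrative): a VM created at raw timestamp 600 and
        # deleted at 1500 maps to vmcreated=2, vmdeleted=5, lifetime=4 ticks.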
vm_table['vmcategory'] = vm_table['vmcategory'].map(self._category_map)
# Transform vmcorecount '>24' bucket to 32 and vmmemory '>64' to 128.
vm_table = vm_table.replace({'vmcorecountbucket': '>24'}, 32)
vm_table = vm_table.replace({'vmmemorybucket': '>64'}, 128)
vm_table['vmcorecountbucket'] = pd.to_numeric(
vm_table['vmcorecountbucket'], errors="coerce", downcast="integer"
)
vm_table['vmmemorybucket'] = pd.to_numeric(vm_table['vmmemorybucket'], errors="coerce", downcast="integer")
vm_table.dropna(inplace=True)
vm_table = vm_table.sort_values(by='vmcreated', ascending=True)
# Generate ID map.
vm_id_map = self._generate_id_map(vm_table['vmid'].unique())
sub_id_map = self._generate_id_map(vm_table['subscriptionid'].unique())
deployment_id_map = self._generate_id_map(vm_table['deploymentid'].unique())
id_maps = (vm_id_map, sub_id_map, deployment_id_map)
# Mapping IDs.
vm_table['vmid'] = vm_table['vmid'].map(vm_id_map)
vm_table['subscriptionid'] = vm_table['subscriptionid'].map(sub_id_map)
vm_table['deploymentid'] = vm_table['deploymentid'].map(deployment_id_map)
# Sampling the VM table.
# 2695548 is the total number of vms in the original Azure public dataset.
if self._sample < 2695548:
vm_table = vm_table.sample(n=self._sample, random_state=self._seed)
vm_table = vm_table.sort_values(by='vmcreated', ascending=True)
return id_maps, vm_table
def _convert_cpu_readings_id(self, old_data_path: str, new_data_path: str, vm_id_map: dict):
"""Convert vmid in each cpu readings file."""
with open(old_data_path, 'r') as f_in:
csv_reader = reader(f_in)
with open(new_data_path, 'w') as f_out:
csv_writer = writer(f_out)
csv_writer.writerow(['timestamp', 'vmid', 'maxcpu'])
for row in csv_reader:
# [timestamp, vmid, mincpu, maxcpu, avgcpu]
if row[1] in vm_id_map:
new_row = [int(row[0]) // 300, vm_id_map[row[1]], row[3]]
csv_writer.writerow(new_row)
def _write_id_map_to_csv(self, id_maps):
file_name = ['vm_id_map', 'sub_id_map', 'deployment_id_map']
for index in range(len(id_maps)):
id_map = id_maps[index]
with open(os.path.join(self._raw_folder, file_name[index]) + '.csv', 'w') as f:
csv_writer = writer(f)
csv_writer.writerow(['original_id', 'new_id'])
for key, value in id_map.items():
csv_writer.writerow([key, value])
def _filter_out_vmid(self, vm_table: pd.DataFrame, vm_id_map: dict) -> dict:
new_id_map = {}
for key, value in vm_id_map.items():
if value in vm_table.vmid.values:
new_id_map[key] = value
return new_id_map
def _preprocess(self):
logger.info_green("Process vmtable data.")
# Process vmtable file.
id_maps, vm_table = self._process_vm_table(raw_vm_table_file=self._raw_vm_table_file)
filtered_vm_id_map = self._filter_out_vmid(vm_table=vm_table, vm_id_map=id_maps[0])
with open(self._clean_file, mode="w", encoding="utf-8", newline="") as f:
vm_table.to_csv(f, index=False, header=True)
logger.info_green("Writing id maps file.")
self._write_id_map_to_csv(id_maps=id_maps)
logger.info_green("Reading cpu data.")
# Process every cpu readings file.
for clean_cpu_readings_file_name in self._clean_cpu_readings_file_name_list:
raw_cpu_readings_file_name = clean_cpu_readings_file_name.split(".")[0] + "_raw.csv"
raw_cpu_readings_file = os.path.join(self._raw_folder, raw_cpu_readings_file_name)
clean_cpu_readings_file = os.path.join(self._clean_folder, clean_cpu_readings_file_name)
# Convert vmid.
logger.info_green(f"Process {clean_cpu_readings_file}.")
self._convert_cpu_readings_id(
old_data_path=raw_cpu_readings_file,
new_data_path=clean_cpu_readings_file,
vm_id_map=filtered_vm_id_map
)
def build(self):
super().build()
for clean_cpu_readings_file_name in self._clean_cpu_readings_file_name_list:
clean_cpu_readings_file = os.path.join(self._clean_folder, clean_cpu_readings_file_name)
if os.path.exists(clean_cpu_readings_file):
build_file_name = clean_cpu_readings_file_name.split(".")[0] + ".bin"
build_file = os.path.join(self._build_folder, build_file_name)
logger.info_green(f"Building binary data from {clean_cpu_readings_file} to {build_file}.")
cpu_meta_file = os.path.join(self._meta_folder, "cpu_readings.yml")
convert(meta=cpu_meta_file, file=[clean_cpu_readings_file], output=build_file)
else:
logger.warning_yellow(f"Not found cleaned data: {self._clean_file}.")
class VmSchedulingTopology(DataTopology):
def __init__(self, topology: str, source: str, sample: int, seed: int, is_temp=False):
super().__init__()
self._data_pipeline["vm_data"] = VmSchedulingPipeline(
topology=topology,
source=source,
sample=sample,
seed=seed,
is_temp=is_temp
)
class VmSchedulingProcess:
"""Contains all predefined data topologies of vm_scheduling scenario."""
meta_file_name = "source_urls.yml"
meta_root = os.path.join(StaticParameter.data_root, "vm_scheduling/meta")
def __init__(self, is_temp: bool = False):
self.topologies = {}
self.meta_root = os.path.expanduser(self.meta_root)
self._meta_path = os.path.join(self.meta_root, self.meta_file_name)
with open(self._meta_path) as fp:
self._conf = safe_load(fp)
for topology in self._conf["vm_data"].keys():
self.topologies[topology] = VmSchedulingTopology(
topology=topology,
source=self._conf["vm_data"][topology]["remote_url"],
sample=self._conf["vm_data"][topology]["sample"],
seed=self._conf["vm_data"][topology]["seed"],
is_temp=is_temp
)
|
testing-tools/lxml-ast.py | philippeitis/roxmltree | 228 | 12727397 | #!/usr/bin/env python3
import sys
from lxml import etree
def escape_text(text):
return text.encode('unicode_escape').decode("utf-8")
def split_qname(name):
if name[0] == '{':
return name[1:].split('}')
else:
return [None, name]
def print_ind(depth, *args, **kwargs):
indent = ' ' * depth
indent = indent[:-1]
print(indent, *args, **kwargs)
def print_node(node, depth):
if node.tag is etree.Comment:
print_ind(depth, '- Comment: "{}"'.format(escape_text(node.text)))
if node.tail:
print_ind(depth, '- Text: "{}"'.format(escape_text(node.tail)))
return
if node.tag is etree.PI:
print_ind(depth, '- PI:')
print_ind(depth + 2, 'target: "{}"'.format(node.target))
print_ind(depth + 2, 'value: "{}"'.format(escape_text(node.text)))
if node.tail:
print_ind(depth, '- Text: "{}"'.format(escape_text(node.tail)))
return
print_ind(depth, '- Element:')
if node.tag[0] == '{':
uri, tag = split_qname(node.tag)
print_ind(depth + 2, 'tag_name: {}@{}'.format(tag, uri))
else:
print_ind(depth + 2, 'tag_name:', node.tag)
if node.attrib:
print_ind(depth + 2, 'attributes:')
attrs = []
for name, value in node.attrib.items():
uri, tag = split_qname(name)
if uri:
attrs.append([tag + '@' + uri, value])
else:
attrs.append([tag, value])
attrs = sorted(attrs, key=lambda x: x[0])
for name, value in attrs:
print_ind(depth + 3, '{}: "{}"'.format(name, escape_text(value)))
if node.nsmap:
print_ind(depth + 2, 'namespaces:')
ns_list = []
for name, value in node.nsmap.items():
if not name and not value:
ns_list.append(['None', '""'])
elif not name:
ns_list.append(['None', value])
elif not value:
ns_list.append([name, '""'])
else:
ns_list.append([name, value])
ns_list = sorted(ns_list, key=lambda x: x[0])
for name, value in ns_list:
print_ind(depth + 3, '{}: {}'.format(name, value))
if len(node):
print_ind(depth + 2, 'children:')
if node.text:
print_ind(depth + 3, '- Text: "{}"'.format(escape_text(node.text)))
for child in node:
print_node(child, depth + 3)
elif node.text:
print_ind(depth + 2, 'children:')
print_ind(depth + 3, '- Text: "{}"'.format(escape_text(node.text)))
if node.tail:
print_ind(depth, '- Text: "{}"'.format(escape_text(node.tail)))
tree = etree.parse(sys.argv[1])
root = tree.getroot()
print('Document:')
print_node(root, 1)
|
examples/03_Batching/simple_batching_client.py | ryanleary/tensorrt-laboratory | 226 | 12727404 | import grpc
import simple_pb2
import simple_pb2_grpc
def run():
with grpc.insecure_channel('localhost:50051') as channel:
stub = simple_pb2_grpc.InferenceStub(channel)
def requests():
messages = [simple_pb2.Input(batch_id=i) for i in range(10)]
for msg in messages:
print("Sending Stream batch_id={}".format(msg.batch_id))
yield msg
responses = stub.BatchedCompute(requests())
for resp in responses:
print("Received msg on stream with batch_id={}".format(resp.batch_id))
if __name__ == "__main__":
run()
|
app/utils/throttle.py | kenmingwang/ASoulCnki | 384 | 12727441 | from datetime import datetime
from urllib.parse import urlparse
class Throttle:
def __init__(self, delay):
        self.domains = {}  # per-domain last-access times; could be moved into a database
        self.delay = delay  # minimum interval (seconds) between two downloads of the same domain
def wait_url(self, url_str):
        # throttle based on the netloc (domain) of the URL
domain_url = urlparse(url_str).netloc
        last_accessed = self.domains.get(domain_url)  # fetch the last access time for this domain
if self.delay > 0 and last_accessed is not None:
            # Compute the time elapsed since the last access and subtract it from the
            # required delay; if the result is positive, sleep for the remainder before
            # downloading the next page.
sleep_interval = self.delay - (datetime.now() - last_accessed).seconds
if sleep_interval > 0:
import time
time.sleep(sleep_interval)
self.domains[domain_url] = datetime.now()
|
geocode_sqlite/__init__.py | noslouch/geocode-sqlite | 222 | 12727479 | from .utils import geocode_table
|
evaluate/multipose_keypoint_val.py | nguyenbaviet/MultiPoseNet | 203 | 12727504 | import os, sys
root_path = os.path.realpath(__file__).split('/evaluate/multipose_keypoint_val.py')[0]
os.chdir(root_path)
sys.path.append(root_path)
from training.batch_processor import batch_processor
from network.posenet import poseNet
from datasets.coco import get_loader
from evaluate.tester import Tester
# Hyper-params
coco_root = '/data/COCO/'
backbone = 'resnet101' # 'resnet50'
data_dir = coco_root+'images/'
mask_dir = coco_root
json_path = coco_root+'COCO.json'
inp_size = 480 # input size 480*480
feat_stride = 4
# Set Training parameters
params = Tester.TestParams()
params.subnet_name = 'keypoint_subnet'
params.gpus = [0]
params.ckpt = './demo/models/ckpt_baseline_resnet101.h5'
params.batch_size = 6 * len(params.gpus)
params.print_freq = 50
# validation data
valid_data = get_loader(json_path, data_dir, mask_dir, inp_size, feat_stride,
preprocess='resnet', batch_size=params.batch_size-2*len(params.gpus), training=False,
shuffle=False, num_workers=4, subnet=params.subnet_name)
print('val dataset len: {}'.format(len(valid_data.dataset)))
# model
if backbone == 'resnet101':
model = poseNet(101)
elif backbone == 'resnet50':
model = poseNet(50)
for name, module in model.named_children():
for para in module.parameters():
para.requires_grad = False
tester = Tester(model, params, batch_processor, valid_data)
tester.val()
|
tests/test_schedule.py | skrytebane/meinheld | 1,186 | 12727507 | from base import *
import requests
ASSERT_RESPONSE = b"Hello world!"
RESPONSE = [b"Hello ", b"world!"]
class App(BaseApp):
environ = None
def __call__(self, environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
self.environ = environ.copy()
print(environ)
return RESPONSE
def test_simple():
def _call():
assert(True)
server.shutdown()
server.listen(("0.0.0.0", 8000))
server.schedule_call(0, _call)
server.run(App())
def test_args():
def _call(arg):
assert(arg == 1)
server.shutdown()
server.listen(("0.0.0.0", 8000))
server.schedule_call(0, _call, 1)
server.run(App())
def test_args2():
def _call(a, b):
assert(a == 1)
assert(b == "ABC")
server.shutdown()
server.listen(("0.0.0.0", 8000))
server.schedule_call(0, _call, 1, "ABC")
server.run(App())
def test_kwargs():
def _call(a=0, b="test"):
assert(a == 1)
assert(b == "ABC")
server.shutdown()
server.listen(("0.0.0.0", 8000))
server.schedule_call(0, _call, b="ABC", a=1)
server.run(App())
def test_kwargs2():
def _call(a, b="test", c=False):
assert(a == 1)
assert(b == "ABC")
assert(c == True)
server.shutdown()
server.listen(("0.0.0.0", 8000))
server.schedule_call(0, _call, 1, c=True, b="ABC")
server.run(App())
def test_kwargs3():
def _call(a, b="test", c=False):
assert(a == 1)
assert(b == "test")
assert(c == False)
server.shutdown()
server.listen(("0.0.0.0", 8000))
server.schedule_call(0, _call, 1)
server.run(App())
def test_time():
def _call(a, b):
assert(a == 1)
assert(b == "ABC")
server.shutdown()
server.listen(("0.0.0.0", 8000))
server.schedule_call(5, _call, 1, "ABC")
server.run(App())
def test_nested():
def _schedule_call():
server.shutdown()
def _call():
server.schedule_call(0, _schedule_call)
server.listen(("0.0.0.0", 8000))
server.schedule_call(0, _call)
server.run(App())
def test_many():
l = [i for i in range(10)]
def _schedule_call():
assert(len(l) == 0)
server.shutdown()
def _call(i):
l.pop()
server.listen(("0.0.0.0", 8000))
for i in l:
server.schedule_call(0, _call, i)
server.schedule_call(1, _schedule_call)
server.run(App())
|
data/transforms/build.py | livestockai/CVWC2019-Amur-Tiger-Re-ID | 109 | 12727514 | # encoding: utf-8
"""
@author: loveletter
@contact: <EMAIL>
"""
import torchvision.transforms as T
from .transforms import RandomErasing
def build_transforms(cfg, is_train=True):
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
if is_train:
transform_ = T.Compose([
T.Resize(cfg.INPUT.SIZE_TRAIN),
T.RandomRotation(cfg.INPUT.RO_DEGREE),
T.ColorJitter(brightness=cfg.INPUT.BRIGHT_PROB, saturation=cfg.INPUT.SATURA_PROB, contrast=cfg.INPUT.CONTRAST_PROB, hue=cfg.INPUT.HUE_PROB),
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN),
T.Pad(cfg.INPUT.PADDING),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
T.ToTensor(),
normalize_transform
])
transform_body = T.Compose([
T.Resize(cfg.PART.SIZE_BODY),
T.RandomRotation(cfg.INPUT.RO_DEGREE),
T.ColorJitter(brightness=cfg.INPUT.BRIGHT_PROB, saturation=cfg.INPUT.SATURA_PROB,
contrast=cfg.INPUT.CONTRAST_PROB, hue=cfg.INPUT.HUE_PROB),
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN),
T.Pad(cfg.INPUT.PADDING),
T.RandomCrop(cfg.PART.SIZE_BODY),
T.ToTensor(),
normalize_transform
])
transform_paw = T.Compose([
T.Resize(cfg.PART.SIZE_PAW),
T.RandomRotation(cfg.INPUT.RO_DEGREE),
T.ColorJitter(brightness=cfg.INPUT.BRIGHT_PROB, saturation=cfg.INPUT.SATURA_PROB,
contrast=cfg.INPUT.CONTRAST_PROB, hue=cfg.INPUT.HUE_PROB),
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN),
T.Pad(cfg.INPUT.PADDING),
T.RandomCrop(cfg.PART.SIZE_PAW),
T.ToTensor(),
normalize_transform
])
return transform_, transform_body, transform_paw
else:
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.ToTensor(),
normalize_transform
])
return transform
|
vlcp/service/utils/autoload.py | hubo1016/vlcp | 252 | 12727520 | '''
Created on 2016/5/27
:author: hubo
'''
from vlcp.server.module import Module, ModuleLoadStateChanged
from vlcp.config.config import defaultconfig
import pkgutil
@defaultconfig
class AutoLoad(Module):
'''
Auto load some modules from a package. Usually used to load network plugins.
'''
autosuccess = False
# Auto load packages from some packages
_default_autoloadpackages = ('vlcp.service.sdn.plugins',)
def __init__(self, server):
Module.__init__(self, server)
async def load(self, container):
await Module.load(self, container)
loadmodules = []
for p in self.autoloadpackages:
try:
def onerror(name):
self._logger.warning("Autoload package %r on package %r failed", name, p)
pkg = __import__(p, fromlist = ('dummy',))
for _, name, ispkg in pkgutil.walk_packages(pkg.__path__, p + '.', onerror):
if not ispkg:
try:
pymod = __import__(name, fromlist = ('dummy',))
for m in vars(pymod).values():
if isinstance(m, type) and issubclass(m, Module) and getattr(m, '__module__', '') == name:
loadmodules.append(m)
except Exception:
self._logger.warning('Autoload module %r failed', name, exc_info = True)
except Exception:
self._logger.warning('Autoload package %r failed', p, exc_info = True)
if loadmodules:
await container.execute_all([self.server.moduleloader.loadmodule(m) for m in loadmodules])
await self.changestate(ModuleLoadStateChanged.SUCCEEDED, container)
async def unload(self, container, force=False):
await Module.unload(self, container, force=force)
|
stochastic/processes/continuous/wiener.py | zaczw/stochastic | 268 | 12727527 | """Wiener process."""
from stochastic.processes.continuous.brownian_motion import BrownianMotion
class WienerProcess(BrownianMotion):
"""Wiener process, or standard Brownian motion.
.. image:: _static/wiener_process.png
:scale: 50%
:param float t: the right hand endpoint of the time interval :math:`[0,t]`
for the process
:param numpy.random.Generator rng: a custom random number generator
"""
def __init__(self, t=1, rng=None):
super().__init__(drift=0, scale=1, t=t, rng=rng)
def __str__(self):
return "Wiener process on [0, {t}]".format(t=str(self.t))
def __repr__(self):
return "WienerProcess(t={t})".format(t=str(self.t))
|
root/src/log.py | tensorlang/nao | 332 | 12727575 | from tensorflow.python.framework import dtypes
import sys
from nao.run import graph_summary
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def LogWithStep(summary_protobuf, global_step) -> dtypes.int64:
graph_summary.get_summary_writer().add_summary(summary_protobuf, global_step=global_step)
return 0
def Log(summary_protobuf) -> dtypes.int64:
eprint("Writing summary!")
graph_summary.get_summary_writer().add_summary(summary_protobuf)
return 0
def Debug(x) -> dtypes.int64:
eprint("Debug %s" % x)
return 0
|
src/acrtransfer/azext_acrtransfer/_client_factory.py | Mannan2812/azure-cli-extensions | 207 | 12727623 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
def cf_acrtransfer(cli_ctx, *_):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azext_acrtransfer.vendored_sdks.containerregistry.v2019_12_01_preview._container_registry_management_client import ContainerRegistryManagementClient
return get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient)
|
river/tests/admin.py | JohnieBraaf/django-river | 705 | 12727647 | from django.contrib import admin
from django.contrib.admin import ModelAdmin
from river.tests.models import BasicTestModel
class BasicTestModelAdmin(ModelAdmin):
pass
admin.site.register(BasicTestModel, BasicTestModelAdmin)
|
util/__init__.py | SebastianoF/pyro2 | 151 | 12727649 | """This module provides utility functions for pyro"""
__all__ = ['runparams', 'profile', 'plot_tools']
|
python/fserver.py | VMAJSTER/cloudfire | 194 | 12727654 | #!/usr/bin/env python
#
# Be a Fast CGI server, running on a localhost port.
#
import click, redis, os, sys
from flup.server.fcgi import WSGIServer
class MyWSGIServer(WSGIServer):
# See .../flup/server/fcgi_base.py for original version
debug = False # just in case
def error(self, req):
"""
This be triggerd by:
curl -v "https://hostname/..%c0%af..%c0%af..%c0%af..%c0%af..%c0%af..%c0%af..%c0%af..%c0%afetc/passwd"
"""
# NOTE: req = <flup.server.fcgi_base.Request>
# NOTE: "working outside of request context" here
# Have not found a way to get the IP address here... if I could,
# then would ban it... like this:
errorpage = """<pre>Unhandled Late Exception.\n\nSorry."""
req.stdout.write('Status: 500 Internal Server Error\r\n' +
'Content-Type: text/html\r\n\r\n' + errorpage)
'''
Redis URL spec:
redis://[:password]@localhost:6379/0
rediss://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
'''
@click.command('start')
@click.option('--port', '-p', default=9999, help="FastCGI port number on localhost")
@click.option('--ip', '-h', default='127.0.0.1')
@click.option('--vhost', '-v', help="Virtual hostname to use")
@click.option('--redis-url', '-r', default='redis://localhost:6379/', help="URL for Redis server")
@click.option('--devmode', '-d', is_flag=False, help="Runs locally as web server. Dev only")
def start_server(ip, port, devmode, redis_url, vhost):
from example_app import app
RDB = redis.Redis.from_url(redis_url)
app.my_vhosts.append(vhost)
app.redis = RDB
app.start_bg_tasks()
# test redis is working early
try:
print "Redis dbsize: %s" % RDB.dbsize()
except redis.exceptions.ConnectionError, e:
print str(e)
sys.exit(1)
if devmode:
app.debug = True
app.run(host="0.0.0.0", port=port)
else:
print "Running as FastCGI at %s:%d" % (ip, port)
MyWSGIServer(app, bindAddress=(ip, port), multiplexed=True, umask=0).run()
if __name__ == '__main__':
start_server()
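# Illustrative launch (option names are defined above; the values are examples):
#   python fserver.py --port 9999 --vhost example.com --redis-url redis://localhost:6379/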
|
tests/python/twitter/common/rpc/finagle/test_span.py | zhouyijiaren/commons | 1,143 | 12727659 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import pytest
from twitter.common.rpc.finagle.trace import SpanId
def test_span_from_value():
# hex regex works
with pytest.raises(SpanId.InvalidSpanId):
SpanId.from_value('1234')
assert SpanId.from_value('0000000000001234').value == int('1234', 16)
assert SpanId.from_value(1234).value == 1234
assert SpanId.from_value(SpanId(1234)).value == 1234
assert SpanId.from_value(None).value is None
|
networks/keras/examples/imdb_cnn.py | redoclag/plaidml | 4,535 | 12727711 | '''This example demonstrates the use of Convolution1D for text classification.
Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on Intel i5 2.4Ghz CPU.
10s/epoch on Tesla K40 GPU.
'''
from __future__ import print_function
import numpy as np
import keras.callbacks
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
from example_correctness_test_utils import TrainingHistory, StopwatchManager
# set parameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 2
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
train_input_truncation = 1500
test_input_truncation = 200
x_train = x_train[:train_input_truncation]
y_train = y_train[:train_input_truncation]
x_test = x_test[:test_input_truncation]
y_test = y_test[:test_input_truncation]
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
history = TrainingHistory()
sw_manager = StopwatchManager(stop_watch, compile_stop_watch)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
callbacks=[history, sw_manager])
output.contents = np.array([history.acc, history.loss, history.val_acc, history.val_loss])
|
jmetal/util/test/test_constraint_handling.py | 12yuens2/jMetalPy | 335 | 12727727 | import unittest
from jmetal.core.solution import Solution
from jmetal.util.constraint_handling import is_feasible, number_of_violated_constraints, \
overall_constraint_violation_degree, feasibility_ratio
class ConstraintHandlingTestCases(unittest.TestCase):
def test_should_is_feasible_return_true_if_the_solution_has_no_constraints(self) -> None:
solution = Solution(number_of_variables=2, number_of_objectives=2, number_of_constraints=0)
self.assertEqual(True, is_feasible(solution))
def test_should_is_feasible_return_true_if_the_solution_has_constraints_and_is_feasible(self) -> None:
solution = Solution(number_of_variables=2, number_of_objectives=2, number_of_constraints=1)
solution.constraints[0] = 0
self.assertEqual(True, is_feasible(solution))
def test_should_is_feasible_return_false_if_the_solution_has_is_not_feasible(self) -> None:
solution = Solution(number_of_variables=2, number_of_objectives=2, number_of_constraints=1)
solution.constraints[0] = -1
self.assertEqual(False, is_feasible(solution))
def test_should_number_of_violated_constraints_return_zero_if_the_solution_has_no_constraints(self) -> None:
solution = Solution(number_of_variables=2, number_of_objectives=2, number_of_constraints=0)
self.assertEqual(0, number_of_violated_constraints(solution))
def test_should_number_of_violated_constraints_return_zero_if_the_solution_has_not_violated_constraints(self) -> None:
solution = Solution(number_of_variables=2, number_of_objectives=2, number_of_constraints=2)
self.assertEqual(0, number_of_violated_constraints(solution))
def test_should_number_of_violated_constraints_return_the_right_number_of_violated_constraints(self) -> None:
solution = Solution(number_of_variables=2, number_of_objectives=2, number_of_constraints=2)
solution.constraints[0] = 0
solution.constraints[1] = -2
self.assertEqual(1, number_of_violated_constraints(solution))
def test_should_constraint_violation_degree_return_zero_if_the_solution_has_no_constraints(self) -> None:
solution = Solution(number_of_variables=2, number_of_objectives=2, number_of_constraints=0)
self.assertEqual(0, overall_constraint_violation_degree(solution))
def test_should_constraint_violation_degree_return_zero_if_the_solution_has_not_violated_constraints(self) -> None:
solution = Solution(number_of_variables=2, number_of_objectives=2, number_of_constraints=2)
self.assertEqual(0, overall_constraint_violation_degree(solution))
def test_should_constraint_violation_degree_return_the_right_violation_degree(self) -> None:
solution = Solution(number_of_variables=2, number_of_objectives=2, number_of_constraints=2)
solution.constraints[0] = -1
solution.constraints[1] = -2
self.assertEqual(-3, overall_constraint_violation_degree(solution))
def test_should_feasibility_ratio_raise_and_exception_if_the_solution_list_is_empty(self) -> None:
with self.assertRaises(Exception):
feasibility_ratio([])
def test_should_feasibility_ratio_return_zero_if_all_the_solutions_in_a_list_are_unfeasible(self) -> None:
solution1 = Solution(2, 2, 2)
solution2 = Solution(2, 2, 2)
solution1.constraints[0] = 0
solution1.constraints[1] = -1
solution2.constraints[0] = -2
solution2.constraints[1] = 0
self.assertEqual(0, feasibility_ratio([solution1, solution2]))
def test_should_feasibility_ratio_return_one_if_all_the_solutions_in_a_list_are_feasible(self) -> None:
solution1 = Solution(2, 2, 2)
solution2 = Solution(2, 2, 2)
solution1.constraints[0] = 0
solution1.constraints[1] = 0
solution2.constraints[0] = 0
solution2.constraints[1] = 0
self.assertEqual(1.0, feasibility_ratio([solution1, solution2]))
def test_should_feasibility_ratio_return_the_right_percentage_of_feasible_solutions(self) -> None:
solution1 = Solution(2, 2, 1)
solution2 = Solution(2, 2, 1)
solution3 = Solution(2, 2, 1)
solution1.constraints[0] = -1
solution2.constraints[0] = 0
solution3.constraints[0] = -2
self.assertEqual(1/3, feasibility_ratio([solution1, solution2, solution3]))
if __name__ == '__main__':
unittest.main()
|
lib/galaxy/webapps/base/api.py | rhpvorderman/galaxy | 1,085 | 12727746 | from fastapi import FastAPI, Request
from fastapi.exceptions import RequestValidationError
from fastapi.responses import JSONResponse
from starlette.responses import Response
try:
from starlette_context.middleware import RawContextMiddleware
from starlette_context.plugins import RequestIdPlugin
except ImportError:
pass
from galaxy.exceptions import MessageException
from galaxy.web.framework.base import walk_controller_modules
from galaxy.web.framework.decorators import (
api_error_message,
validation_error_to_message_exception
)
def add_exception_handler(
app: FastAPI
) -> None:
@app.exception_handler(RequestValidationError)
async def validate_exception_middleware(request: Request, exc: RequestValidationError) -> Response:
exc = validation_error_to_message_exception(exc)
error_dict = api_error_message(None, exception=exc)
return JSONResponse(
status_code=400,
content=error_dict
)
@app.exception_handler(MessageException)
async def message_exception_middleware(request: Request, exc: MessageException) -> Response:
error_dict = api_error_message(None, exception=exc)
return JSONResponse(
status_code=exc.status_code,
content=error_dict
)
def add_request_id_middleware(app: FastAPI):
app.add_middleware(RawContextMiddleware, plugins=(RequestIdPlugin(force_new_uuid=True),))
def include_all_package_routers(app: FastAPI, package_name: str):
for _, module in walk_controller_modules(package_name):
router = getattr(module, "router", None)
if router:
app.include_router(router)
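# Illustrative wiring (a sketch; the router package name is an assumption, not
# taken from this module):
#   app = FastAPI()
#   add_exception_handler(app)
#   add_request_id_middleware(app)
#   include_all_package_routers(app, "galaxy.webapps.galaxy.api")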
|
Python/random/generate_number_image.py | saneravi/ML_Stuff | 209 | 12727756 | #!/usr/bin/env python
"""
Generate an image where the x-axis is the seed, the y-axis is the random number.
"""
# core modules
import random
import click
import imageio
import javarandom
# 3rd party
import numpy as np
from numpy.random import MT19937, SFC64, Generator, Philox
from randomgen import RandomGenerator, ThreeFry, Xoroshiro128, Xorshift1024
@click.command()
@click.option("--size", default=1000, help="Number of seeds / elements to check")
@click.option(
"--prng",
type=click.Choice(
[
"java",
"python",
"numpy",
"Xoroshiro128",
"MT19937",
"Philox",
"SFC64",
"Xorshift1024",
"ThreeFry",
]
),
default="python",
)
def cli(size, prng):
generate_image(size=size, prng=prng)
def generate_image(size, prng):
allowed_prngs = [
"java",
"python",
"numpy",
"Xoroshiro128",
"MT19937",
"Philox",
"SFC64",
"Xorshift1024",
"ThreeFry",
]
if prng not in allowed_prngs:
raise ValueError(f"prng={prng} is not in {allowed_prngs}")
arr = np.zeros((size, size))
for i in range(size):
if prng == "python":
random.seed(i)
elif prng == "numpy":
np.random.seed(i)
elif prng == "java":
rnd = javarandom.Random(i)
elif prng == "Xoroshiro128":
rnd = RandomGenerator(Xoroshiro128())
elif prng == "Xorshift1024":
rnd = RandomGenerator(Xorshift1024())
elif prng == "ThreeFry":
rnd = RandomGenerator(ThreeFry())
elif prng == "MT19937":
rnd = Generator(MT19937())
elif prng == "Philox":
rnd = Generator(Philox())
elif prng == "SFC64":
rnd = Generator(SFC64())
for j in range(size):
if prng == "python":
random_number = random.random()
elif prng == "numpy":
random_number = np.random.random()
elif prng == "java":
random_number = rnd.nextDouble()
elif prng in ["Xoroshiro128", "Xorshift1024", "ThreeFry"]:
random_number = rnd.random_sample()
elif prng in ["MT19937", "Philox", "SFC64"]:
random_number = rnd.random()
arr[j, i] = random_number
print("{}\t{}\t{}".format(i, arr[0, i], arr[1, i]))
imageio.imwrite(f"1000-random-numbers-{prng}.png", arr)
if __name__ == "__main__":
cli()
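# Illustrative invocations (option names match the click options above):
#   python generate_number_image.py --size 500 --prng python
#   python generate_number_image.py --prng Xoroshiro128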
|
up/tasks/distill/models/utils.py | ModelTC/EOD | 196 | 12727775 | import torch
from up.tasks.det.models.utils.assigner import map_rois_to_level
from up.tasks.det.models.utils.bbox_helper import (
clip_bbox,
filter_by_size
)
def mlvl_extract_roi_features(rois, x_features, fpn_levels,
fpn_strides, base_scale, roi_extractor,
return_recover_inds=False):
x_rois, recover_inds = map_rois_to_level(fpn_levels, base_scale, rois)
mlvl_rois = []
mlvl_features = []
mlvl_strides = []
for lvl_idx in fpn_levels:
if x_rois[lvl_idx].numel() > 0:
mlvl_rois.append(x_rois[lvl_idx])
mlvl_features.append(x_features[lvl_idx])
mlvl_strides.append(fpn_strides[lvl_idx])
assert len(mlvl_rois) > 0, "No rois provided for mimic stage"
pooled_feats = [roi_extractor(*args) for args in zip(mlvl_rois, mlvl_features, mlvl_strides)]
if return_recover_inds:
return torch.cat(pooled_feats, dim=0), recover_inds
else:
return torch.cat(pooled_feats, dim=0)
def mlvl_extract_gt_masks(gt_bboxes, fpn_levels, fpn_strides, base_scale, featmap_sizes):
gt_tensors = []
for b_ix in range(len(gt_bboxes)):
gt, _ = filter_by_size(gt_bboxes[b_ix], min_size=1)
bdx = gt.new_ones(gt.shape[0], 1) * b_ix
gt_tensors.append(torch.cat([bdx, gt[:, :4]], dim=1))
gt_bboxes, _ = map_rois_to_level(fpn_levels, base_scale, torch.cat(gt_tensors, dim=0))
imit_range = [0, 0, 0, 0, 0]
with torch.no_grad():
masks = []
for idx in range(len(featmap_sizes)):
b, _, h, w = featmap_sizes[idx]
gt_level = gt_bboxes[idx]
mask = gt_level.new_zeros(b, h, w)
for gt in gt_level:
gt_level_map = gt[1:] / fpn_strides[idx]
lx = max(int(gt_level_map[0]) - imit_range[idx], 0)
rx = min(int(gt_level_map[2]) + imit_range[idx], w)
ly = max(int(gt_level_map[1]) - imit_range[idx], 0)
ry = min(int(gt_level_map[3]) + imit_range[idx], h)
if (lx == rx) or (ly == ry):
mask[int(gt[0]), ly, lx] += 1
else:
mask[int(gt[0]), ly:ry, lx:rx] += 1
mask = (mask > 0).type_as(gt_level)
masks.append(mask)
return masks
def match_gts(proposals, gt_bboxes, ignore_regions, image_info, matcher):
    """Label each proposal 1 if it matches a ground-truth box, else 0."""
B = len(gt_bboxes)
if ignore_regions is None:
ignore_regions = [None] * B
labels = proposals.new_zeros(proposals.shape[0]).long()
for b_ix in range(B):
bind = torch.where(proposals[:, 0] == b_ix)[0]
rois = proposals[bind]
# rois = proposals[proposals[:, 0] == b_ix]
if rois.numel() > 0:
# remove batch idx, score & label
rois = rois[:, 1:1 + 4]
else:
rois = rois.view(-1, 4)
# filter gt_bboxes which are too small
gt, _ = filter_by_size(gt_bboxes[b_ix], min_size=1)
# clip rois which are out of bounds
rois = clip_bbox(rois, image_info[b_ix])
rois_target_gt, overlaps = matcher.match(
rois, gt, ignore_regions[b_ix], return_max_overlaps=True)
pos_inds = (rois_target_gt >= 0).long()
labels[bind] = pos_inds
return labels
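# Illustrative call sketch (assumption, not part of the original file): with a
# 4-level FPN and a hypothetical RoIAlign-style extractor, the feature pooling
# above could be driven like
#   feats, recover_inds = mlvl_extract_roi_features(
#       rois,                          # Tensor [N, >=5]: (batch_idx, x1, y1, x2, y2, ...)
#       x_features,                    # list of per-level feature maps
#       fpn_levels=[0, 1, 2, 3],
#       fpn_strides=[4, 8, 16, 32],
#       base_scale=56,
#       roi_extractor=roi_align,       # hypothetical callable(rois, feature, stride)
#       return_recover_inds=True)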
|
boto3_type_annotations/boto3_type_annotations/application_autoscaling/client.py | cowboygneox/boto3_type_annotations | 119 | 12727843 | <reponame>cowboygneox/boto3_type_annotations
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
def delete_scaling_policy(self, PolicyName: str, ServiceNamespace: str, ResourceId: str, ScalableDimension: str) -> Dict:
pass
def delete_scheduled_action(self, ServiceNamespace: str, ScheduledActionName: str, ResourceId: str, ScalableDimension: str) -> Dict:
pass
def deregister_scalable_target(self, ServiceNamespace: str, ResourceId: str, ScalableDimension: str) -> Dict:
pass
def describe_scalable_targets(self, ServiceNamespace: str, ResourceIds: List = None, ScalableDimension: str = None, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def describe_scaling_activities(self, ServiceNamespace: str, ResourceId: str = None, ScalableDimension: str = None, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def describe_scaling_policies(self, ServiceNamespace: str, PolicyNames: List = None, ResourceId: str = None, ScalableDimension: str = None, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def describe_scheduled_actions(self, ServiceNamespace: str, ScheduledActionNames: List = None, ResourceId: str = None, ScalableDimension: str = None, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def put_scaling_policy(self, PolicyName: str, ServiceNamespace: str, ResourceId: str, ScalableDimension: str, PolicyType: str = None, StepScalingPolicyConfiguration: Dict = None, TargetTrackingScalingPolicyConfiguration: Dict = None) -> Dict:
pass
def put_scheduled_action(self, ServiceNamespace: str, ScheduledActionName: str, ResourceId: str, ScalableDimension: str, Schedule: str = None, StartTime: datetime = None, EndTime: datetime = None, ScalableTargetAction: Dict = None) -> Dict:
pass
def register_scalable_target(self, ServiceNamespace: str, ResourceId: str, ScalableDimension: str, MinCapacity: int = None, MaxCapacity: int = None, RoleARN: str = None) -> Dict:
pass
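# Illustrative usage sketch (assumption, not part of the stub): these annotations
# mirror the real boto3 Application Auto Scaling client, which would be used like
#   import boto3
#   client = boto3.client('application-autoscaling')
#   client.register_scalable_target(
#       ServiceNamespace='ecs',
#       ResourceId='service/my-cluster/my-service',   # hypothetical ECS service
#       ScalableDimension='ecs:service:DesiredCount',
#       MinCapacity=1,
#       MaxCapacity=4,
#   )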
|
AIPacman/Algorithm_1/Algorithm_1_v2/demo.py | sanmusane/AIGames | 543 | 12727865 | <reponame>sanmusane/AIGames
'''
Function:
    demonstrate the trained model playing the game
Author:
Charles
WeChat official account:
Charles的皮卡丘
'''
import torch
import config
from nets.nets import DQNet, DQNAgent
from gameAPI.game import GamePacmanAgent
'''run demo'''
def runDemo():
if config.operator == 'ai':
game_pacman_agent = GamePacmanAgent(config)
dqn_net = DQNet(config)
dqn_net.load_state_dict(torch.load(config.weightspath))
dqn_agent = DQNAgent(game_pacman_agent, dqn_net, config)
dqn_agent.test()
elif config.operator == 'person':
GamePacmanAgent(config).runGame()
else:
raise ValueError('config.operator should be <ai> or <person>...')
'''run'''
if __name__ == '__main__':
runDemo() |
sphinx-sources/Examples/Commands/AperturesandScreens2.py | jccmak/lightpipes | 132 | 12727927 | <filename>sphinx-sources/Examples/Commands/AperturesandScreens2.py
from LightPipes import *
import matplotlib.pyplot as plt
GridSize = 10*mm
GridDimension = 256
lambda_ = 1000*nm #lambda_ is used because lambda is a Python build-in function.
R=2.5*mm # radius of the aperture (defined for reference; not used below)
xs=1*mm; ys=1*mm # shift of the aperture (defined for reference; not used below)
Field = Begin(GridSize, lambda_, GridDimension)
Field=CircScreen(0.7*mm,1*mm,1.5*mm,Field)
Field=RectScreen(1*mm,1*mm,-1.5*mm,-1.5*mm,-0.002,Field)
Field=RectScreen(1*mm,3.5*mm,-2*mm,2.5*mm,30,Field)
Field=GaussAperture(4*mm,0,0,1,Field)
I=Intensity(0,Field) # flag 0: intensity without normalization
plt.imshow(I); plt.axis('off')
plt.show()
|
test/test_cpu_stat.py | cuongnguyen2190/pure-python-adb | 382 | 12727957 | <reponame>cuongnguyen2190/pure-python-adb
def test_get_cpu_times(device):
result = device.cpu_times()
assert result is not None
def test_get_cpu_percent(device):
percent = device.cpu_percent(interval=1)
assert percent is not None
assert percent != 0
def test_get_cpu_count(device):
    # assumes the attached test device reports two CPU cores
    assert device.cpu_count() == 2
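# Illustrative fixture sketch (assumption, not part of the original file): the
# `device` fixture used above would typically come from a conftest.py along the
# lines of
#   import pytest
#   from ppadb.client import Client as AdbClient
#   @pytest.fixture
#   def device():
#       return AdbClient(host="127.0.0.1", port=5037).devices()[0]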
|
SubCircuits/gain_stage.py | pingdynasty/KlonCentaur | 210 | 12727971 | import numpy as np
from numpy.polynomial import polynomial as poly
import scipy.signal as signal
import matplotlib.pyplot as plt
# Component values
GAIN = 1.0
R6 = 10e3
Ra = 100e3 * GAIN
R10b = 2e3 + 100e3 * (1-GAIN)
R11 = 15e3
R12 = 422e3
C3 = 0.1e-6
C5 = 68e-9
C7 = 82e-9
C8 = 390e-12
a0s = C7 * C8 * R10b * R11 * R12
a1s = C7 * R10b * R11 + C8 * R12 * (R10b + R11)
a2s = R10b + R11
b0s = a0s
b1s = C7 * R11 * R12 + a1s
b2s = R12 + a2s
w, h = signal.freqs([b0s, b1s, b2s], [a0s, a1s, a2s], worN=np.logspace(1.3, 4.3, 1000)*(2*np.pi))
plt.semilogx(w/(2*np.pi), 20*np.log10(np.abs(h+np.finfo(float).eps)))
plt.xlabel('Frequency [Hz]')
plt.ylabel('Magnitude [dB]')
plt.show()
# Create impedances
# z1Num = R6 # poly.Polynomial((1, R6 * (C3 + C5)))
# z1Den = poly.Polynomial((0, C3, R6 * C3 * C5))
# z2Num = R10b + R11 # poly.Polynomial((R10b + R11, C7 * R10b * R11))
# z2Den = 1.0 # poly.Polynomial((1, C7 * R11))
# z3Num = R12
# z3Den = 1 # poly.Polynomial((1, C8 * R12))
# # Simplify
# b_s = z1Den * Ra * (z3Den * z2Num + z2Den * z3Num)
# a_s = z2Den * z3Den * (Ra * z1Den + z1Num)
# print(b_s.coef)
# print(a_s.coef)
# w, h = signal.freqs(b_s.coef, a_s.coef, worN=np.logspace(0, 2, 1000)*(2*np.pi))
# plt.semilogx(w/(2*np.pi), 20*np.log10(np.abs(h+np.finfo(float).eps)))
# plt.show()
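# Illustrative sketch (assumption, not part of the original file): the analog
# prototype above could be discretized with the bilinear transform before being
# ported to a real-time implementation, e.g.
#   fs = 48000.0                                            # assumed sample rate
#   b_z, a_z = signal.bilinear([b0s, b1s, b2s], [a0s, a1s, a2s], fs=fs)
#   w_z, h_z = signal.freqz(b_z, a_z, worN=1024, fs=fs)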
|
tests/generic/generators.py | dubesar/model_bakery | 448 | 12728001 | def gen_value_string():
return 'value'
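# Illustrative note (assumption, not part of the original file): model_bakery can
# be pointed at a custom generator like this one through the BAKER_CUSTOM_FIELDS_GEN
# setting, e.g.
#   BAKER_CUSTOM_FIELDS_GEN = {
#       'myapp.fields.CustomField': 'tests.generic.generators.gen_value_string',
#   }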
|
opennmt/tests/sequence_tagger_test.py | gcervantes8/OpenNMT-tf | 1,363 | 12728003 | <gh_stars>1000+
import numpy as np
import tensorflow as tf
from opennmt.models import sequence_tagger
class SequenceTaggerTest(tf.test.TestCase):
def _testTagSchemeFlags(
self,
tag_fn,
labels,
predicted,
expected_true_positives,
expected_false_positives,
expected_false_negatives,
    ):
        """Check the TP/FP/FN counts produced by a tag-scheme flagging function."""
        labels = np.array([[tf.compat.as_bytes(c) for c in labels]])
predicted = np.array([[tf.compat.as_bytes(c) for c in predicted]])
gold_flags, predicted_flags = tag_fn(labels, predicted)
true_positives = tf.keras.metrics.TruePositives()
false_positives = tf.keras.metrics.FalsePositives()
false_negatives = tf.keras.metrics.FalseNegatives()
true_positives.update_state(gold_flags, predicted_flags)
false_positives.update_state(gold_flags, predicted_flags)
false_negatives.update_state(gold_flags, predicted_flags)
tp = self.evaluate(true_positives.result())
fp = self.evaluate(false_positives.result())
fn = self.evaluate(false_negatives.result())
self.assertEqual(expected_true_positives, tp, msg="true positives mismatch")
self.assertEqual(expected_false_positives, fp, msg="false positives mismatch")
self.assertEqual(expected_false_negatives, fn, msg="false negatives mismatch")
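    # Illustrative note (assumption, not part of the original test): with BIOES
    # tags an entity counts as a true positive only when its full span and type
    # match, e.g.
    #   gold:      B-LOC I-LOC E-LOC  -> one gold entity
    #   predicted: B-LOC E-LOC S-LOC  -> two predicted entities, neither matching
    # which yields TP=0, FP=2, FN=1 (covered by one of the cases below).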
def testBIOESFlags(self):
self._testTagSchemeFlags(
sequence_tagger.flag_bioes_tags,
["S-LOC"],
["S-ORG"],
expected_true_positives=0,
expected_false_positives=1,
expected_false_negatives=1,
)
self._testTagSchemeFlags(
sequence_tagger.flag_bioes_tags,
["B-LOC", "I-LOC", "E-LOC"],
["B-LOC", "I-LOC", "E-LOC"],
expected_true_positives=1,
expected_false_positives=0,
expected_false_negatives=0,
)
self._testTagSchemeFlags(
sequence_tagger.flag_bioes_tags,
["O", "B-LOC", "I-LOC", "E-LOC"],
["B-LOC", "I-LOC", "E-LOC", "O"],
expected_true_positives=0,
expected_false_positives=1,
expected_false_negatives=1,
)
self._testTagSchemeFlags(
sequence_tagger.flag_bioes_tags,
["B-LOC", "I-LOC", "E-LOC"],
["B-LOC", "E-LOC", "S-LOC"],
expected_true_positives=0,
expected_false_positives=2,
expected_false_negatives=1,
)
self._testTagSchemeFlags(
sequence_tagger.flag_bioes_tags,
["B-LOC", "I-LOC", "E-LOC"],
["S-LOC", "O", "O"],
expected_true_positives=0,
expected_false_positives=1,
expected_false_negatives=1,
)
self._testTagSchemeFlags(
sequence_tagger.flag_bioes_tags,
["S-LOC", "O"],
["B-LOC", "E-LOC"],
expected_true_positives=0,
expected_false_positives=1,
expected_false_negatives=1,
)
self._testTagSchemeFlags(
sequence_tagger.flag_bioes_tags,
[
"B-ORG",
"E-ORG",
"O",
"B-PER",
"E-PER",
"O",
"O",
"O",
"O",
"B-MISC",
"E-MISC",
"O",
],
[
"B-ORG",
"E-ORG",
"S-PER",
"S-PER",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"S-MISC",
],
expected_true_positives=1,
expected_false_positives=3,
expected_false_negatives=2,
)
if __name__ == "__main__":
tf.test.main()
|