the-stack_0_14306
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Two step v2-compatible pipeline."""
from kfp import components, dsl
from kfp.components import InputPath, OutputPath
def preprocess(uri: str, some_int: int, output_parameter_one: OutputPath(int),
output_dataset_one: OutputPath('Dataset')):
"""Dummy Preprocess Step."""
with open(output_dataset_one, 'w') as f:
f.write('Output dataset')
with open(output_parameter_one, 'w') as f:
f.write("{}".format(1234))
preprocess_op = components.create_component_from_func(
preprocess, base_image='python:3.9')
@components.create_component_from_func
def train_op(dataset: InputPath('Dataset'),
model: OutputPath('Model'),
num_steps: int = 100):
"""Dummy Training Step."""
with open(dataset, 'r') as input_file:
input_string = input_file.read()
with open(model, 'w') as output_file:
for i in range(num_steps):
output_file.write("Step {}\n{}\n=====\n".format(
i, input_string))
@dsl.pipeline(name='two_step_pipeline')
def two_step_pipeline():
preprocess_task = preprocess_op(uri='uri-to-import', some_int=12)
train_task = train_op(
num_steps=preprocess_task.outputs['output_parameter_one'],
dataset=preprocess_task.outputs['output_dataset_one'])
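# A minimal sketch of how such a pipeline could be compiled with the KFP SDK;
# the output package name below is a hypothetical choice, not part of the
# original file.
# from kfp import compiler
# compiler.Compiler().compile(
#     pipeline_func=two_step_pipeline,
#     package_path='two_step_pipeline.yaml')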
the-stack_0_14307
# -*- coding: utf-8 -*-
"""
Django settings for nectR Tutoring project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (nectr/config/settings/base.py - 3 = nectr/)
APPS_DIR = ROOT_DIR.path('nectr')
# Load operating system environment variables and then prepare to use them
env = environ.Env()
# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating System Environment variables have precedence over variables defined in the .env file,
# that is to say variables from the .env files will only be used if not defined
# as environment variables.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
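# A hypothetical .env illustrating variables this settings module reads; the
# values are placeholders, not taken from the original project.
# DJANGO_READ_DOT_ENV_FILE=True
# DJANGO_DEBUG=True
# DATABASE_URL=postgres://user:password@localhost:5432/nectr
# CELERY_BROKER_URL=django://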
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
'django.contrib.humanize',
# Admin
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
# 'haystack', # search
'postman', # messaging
'channels' # chat
]
# Apps specific for this project go here.
LOCAL_APPS = [
# custom users app
'nectr.users.apps.UsersConfig',
# Your stuff: custom apps go here
'nectr.student.apps.StudentConfig',
'nectr.tutor.apps.TutorConfig',
'nectr.dashboard.apps.DashboardConfig',
'nectr.courses.apps.CoursesConfig',
'nectr.skills.apps.SkillsConfig',
'nectr.chat.apps.ChatConfig',
'nectr.schedule.apps.ScheduleConfig'
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'nectr.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
('Brandon', '[email protected]'),
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///nectr'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates'))
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'nectr.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'nectr.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
########## CELERY
INSTALLED_APPS += ['nectr.taskapp.celery.CeleryConfig']
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ['kombu.transport.django']
BROKER_URL = env('CELERY_BROKER_URL', default='django://')
if BROKER_URL == 'django://':
CELERY_RESULT_BACKEND = 'redis://'
else:
CELERY_RESULT_BACKEND = BROKER_URL
########## END CELERY
# django-compressor
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['compressor']
STATICFILES_FINDERS += ['compressor.finders.CompressorFinder']
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# Search Integration using Haystack
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': 'http://127.0.0.1:8983/solr'
# ...or for multicore...
# 'URL': 'http://127.0.0.1:8983/solr/mysite',
},
}
# Basic channels configuration
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgiref.inmemory.ChannelLayer",
"ROUTING": "config.routing.channel_routing",
},
}
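# The "ROUTING" entry above expects config/routing.py to expose a
# channel_routing list (channels 1.x style). A minimal placeholder sketch,
# assuming no consumers are wired up yet:
# channel_routing = []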
the-stack_0_14309
# -*- coding: utf-8 -*-
import argparse
from functools import partial
from moviepy.editor import VideoFileClip, CompositeVideoClip
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
import os
from PIL import Image, ImageDraw, ImageFont
from pprint import pprint
import subprocess
import sys
from lib.collection_utils import *
from lib.color_utils import *
from lib.io_utils import *
from lib.math_utils import *
from lib.processing_utils import *
from lib.text_utils import *
from lib.video_utils import *
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="path/to/item.mp4", help="Input media file")
parser.add_argument('-sdata', dest="SAMPLE_DATA_FILE", default="path/to/sampledata.csv", help="Input csv sample data file")
parser.add_argument('-pdata', dest="PHRASE_DATA_FILE", default="", help="Input csv phrase data file; blank if none")
parser.add_argument('-outframe', dest="OUTPUT_FRAME", default="tmp/item_viz/frame.%s.png", help="Temporary output frames pattern")
parser.add_argument('-width', dest="WIDTH", default=1280, type=int, help="Output video width")
parser.add_argument('-height', dest="HEIGHT", default=720, type=int, help="Output video height")
parser.add_argument('-fsize', dest="FONT_SIZE", default=24, type=int, help="Font size of timecode")
parser.add_argument('-speed', dest="SPEED", default=48.0, type=float, help="Speed of viz in pixels per second")
parser.add_argument('-out', dest="OUTPUT_FILE", default="output/item_viz.mp4", help="Output media file")
parser.add_argument('-quality', dest="QUALITY", default="high", help="High quality is slower")
parser.add_argument('-threads', dest="THREADS", default=3, type=int, help="Amount of parallel frames to process")
parser.add_argument('-probe', dest="PROBE", action="store_true", help="Just view statistics?")
parser.add_argument('-overwrite', dest="OVERWRITE", action="store_true", help="Overwrite existing frames?")
addTextArguments(parser)
a = parser.parse_args()
aa = vars(a)
MARGIN = min(roundInt(a.HEIGHT * 0.1), 20)
PHRASE_HEIGHT = MARGIN * 2
fieldNames, sampledata = readCsv(a.SAMPLE_DATA_FILE)
phrasedata = []
if len(a.PHRASE_DATA_FILE) > 0:
_, phrasedata = readCsv(a.PHRASE_DATA_FILE)
phrasedata = addNormalizedValues(phrasedata, "clarity", "nclarity")
hasPhrases = len(phrasedata) > 0
makeDirectories([a.OUTPUT_FRAME, a.OUTPUT_FILE])
# determine video properties from the first clip
baseVideo = VideoFileClip(a.INPUT_FILE)
width, height = baseVideo.size
fps = round(baseVideo.fps, 2)
duration = baseVideo.duration
print("Base video: (%s x %s) %sfps %s" % (width, height, fps, formatSeconds(duration)))
if a.PROBE:
sys.exit()
# Make the base video smaller and place in the center-ish
vratio = 1.0 * width / height
vh = roundInt(a.HEIGHT / 2.0)
vw = roundInt(vh * vratio)
vx = roundInt((a.WIDTH - vw) * 0.5)
vy = roundInt((a.HEIGHT - vh) * 0.25)
baseVideo = baseVideo.resize((vw, vh)).set_pos((vx, vy))
# Determine size/positioning of timecode text
font = ImageFont.truetype(font=a.FONT_DIR+a.DEFAULT_FONT_FILE, size=a.FONT_SIZE, layout_engine=ImageFont.LAYOUT_RAQM)
ftemplate = "00:00" if duration < 60 * 60 else "00:00:00"
fwidth, fheight = font.getsize(ftemplate)
tx = roundInt((a.WIDTH - fwidth) * 0.5)
ty = vy + vh + MARGIN
# Assign times, colors, and dimensions to sampledata
sy = ty + fheight + MARGIN
maxSHeight = a.HEIGHT - sy - MARGIN * 0.5
if hasPhrases:
maxSHeight = a.HEIGHT - PHRASE_HEIGHT - sy - MARGIN
if maxSHeight < 10:
print("Data height too small")
sys.exit()
sampledata = addNormalizedValues(sampledata, "clarity", "nclarity")
sampledata = addNormalizedValues(sampledata, "power", "npower")
totalSequenceWidth = duration * a.SPEED
cx = a.WIDTH * 0.5
seqX0 = cx
seqX1 = cx - totalSequenceWidth
for i, s in enumerate(sampledata):
sampledata[i]["color"] = getColorGradientValue(s["nclarity"])
# determine pos and size
nx = s["start"] / 1000.0 / duration
nw = s["dur"] / 1000.0 / duration
nh = s["npower"]
myH = max(roundInt(maxSHeight * nh), 4)
sampledata[i]["sY"] = roundInt(sy + (maxSHeight - myH))
sampledata[i]["sX"] = roundInt(totalSequenceWidth * nx)
sampledata[i]["sW"] = roundInt(totalSequenceWidth * nw)
sampledata[i]["sH"] = myH
# calculate dimensions for phrase data
for i, p in enumerate(phrasedata):
nx = p["start"] / 1000.0 / duration
nw = p["dur"] / 1000.0 / duration
phrasedata[i]["sY"] = roundInt(sy + maxSHeight + MARGIN)
phrasedata[i]["sW"] = roundInt(totalSequenceWidth * nw)
phrasedata[i]["sX"] = roundInt(totalSequenceWidth * nx)
phrasedata[i]["sH"] = roundInt(PHRASE_HEIGHT)
phrasedata[i]["color"] = getColorGradientValue(lerp((0.5, 1.0), p["nclarity"]))
# Generate annotation frames
frameProps = []
totalFrames = msToFrame(roundInt(duration*1000), fps)
for i in range(totalFrames):
frame = i+1
filename = a.OUTPUT_FRAME % zeroPad(frame, totalFrames)
frameProps.append({
"frame": frame,
"filename": filename
})
def doFrame(p, totalFrames, drawData):
global a
global MARGIN
global cx
global duration
global seqX0
global seqX1
global font
global tx
global ty
global sy
global maxSHeight
if os.path.isfile(p["filename"]):
return
im = Image.new(mode="RGB", size=(a.WIDTH, a.HEIGHT), color=(0, 0, 0))
draw = ImageDraw.Draw(im)
nprogress = 1.0 * (p["frame"] - 1) / (totalFrames - 1)
# draw text
seconds = duration * nprogress
timestring = formatSeconds(seconds)
draw.text((tx, ty), timestring, font=font, fill=(255, 255, 255))
xoffset = lerp((seqX0, seqX1), nprogress)
for s in drawData:
if s["sH"] <= 0:
continue
x0 = s["sX"] + xoffset
x1 = x0 + s["sW"]
if x0 < a.WIDTH and x1 > 0:
draw.rectangle([x0, s["sY"], x1, s["sY"]+s["sH"]], fill=s["color"], outline=(0,0,0), width=1)
draw.line([(cx, sy), (cx, sy + maxSHeight)], fill=(255, 255, 255), width=1)
del draw
im.save(p["filename"])
sys.stdout.write('\r')
sys.stdout.write("Wrote %s to file" % p["filename"])
sys.stdout.flush()
if a.OVERWRITE:
removeFiles(a.OUTPUT_FRAME % "*")
drawData = sampledata + phrasedata
threads = getThreadCount(a.THREADS)
pool = ThreadPool(threads)
pclipsToFrame = partial(doFrame, totalFrames=totalFrames, drawData=drawData)
pool.map(pclipsToFrame, frameProps)
pool.close()
pool.join()
annotationVideoFn = appendToBasename(a.OUTPUT_FILE, "_annotation")
if a.OVERWRITE or not os.path.isfile(annotationVideoFn):
compileFrames(a.OUTPUT_FRAME, fps, annotationVideoFn, getZeroPadding(totalFrames))
annotationVideo = VideoFileClip(annotationVideoFn, audio=False)
clips = [annotationVideo, baseVideo]
video = CompositeVideoClip(clips, size=(a.WIDTH, a.HEIGHT))
video = video.set_duration(duration)
if a.QUALITY == "high":
video.write_videofile(a.OUTPUT_FILE, preset="slow", audio_bitrate="256k", audio_fps=48000, bitrate="19820k")
else:
video.write_videofile(a.OUTPUT_FILE)
print("Wrote %s to file" % a.OUTPUT_FILE)
the-stack_0_14311
"""
Shortest path algorithms for unweighted graphs.
"""
import networkx as nx
from multiprocessing import Pool
__all__ = ['bidirectional_shortest_path',
'single_source_shortest_path',
'single_source_shortest_path_length',
'single_target_shortest_path',
'single_target_shortest_path_length',
'all_pairs_shortest_path',
'all_pairs_shortest_path_length',
'predecessor']
def single_source_shortest_path_length(G, source, cutoff=None):
"""Compute the shortest path lengths from source to all reachable nodes.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dict
Dict keyed by node to shortest path length to source.
Examples
--------
>>> G = nx.path_graph(5)
>>> length = nx.single_source_shortest_path_length(G, 0)
>>> length[4]
4
>>> for node in length:
... print(f"{node}: {length[node]}")
0: 0
1: 1
2: 2
3: 3
4: 4
See Also
--------
shortest_path_length
"""
if source not in G:
raise nx.NodeNotFound(f'Source {source} is not in G')
if cutoff is None:
cutoff = float('inf')
nextlevel = {source: 1}
return dict(_single_shortest_path_length(G.adj, nextlevel, cutoff))
def _single_shortest_path_length(adj, firstlevel, cutoff):
"""Yields (node, level) in a breadth first search
Shortest Path Length helper function
Parameters
----------
adj : dict
Adjacency dict or view
firstlevel : dict
starting nodes, e.g. {source: 1} or {target: 1}
cutoff : int or float
level at which we stop the process
"""
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
nextlevel = set(firstlevel) # set of nodes to check at next level
n = len(adj)
while nextlevel and cutoff >= level:
thislevel = nextlevel # advance to next level
nextlevel = set() # and start a new set (fringe)
found = []
for v in thislevel:
if v not in seen:
seen[v] = level # set the level of vertex v
found.append(v)
yield (v, level)
if len(seen) == n:
return
for v in found:
nextlevel.update(adj[v])
level += 1
del seen
def single_target_shortest_path_length(G, target, cutoff=None):
"""Compute the shortest path lengths to target from all reachable nodes.
Parameters
----------
G : NetworkX graph
target : node
Target node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : iterator
(source, shortest path length) iterator
Examples
--------
>>> G = nx.path_graph(5, create_using=nx.DiGraph())
>>> length = dict(nx.single_target_shortest_path_length(G, 4))
>>> length[0]
4
>>> for node in range(5):
... print(f"{node}: {length[node]}")
0: 4
1: 3
2: 2
3: 1
4: 0
See Also
--------
single_source_shortest_path_length, shortest_path_length
"""
if target not in G:
raise nx.NodeNotFound(f'Target {target} is not in G')
if cutoff is None:
cutoff = float('inf')
# handle either directed or undirected
adj = G.pred if G.is_directed() else G.adj
nextlevel = {target: 1}
return _single_shortest_path_length(adj, nextlevel, cutoff)
def all_pairs_shortest_path_length(G, cutoff=None, parallel=False):
"""Computes the shortest path lengths between all nodes in `G`.
Parameters
----------
G : NetworkX graph
    cutoff : integer, optional
        Depth at which to stop the search. Only paths of length at most
        `cutoff` are returned.
    parallel : bool, optional (default=False)
        If True, distribute the per-source computations over a
        multiprocessing Pool instead of computing them sequentially.
Returns
-------
lengths : iterator
(source, dictionary) iterator with dictionary keyed by target and
shortest path length as the key value.
Notes
-----
The iterator returned only has reachable node pairs.
Examples
--------
>>> G = nx.path_graph(5)
>>> length = dict(nx.all_pairs_shortest_path_length(G))
>>> for node in [0, 1, 2, 3, 4]:
... print(f"1 - {node}: {length[1][node]}")
1 - 0: 1
1 - 1: 0
1 - 2: 1
1 - 3: 2
1 - 4: 3
>>> length[3][2]
1
>>> length[2][2]
0
"""
length = single_source_shortest_path_length
if parallel:
with Pool() as pool:
for n in G:
yield (n, pool.apply(length, (G, n, cutoff)))
else:
for n in G:
yield (n, length(G, n, cutoff=cutoff))
def bidirectional_shortest_path(G, source, target):
"""Returns a list of nodes in a shortest path between source and target.
Parameters
----------
G : NetworkX graph
source : node label
starting node for path
target : node label
ending node for path
Returns
-------
path: list
List of nodes in a path from source to target.
Raises
------
NetworkXNoPath
If no path exists between source and target.
See Also
--------
shortest_path
Notes
-----
This algorithm is used by shortest_path(G, source, target).
"""
if source not in G or target not in G:
msg = f"Either source {source} or target {target} is not in G"
raise nx.NodeNotFound(msg)
# call helper to do the real work
results = _bidirectional_pred_succ(G, source, target)
pred, succ, w = results
# build path from pred+w+succ
path = []
# from source to w
while w is not None:
path.append(w)
w = pred[w]
path.reverse()
# from w to target
w = succ[path[-1]]
while w is not None:
path.append(w)
w = succ[w]
return path
def _bidirectional_pred_succ(G, source, target):
"""Bidirectional shortest path helper.
Returns (pred, succ, w) where
pred is a dictionary of predecessors from w to the source, and
succ is a dictionary of successors from w to the target.
"""
# does BFS from both source and target and meets in the middle
if target == source:
return ({target: None}, {source: None}, source)
# handle either directed or undirected
if G.is_directed():
Gpred = G.pred
Gsucc = G.succ
else:
Gpred = G.adj
Gsucc = G.adj
    # predecessor and successors in search
pred = {source: None}
succ = {target: None}
# initialize fringes, start with forward
forward_fringe = [source]
reverse_fringe = [target]
while forward_fringe and reverse_fringe:
if len(forward_fringe) <= len(reverse_fringe):
this_level = forward_fringe
forward_fringe = []
for v in this_level:
for w in Gsucc[v]:
if w not in pred:
forward_fringe.append(w)
pred[w] = v
if w in succ: # path found
return pred, succ, w
else:
this_level = reverse_fringe
reverse_fringe = []
for v in this_level:
for w in Gpred[v]:
if w not in succ:
succ[w] = v
reverse_fringe.append(w)
if w in pred: # found path
return pred, succ, w
raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
def single_source_shortest_path(G, source, cutoff=None):
"""Compute shortest path between source
and all other nodes reachable from source.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dictionary
Dictionary, keyed by target, of shortest paths.
Examples
--------
>>> G = nx.path_graph(5)
>>> path = nx.single_source_shortest_path(G, 0)
>>> path[4]
[0, 1, 2, 3, 4]
Notes
-----
The shortest path is not necessarily unique. So there can be multiple
paths between the source and each target node, all of which have the
same 'shortest' length. For each target node, this function returns
only one of those paths.
See Also
--------
shortest_path
"""
if source not in G:
raise nx.NodeNotFound(f"Source {source} not in G")
def join(p1, p2):
return p1 + p2
if cutoff is None:
cutoff = float('inf')
nextlevel = {source: 1} # list of nodes to check at next level
paths = {source: [source]} # paths dictionary (paths to key from source)
return dict(_single_shortest_path(G.adj, nextlevel, paths, cutoff, join))
def _single_shortest_path(adj, firstlevel, paths, cutoff, join):
"""Returns shortest paths
Shortest Path helper function
Parameters
----------
adj : dict
Adjacency dict or view
firstlevel : dict
starting nodes, e.g. {source: 1} or {target: 1}
paths : dict
paths for starting nodes, e.g. {source: [source]}
cutoff : int or float
level at which we stop the process
join : function
function to construct a path from two partial paths. Requires two
list inputs `p1` and `p2`, and returns a list. Usually returns
`p1 + p2` (forward from source) or `p2 + p1` (backward from target)
"""
level = 0 # the current level
nextlevel = firstlevel
while nextlevel and cutoff > level:
thislevel = nextlevel
nextlevel = {}
for v in thislevel:
for w in adj[v]:
if w not in paths:
paths[w] = join(paths[v], [w])
nextlevel[w] = 1
level += 1
return paths
def single_target_shortest_path(G, target, cutoff=None):
"""Compute shortest path to target from all nodes that reach target.
Parameters
----------
G : NetworkX graph
target : node label
Target node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dictionary
Dictionary, keyed by target, of shortest paths.
Examples
--------
>>> G = nx.path_graph(5, create_using=nx.DiGraph())
>>> path = nx.single_target_shortest_path(G, 4)
>>> path[0]
[0, 1, 2, 3, 4]
Notes
-----
The shortest path is not necessarily unique. So there can be multiple
paths between the source and each target node, all of which have the
same 'shortest' length. For each target node, this function returns
only one of those paths.
See Also
--------
shortest_path, single_source_shortest_path
"""
if target not in G:
raise nx.NodeNotFound(f"Target {target} not in G")
def join(p1, p2):
return p2 + p1
# handle undirected graphs
adj = G.pred if G.is_directed() else G.adj
if cutoff is None:
cutoff = float('inf')
nextlevel = {target: 1} # list of nodes to check at next level
paths = {target: [target]} # paths dictionary (paths to key from source)
return dict(_single_shortest_path(adj, nextlevel, paths, cutoff, join))
def all_pairs_shortest_path(G, cutoff=None, parallel=False):
"""Compute shortest paths between all nodes.
Parameters
----------
G : NetworkX graph
    cutoff : integer, optional
        Depth at which to stop the search. Only paths of length at most
        `cutoff` are returned.
    parallel : bool, optional (default=False)
        If True, distribute the per-source computations over a
        multiprocessing Pool instead of computing them sequentially.
Returns
-------
lengths : dictionary
Dictionary, keyed by source and target, of shortest paths.
Examples
--------
>>> G = nx.path_graph(5)
>>> path = dict(nx.all_pairs_shortest_path(G))
>>> print(path[0][4])
[0, 1, 2, 3, 4]
See Also
--------
floyd_warshall()
"""
if parallel:
with Pool() as pool:
for n in G:
yield (n, pool.apply(single_source_shortest_path, (G, n, cutoff)))
else:
for n in G:
yield (n, single_source_shortest_path(G, n, cutoff=cutoff))
def predecessor(G, source, target=None, cutoff=None, return_seen=None):
"""Returns dict of predecessors for the path from source to all nodes in G
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
target : node label, optional
Ending node for path. If provided only predecessors between
source and target are returned
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
pred : dictionary
Dictionary, keyed by node, of predecessors in the shortest path.
Examples
--------
>>> G = nx.path_graph(4)
>>> list(G)
[0, 1, 2, 3]
>>> nx.predecessor(G, 0)
{0: [], 1: [0], 2: [1], 3: [2]}
"""
if source not in G:
raise nx.NodeNotFound(f"Source {source} not in G")
level = 0 # the current level
nextlevel = [source] # list of nodes to check at next level
seen = {source: level} # level (number of hops) when seen in BFS
pred = {source: []} # predecessor dictionary
while nextlevel:
level = level + 1
thislevel = nextlevel
nextlevel = []
for v in thislevel:
for w in G[v]:
if w not in seen:
pred[w] = [v]
seen[w] = level
nextlevel.append(w)
elif (seen[w] == level): # add v to predecessor list if it
pred[w].append(v) # is at the correct level
if (cutoff and cutoff <= level):
break
if target is not None:
if return_seen:
if target not in pred:
return ([], -1) # No predecessor
return (pred[target], seen[target])
else:
if target not in pred:
return [] # No predecessor
return pred[target]
else:
if return_seen:
return (pred, seen)
else:
return pred
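# A small usage sketch for the parallel flag added to the all-pairs functions
# above; the graph and the __main__ guard are illustrative assumptions.
# if __name__ == "__main__":
#     G = nx.path_graph(6)
#     lengths = dict(all_pairs_shortest_path_length(G, parallel=True))
#     paths = dict(all_pairs_shortest_path(G, parallel=True))
#     print(lengths[0][5], paths[0][5])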
the-stack_0_14315
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 12:30:52 2020
@author: nastavirs
"""
import tensorflow as tf
import numpy as np
def initialize_NN(self, layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0,num_layers-1):
W = self.xavier_init(size=[layers[l], layers[l+1]])
b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
weights.append(W)
biases.append(b)
    return weights, biases
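# initialize_NN above calls a xavier_init method that is not included in this
# snippet. The following standalone sketch shows the usual Xavier/Glorot
# initializer such code implies; it is an assumption, not the original
# author's implementation.
def xavier_init(size):
    in_dim = size[0]
    out_dim = size[1]
    # standard deviation suggested by Glorot & Bengio for an [in_dim, out_dim] layer
    xavier_stddev = np.sqrt(2.0 / (in_dim + out_dim))
    return tf.Variable(tf.random.truncated_normal([in_dim, out_dim], stddev=xavier_stddev, dtype=tf.float32),
                       dtype=tf.float32)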
the-stack_0_14318
import glob, imp, os
IPHONE_UA = "Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_1 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A403 Safari/602.1"
def discover_drivers():
cdir = os.path.dirname(os.path.realpath(__file__))
drivers = list(filter(lambda p: not os.path.basename(p).startswith('_'), glob.glob(os.path.join(cdir, '*.py'))))
return dict([(os.path.basename(os.path.splitext(driver)[0]), driver) for driver in drivers])
def get_driver_by_name(name):
for driver_name, driver_path in discover_drivers().items():
if driver_name == name:
return imp.load_source(driver_name, driver_path)
    raise ModuleNotFoundError("%s driver not found" % name)
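# Hypothetical usage; "example_driver" stands for any example_driver.py file
# placed next to this module and is not a real driver name.
# driver = get_driver_by_name("example_driver")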
the-stack_0_14319
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 the HERA Project
# Licensed under the MIT License
from hera_qm import utils
from hera_qm.auto_metrics import auto_metrics_run
import sys
ap = utils.get_metrics_ArgumentParser('auto_metrics')
args = ap.parse_args()
history = ' '.join(sys.argv)
auto_metrics_run(args.metric_outfile,
args.raw_auto_files,
median_round_modz_cut=args.median_round_modz_cut,
mean_round_modz_cut=args.mean_round_modz_cut,
edge_cut=args.edge_cut,
Kt=args.Kt,
Kf=args.Kf,
sig_init=args.sig_init,
sig_adj=args.sig_adj,
chan_thresh_frac=args.chan_thresh_frac,
history=history,
overwrite=args.clobber)
the-stack_0_14325
#import dependencies
import os
import csv
#declare csv file path
data = os.path.join("..", "Resources", "budget_data.csv")
#read csv file
with open(data, newline="") as csvfile:
csv_reader = csv.reader(csvfile, delimiter=",")
csv_header = next(csvfile)
#determine total months and net amount of profit/loss
months = []
profit_loss = []
for rows in csv_reader:
months.append(rows[0])
profit_loss.append(int(rows[1]))
total_months = len(months)
total_pl = sum(profit_loss)
#determine change in profit/losses to determine average, min, max changes
pl_change = []
for x in range(1, len(profit_loss)):
pl_change.append(int(profit_loss[x]-int(profit_loss[x-1])))
pl_average = sum(pl_change) / len(pl_change)
greatest_increase = max(pl_change)
greatest_decrease = min(pl_change)
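#worked example with made-up numbers: profit_loss = [100, 120, 90]
#gives pl_change = [20, -30], average -5.0, greatest increase 20, greatest decrease -30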
#print results
print("Financial Analysis")
print("_____________________________")
print("Total Months: " + str(total_months))
print("Total: " + "$" + str(total_pl))
print("Average Change: " + "$" + str(pl_average))
print("Greatest Increase In Profit: " + "$" + str(greatest_increase))
print("Greatest Decrease In Profit: " + "$" + str(greatest_decrease))
#export results to text file
file = open("analysis.txt", "w")
file.write("Financial Analysis" + "\n")
file.write("_____________________________" + "\n")
file.write("Total Months: " + str(total_months) + "\n")
file.write("Total: " + "$" + str(total_pl) + "\n")
file.write("Average Change: " + "$" + str(pl_average) + "\n")
file.write("Greatest Increase In Profit: " + "$" + str(greatest_increase) + "\n")
file.write("Greatest Decrease In Profit: " + "$" + str(greatest_decrease) + "\n")
the-stack_0_14326
from django.apps import apps
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from .models import Mail, MailTemplate, Attachment, TemplateAttachment
from .tasks import send_asynchronous_mail
from .utils import create_and_send_mail
from django.core.exceptions import ImproperlyConfigured
class TemplateAttachmentInline(admin.TabularInline):
model = TemplateAttachment
@admin.register(Attachment)
class AttachmentAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
ordering = ('-time_created',)
search_fields = ('name', )
readonly_fields = ('time_created', )
@admin.register(MailTemplate)
class MailTemplateAdmin(admin.ModelAdmin):
def test_mail_template(self, request, queryset):
mails_sent = 0
if not settings.DJANGO_SAAS_TEST_EMAIL_ADDRESS:
raise ImproperlyConfigured(
"You need to add [email protected] to test emails.")
for template in queryset:
create_and_send_mail(
template_name=template.name, context={}, to_address=settings.DJANGO_SAAS_TEST_EMAIL_ADDRESS)
mails_sent += 1
if mails_sent == 1:
message_bit = _("1 Mail template was")
else:
message_bit = _("%s Mail templates were") % mails_sent
self.message_user(request, "%s tested" % message_bit)
test_mail_template.short_description = "Send test mail now"
list_display = ('name', 'subject')
search_fields = []
ordering = ('name',)
actions = [test_mail_template, ]
inlines = [TemplateAttachmentInline, ]
model_class_name = getattr(settings, "DJANGO_SAAS_EMAIL_MAIL_MODEL", "django_saas_email.mail")
model_class = apps.get_model(*model_class_name.split())
@admin.register(model_class)
class MailAdmin(admin.ModelAdmin):
def send_mail_now(self, request, queryset):
mails_sent = 0
for mail in queryset:
send_asynchronous_mail(str(mail.id), settings.USE_SENDGRID)
mails_sent += 1
if mails_sent == 1:
message_bit = _("1 Mail was")
else:
message_bit = _("%s Mails were") % mails_sent
self.message_user(request, "%s sent" % message_bit)
send_mail_now.short_description = "Send mail now"
list_display = ('id', 'time_created', 'from_address', 'to_address', 'cc_address', 'template', 'subject', 'context',)
search_fields = ['from_address', 'to_address', 'cc_address', 'subject', 'context', ]
ordering = ('-time_created',)
list_filter = ('time_created', 'template')
actions = [send_mail_now, ]
readonly_fields = (
'time_created', 'time_sent', 'time_delivered', 'used_backend', 'delivery_mail_id', 'delivery_status')
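# The admin classes above rely on a few project settings; a hypothetical
# sketch of what settings.py might define (names taken from the references
# above, values are placeholders):
# DJANGO_SAAS_EMAIL_MAIL_MODEL = "django_saas_email.mail"
# DJANGO_SAAS_TEST_EMAIL_ADDRESS = "test@example.com"
# USE_SENDGRID = False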
the-stack_0_14327
# ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Plotting utilities to visualize training logs.
"""
import torch
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path, PurePath
def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):
'''
Function to plot specific fields from training log(s). Plots both training and test results.
:: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
- fields = which results to plot from each log file - plots both training and test for each field.
- ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
- log_name = optional, name of log file if different than default 'log.txt'.
:: Outputs - matplotlib plots of results in fields, color coded for each log file.
- solid lines are training results, dashed lines are test results.
'''
func_name = "plot_utils.py::plot_logs"
# verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
# convert single Path to list to avoid 'not iterable' error
if not isinstance(logs, list):
if isinstance(logs, PurePath):
logs = [logs]
print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
else:
raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \
Expect list[Path] or single Path obj, received {type(logs)}")
# verify valid dir(s) and that every item in list is Path object
for i, dir in enumerate(logs):
if not isinstance(dir, PurePath):
raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}")
if dir.exists():
continue
raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}")
# load log file(s) and plot
dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]
fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))
for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
for j, field in enumerate(fields):
if field == 'mAP':
coco_eval = pd.DataFrame(pd.np.stack(df.test_coco_eval.dropna().values)[:, 1]).ewm(com=ewm_col).mean()
axs[j].plot(coco_eval, c=color)
else:
df.interpolate().ewm(com=ewm_col).mean().plot(
y=[f'train_{field}', f'test_{field}'],
ax=axs[j],
color=[color] * 2,
style=['-', '--']
)
for ax, field in zip(axs, fields):
ax.legend([Path(p).name for p in logs])
ax.set_title(field)
def plot_precision_recall(files, naming_scheme='iter'):
if naming_scheme == 'exp_id':
# name becomes exp_id
names = [f.parts[-3] for f in files]
elif naming_scheme == 'iter':
names = [f.stem for f in files]
else:
raise ValueError(f'not supported {naming_scheme}')
fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
data = torch.load(f)
# precision is n_iou, n_points, n_cat, n_area, max_det
precision = data['precision']
recall = data['params'].recThrs
scores = data['scores']
# take precision for all classes, all areas and 100 detections
precision = precision[0, :, :, 0, -1].mean(1)
scores = scores[0, :, :, 0, -1].mean(1)
prec = precision.mean()
rec = data['recall'][0, :, 0, -1].mean()
print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +
f'score={scores.mean():0.3f}, ' +
f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'
)
axs[0].plot(recall, precision, c=color)
axs[1].plot(recall, scores, c=color)
axs[0].set_title('Precision / Recall')
axs[0].legend(names)
axs[1].set_title('Scores / Recall')
axs[1].legend(names)
return fig, axs
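# A hypothetical call, assuming two experiment directories that each contain a
# log.txt in the format expected above:
# plot_logs([Path('runs/exp1'), Path('runs/exp2')],
#           fields=('class_error', 'loss_bbox_unscaled', 'mAP'))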
the-stack_0_14328
"""
Module of functions involving great circles
(thus assuming spheroid model of the earth)
with points given in longitudes and latitudes.
"""
from __future__ import print_function
import math
import numpy
import numpy.random
# Equatorial radius of the earth in kilometers
EARTH_ER = 6378.137
# Authalic radius of the earth in kilometers
EARTH_AR = 6371.007
# Meridional radius of the earth in kilometers
EARTH_MR = 6367.449
# Polar radius of the earth in kilometers
EARTH_PR = 6356.752
DEG2RAD = math.pi / 180.0
RAD2DEG = 180.0 / math.pi
KM2MI = 0.6213712
MI2KM = 1.609344
def lonlatdistance(pt1lon, pt1lat, pt2lon, pt2lat):
"""
Compute the great circle distance between two points
on a sphere using the haversine formula.
Arguments:
pt1lon - longitude(s) of the first point
pt1lat - latitude(s) of the first point
pt2lon - longitude(s) of the second point
pt2lat - latitude(s) of the second point
Returns:
The great circle distance(s) in degrees [0.0, 180.0]
"""
lon1 = numpy.deg2rad(numpy.asarray(pt1lon, dtype=float))
lat1 = numpy.deg2rad(numpy.asarray(pt1lat, dtype=float))
lon2 = numpy.deg2rad(numpy.asarray(pt2lon, dtype=float))
lat2 = numpy.deg2rad(numpy.asarray(pt2lat, dtype=float))
dellat = numpy.power(numpy.sin(0.5 * (lat2 - lat1)), 2.0)
dellon = numpy.cos(lat1) * numpy.cos(lat2) * \
numpy.power(numpy.sin(0.5 * (lon2 - lon1)), 2.0)
dist = 2.0 * numpy.arcsin(numpy.power(dellon + dellat, 0.5))
return numpy.rad2deg(dist)
def lonlatintersect(gc1lon1, gc1lat1, gc1lon2, gc1lat2,
gc2lon1, gc2lat1, gc2lon2, gc2lat2):
"""
Compute the intersections of two great circles. Uses the line of
intersection between the two planes of the great circles.
Arguments:
gc1lon1 - longitude(s) of the first point on the first great circle
gc1lat1 - latitude(s) of the first point on the first great circle
gc1lon2 - longitude(s) of the second point on the first great circle
gc1lat2 - latitude(s) of the second point on the first great circle
gc2lon1 - longitude(s) of the first point on the second great circle
gc2lat1 - latitude(s) of the first point on the second great circle
gc2lon2 - longitude(s) of the second point on the second great circle
gc2lat2 - latitude(s) of the second point on the second great circle
Returns:
( (pt1lon, pt1lat), (pt2lon, pt2lat) ) - the longitudes and latitudes
of the two intersections of the two great circles. NaN will
be returned for both longitudes and latitudes if a great
circle is not well-defined, or the two great-circles coincide.
"""
# Minimum acceptable norm of a cross product
# arcsin(1.0E-7) = 0.02" or 0.64 m on the Earth
MIN_NORM = 1.0E-7
# Convert longitudes and latitudes to points on a unit sphere
# The "+ 0.0 * ptlonr" is to broadcast gcz if needed
ptlonr = numpy.deg2rad(numpy.asarray(gc1lon1, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc1lat1, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc1xyz1 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc1lon2, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc1lat2, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc1xyz2 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc2lon1, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc2lat1, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc2xyz1 = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(gc2lon2, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(gc2lat2, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
gc2xyz2 = numpy.array([gcx, gcy, gcz])
# Get the unit-perpendicular to the plane going through the
# origin and the two points on each great circle. If the
# norm of the cross product is too small, the great circle
# is not well-defined, so zero it out so NaN is produced.
gc1pp = numpy.cross(gc1xyz1, gc1xyz2, axis=0)
norm = (gc1pp[0]**2 + gc1pp[1]**2 + gc1pp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gc1pp /= norm
gc2pp = numpy.cross(gc2xyz1, gc2xyz2, axis=0)
norm = (gc2pp[0]**2 + gc2pp[1]**2 + gc2pp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gc2pp /= norm
# The line of intersection of the two planes is perpendicular
# to the two plane-perpendiculars and goes through the origin.
# Points of intersection are the points on this line one unit
# from the origin. If the norm of the cross product is too
# small, the two planes are practically indistinguishable from
# each other (coincide).
pt1xyz = numpy.cross(gc1pp, gc2pp, axis=0)
norm = (pt1xyz[0]**2 + pt1xyz[1]**2 + pt1xyz[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
pt1xyz /= norm
pt2xyz = -1.0 * pt1xyz
# Convert back to longitudes and latitudes
pt1lats = numpy.rad2deg(numpy.arcsin(pt1xyz[2]))
pt1lons = numpy.rad2deg(numpy.arctan2(pt1xyz[1], pt1xyz[0]))
pt2lats = numpy.rad2deg(numpy.arcsin(pt2xyz[2]))
pt2lons = numpy.rad2deg(numpy.arctan2(pt2xyz[1], pt2xyz[0]))
return ( (pt1lons, pt1lats), (pt2lons, pt2lats) )
def lonlatfwdpt(origlon, origlat, endlon, endlat, fwdfact):
"""
Find the longitude and latitude of a point that is a given factor
times the distance along the great circle from an origination point
to an ending point.
Note that the shorter great circle arc from the origination point
to the ending point is always used.
If O is the origination point, E is the ending point, and P is
the point returned from this computation, a factor value of:
0.5: P bisects the great circle arc between O and E
2.0: E bisects the great circle arc between O and P
-1.0: O bisects the great circle arc between P and E
Arguments:
origlon - longitude(s) of the origination point
origlat - latitude(s) of the origination point
endlon - longitude(s) of the ending point
endlat - latitude(s) of the ending point
fwdfact - forward distance factor(s)
Returns:
(ptlon, ptlat) - longitude and latitude of the computed point(s).
NaN will be returned for both the longitude and
latitude if the great circle is not well-defined.
"""
# Minimum acceptable norm of a cross product
# arcsin(1.0E-7) = 0.02" or 0.64 m on the Earth
MIN_NORM = 1.0E-7
# Convert longitudes and latitudes to points on a unit sphere
# The "+ 0.0 * ptlonr" is to broadcast gcz if needed
ptlonr = numpy.deg2rad(numpy.asarray(origlon, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(origlat, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
origxyz = numpy.array([gcx, gcy, gcz])
#
ptlonr = numpy.deg2rad(numpy.asarray(endlon, dtype=float))
ptlatr = numpy.deg2rad(numpy.asarray(endlat, dtype=float))
gcz = numpy.sin(ptlatr) + 0.0 * ptlonr
coslat = numpy.cos(ptlatr)
gcy = coslat * numpy.sin(ptlonr)
gcx = coslat * numpy.cos(ptlonr)
endxyz = numpy.array([gcx, gcy, gcz])
# Determine the rotation matrix about the origin that takes
# origxyz to (1,0,0) (equator and prime meridian) and endxyz
# to (x,y,0) with y > 0 (equator in eastern hemisphere).
#
# The first row of the matrix is origxyz.
#
# The third row of the matrix is the normalized cross product
# of origxyz and endxyz. (The great circle plane perpendicular.)
# If the norm of this cross product is too small, the great
# circle is not well-defined, so zero it out so NaN is produced.
gcpp = numpy.cross(origxyz, endxyz, axis=0)
norm = (gcpp[0]**2 + gcpp[1]**2 + gcpp[2]**2)**0.5
if len(norm.shape) == 0:
if numpy.fabs(norm) < MIN_NORM:
norm = 0.0
else:
norm[ numpy.fabs(norm) < MIN_NORM ] = 0.0
gcpp /= norm
# The second row of the matrix is the cross product of the
# third row (gcpp) and the first row (origxyz). This will
# have norm 1.0 since gcpp and origxyz are perpendicular
# unit vectors.
fwdax = numpy.cross(gcpp, origxyz, axis=0)
# Get the coordinates of the rotated end point.
endtrx = origxyz[0] * endxyz[0] + origxyz[1] * endxyz[1] + origxyz[2] * endxyz[2]
endtry = fwdax[0] * endxyz[0] + fwdax[1] * endxyz[1] + fwdax[2] * endxyz[2]
# Get the angle along the equator of the rotated end point, multiply
# by the given factor, and convert this new angle back to coordinates.
fwdang = numpy.arctan2(endtry, endtrx)
fwdang *= numpy.asarray(fwdfact, dtype=float)
fwdtrx = numpy.cos(fwdang)
fwdtry = numpy.sin(fwdang)
# Rotate the new point back to the original coordinate system
# The inverse rotation matrix is the transpose of that matrix.
fwdx = origxyz[0] * fwdtrx + fwdax[0] * fwdtry
fwdy = origxyz[1] * fwdtrx + fwdax[1] * fwdtry
fwdz = origxyz[2] * fwdtrx + fwdax[2] * fwdtry
# Convert the point coordinates into longitudes and latitudes
ptlat = numpy.rad2deg(numpy.arcsin(fwdz))
ptlon = numpy.rad2deg(numpy.arctan2(fwdy, fwdx))
return (ptlon, ptlat)
def equidistscatter(min_lon, min_lat, max_lon, max_lat, min_gcdist, dfactor=5.0):
"""
Create a roughly equidistant set of points in a specified region.
This is done by creating a dense "grid" of points, then repeatedly
randomly selecting a point from that collection and eliminating
points too close to that selected point. For the special cases
where min_lon and max_lon, or min_lat and max_lat, are very close
relative to min_gcdist, the maximum number of evenly spaced points
that can be put on the line described is computed and assigned.
Arguments:
min_lon - minimum longitude of the region
min_lat - minimum latitude of the region
max_lon - maximum longitude of the region
max_lat - maximum latitude of the region
min_gcdist - minimum distance, in great circle degrees,
between returned points
dfactor - the number of axis points in the dense "grid"
compared to the desired "grid". Larger value will
generally increase the uniformity of the returned
points but will also increase the time required
for the calculation.
Returns:
(pt_lons, pt_lats) - ptlons is an array of longitudes and ptlats
is an array of latitudes of (somewhat random) points in
the specified region that are roughly equidistant from
each other but not closer than min_gcdist to each other.
"""
lonmin = float(min_lon)
lonmax = float(max_lon)
if math.fabs(lonmax - lonmin) > 180.0:
raise ValueError("Difference between max_lon and min_lon is more than 180.0")
latmin = float(min_lat)
if math.fabs(latmin) > 90.0:
raise ValueError("min_lat is not in [-90.0,90.0]")
latmax = float(max_lat)
if math.fabs(latmax) > 90.0:
raise ValueError("max_lat is not in [-90.0,90.0]")
mindeg = float(min_gcdist)
if (mindeg <= 0.0) or (mindeg >= 90.0):
raise ValueError("min_gcdist is not in (0.0,90.0)")
dfact = float(dfactor)
if dfact < 1.0:
raise ValueError("dfactor is less than one");
# If lonmin is relatively close to lonmax, directly
# compute the points. Distance on a meridian is the
# difference in latitudes.
if math.fabs(lonmax - lonmin) < (0.05 * mindeg):
lon = 0.5 * (lonmax + lonmin)
dellat = mindeg
numlats = int( (math.fabs(latmax - latmin) + dellat) / dellat )
if latmax < latmin:
dellat *= -1.0
hdiff = 0.5 * ( (latmax - latmin) - (numlats - 1) * dellat )
latvals = numpy.linspace(latmin + hdiff, latmax - hdiff, numlats)
lonvals = numpy.ones((numlats,), dtype=float) * lon
return (lonvals, latvals)
# If latmin is relatively close to latmax, directly
# compute the points. Distance depends on the latitude
# as well as the difference in longitudes.
if math.fabs(latmax - latmin) < (0.05 * mindeg):
lat = 0.5 * (latmax + latmin)
numer = math.sin(0.5 * DEG2RAD * mindeg)
denom = math.cos(lat * DEG2RAD)
if numer < denom:
dellon = math.asin(numer / denom) * 2.0 * RAD2DEG
numlons = int( (math.fabs(lonmax - lonmin) + dellon) / dellon )
else:
# everything too close to a pole - just select one point
dellon = 180.0
numlons = 1
if lonmax < lonmin:
dellon *= -1.0
hdiff = 0.5 * ( (lonmax - lonmin) - (numlons - 1) * dellon )
lonvals = numpy.linspace(lonmin + hdiff, lonmax - hdiff, numlons)
latvals = numpy.ones((numlons,), dtype=float) * lat
return (lonvals, latvals)
# Get the number of latitudes for the dense grid
# Always use latmin and latmax, even if they are too close
dellat = mindeg / dfact
numlats = int( (math.fabs(latmax - latmin) + dellat) / dellat )
if numlats < 2:
numlats = 2
latvals = numpy.linspace(latmin, latmax, numlats)
# Create the dense grid of longitudes and latitudes
denslons = [ ]
denslats = [ ]
numer = math.sin(0.5 * DEG2RAD * mindeg / dfact)
for lat in latvals:
# Get the number of longitudes for the dense grid
# Always use lonmin and lonmax, even if they are too close
denom = math.cos(lat * DEG2RAD)
if numer < denom:
dellon = math.asin(numer / denom) * 2.0 * RAD2DEG
numlons = int( (math.fabs(lonmax - lonmin) + dellon) / dellon )
if numlons < 2:
numlons = 2
else:
# too close to a pole
numlons = 2
lonvals = numpy.linspace(lonmin, lonmax, numlons)
# Add each lon,lat pair to the dense grid
for lon in lonvals:
denslons.append(lon)
denslats.append(lat)
denslons = numpy.asarray(denslons)
denslats = numpy.asarray(denslats)
# create a random permutation of the indices to use for the selection order
availinds = numpy.random.permutation(len(denslats))
selectinds = [ ]
while len(availinds) > 0:
# Get the index of the next available point
ind = availinds[0]
selectinds.append(ind)
# Compute the distance of the available points to the selected point
gcdists = lonlatdistance(denslons[ind], denslats[ind],
denslons[availinds], denslats[availinds])
# Remove indices of any available points too close to this point
availinds = availinds[ gcdists >= mindeg ]
# sort the selected indices so the longitudes and latitudes have some order
selectinds = numpy.sort(selectinds)
# get the selected longitudes and latitudes
selectlons = denslons[selectinds]
selectlats = denslats[selectinds]
# return the selected longitudes and latitudes arrays
return (selectlons, selectlats)
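# A small illustrative call; the region bounds and 5-degree spacing below are
# arbitrary example values, not taken from the module.
# lons, lats = equidistscatter(-130.0, 20.0, -60.0, 50.0, 5.0)
# print(len(lons), "roughly equidistant points generated")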
#
# The following is just for testing "by-hand" and to serve as examples.
#
if __name__ == "__main__":
# Test lonlatdistance
tenten = numpy.linspace(0.0,90.0,10)
# On the equator, distance = delta longitude
dists = lonlatdistance(0.0, 0.0, tenten, 0.0)
if not numpy.allclose(dists, tenten):
raise ValueError("Equatorial distances FAIL; expect: %s; found: %s" % (str(tenten), str(dists)))
print("Equatorial distance: PASS")
print()
# On any meridian, distance = delta latitude
dists = lonlatdistance(20.0, 0.0, 20.0, tenten)
if not numpy.allclose(dists, tenten):
raise ValueError("Meridional distances FAIL; expect: %s; found: %s" % (str(tenten), str(dists)))
print("Meridional distance: PASS")
print()
# Play with some distances between cities (deg W, deg N)
seattle = (122.0 + (20.0 / 60.0), 47.0 + (37.0 / 60.0))
portland = (122.0 + (41.0 / 60.0), 45.0 + (31.0 / 60.0))
spokane = (117.0 + (26.0 / 60.0), 47.0 + (40.0 / 60.0))
austin = ( 97.0 + (45.0 / 60.0), 30.0 + (15.0 / 60.0))
houston = ( 95.0 + (23.0 / 60.0), 29.0 + (46.0 / 60.0))
dallas = ( 96.0 + (48.0 / 60.0), 32.0 + (47.0 / 60.0))
lons = ( seattle[0], portland[0], spokane[0] )
lons1, lons2 = numpy.meshgrid(lons, lons)
lats = ( seattle[1], portland[1], spokane[1] )
lats1, lats2 = numpy.meshgrid(lats, lats)
dists = lonlatdistance(lons1, lats1, lons2, lats2)
dists *= DEG2RAD * EARTH_MR * KM2MI
expected = [ [ 0, 146, 228 ],
[ 146, 0, 290 ],
[ 228, 290, 0 ] ]
if not numpy.allclose(dists, expected, rtol=0.01):
raise ValueError("Seattle, Portland, Spokane distance matrix in miles\n" \
" expect: %s\n"
" found: %s" % (str(expected), str(dists)))
print("Seattle, Portland, Spokane distance matrix: PASS")
print()
lons = ( austin[0], houston[0], dallas[0] )
lons1, lons2 = numpy.meshgrid(lons, lons)
lats = ( austin[1], houston[1], dallas[1] )
lats1, lats2 = numpy.meshgrid(lats, lats)
dists = lonlatdistance(lons1, lats1, lons2, lats2)
dists *= DEG2RAD * EARTH_MR * KM2MI
expected = [ [ 0, 145, 184 ],
[ 145, 0, 224 ],
[ 184, 224, 0 ] ]
if not numpy.allclose(dists, expected, rtol=0.01):
raise ValueError("Austin, Houston, Dallas distance matrix in miles\n" \
" expect: %s\n"
" found: %s" % (str(expected), str(dists)))
print("Austin, Houston, Dallas distance matrix: PASS")
print()
# Test lonlatintersect
# Intersections of the equator with meridians
((pt1lons, pt1lats), (pt2lons, pt2lats)) = \
lonlatintersect(0.0, 0.0, tenten, 0.0, \
0.0, -90.0, tenten, tenten)
# First of the first great circle and last of the second great circle are not well-defined
expvalid = numpy.array([ True ] + ([ False ]*8) + [ True ])
validity = numpy.isnan(pt1lons)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of pt1lons: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
validity = numpy.isnan(pt1lats)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of pt1lats: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
validity = numpy.isnan(pt2lons)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of pt2lons: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
validity = numpy.isnan(pt2lats)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of pt2lats: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
if not numpy.allclose(pt1lons[1:-1], tenten[1:-1]):
raise ValueError("Valid pt1lons: expect: %s, found: %s" %\
(str(tenten[1:-1]), str(pt1lons[1:-1])))
if not numpy.allclose(pt1lats[1:-1], 0.0):
raise ValueError("Valid pt1lats: expect: all zeros, found: %s" %\
str(pt1lats[1:-1]))
if not numpy.allclose(pt2lons[1:-1], tenten[1:-1]-180.0):
raise ValueError("Valid pt2lons: expect: %s, found %s" %\
(str(tenten[1:-1]-180.0), str(pt2lons[1:-1])))
if not numpy.allclose(pt2lats[1:-1], 0.0):
raise ValueError("Valid pt2lats: expect: all zeros, found %s" %\
str(pt2lats[1:-1]))
print("Equator/meridian intersections: PASS")
print()
((pt1lons, pt1lats), (pt2lons, pt2lats)) = \
lonlatintersect( 0.0, 89.99, 180.0, 89.99,
90.0, 89.99, -90.0, 89.99)
# longitudes could actually be anything, but this algorithm gives 45.0 and -135.0
if (abs(pt1lons - 45.0) > 1.0E-8) or (abs(pt1lats - 90.0) > 1.0E-8) or \
(abs(pt2lons + 135.0) > 1.0E-8) or (abs(pt2lats + 90.0) > 1.0E-8):
raise ValueError("Mini north pole cross intersections: expect: %s, found %s" % \
(str([45.0, 90.0, -135.0, -90.0]),
str([float(pt1lons), float(pt1lats),
float(pt2lons), float(pt2lats)])))
print("Mini north pole cross intersections: PASS")
print()
# Test lonlatfwdpt
lons, lats = lonlatfwdpt(portland[0], portland[1], spokane[0], spokane[1], 0.0)
if not ( numpy.allclose(lons, portland[0]) and numpy.allclose(lats, portland[1]) ):
raise ValueError("Zero forward from portland to spokane: expect %s, found %s" % \
(str(portland), str((lons, lats))))
print("Zero forward: PASS")
print()
lons, lats = lonlatfwdpt(portland[0], portland[1], spokane[0], spokane[1], 1.0)
if not ( numpy.allclose(lons, spokane[0]) and numpy.allclose(lats, spokane[1]) ):
raise ValueError("One forward from portland to spokane: expect %s, found %s" % \
(str(spokane), str((lons, lats))))
print("One forward: PASS")
print()
lons, lats = lonlatfwdpt(0.0, 0.0, tenten, 0.0, 3.0)
expectlons = 3.0 * tenten
expectlons[ expectlons > 180.0 ] -= 360.0
# The first great circle is not well-defined
expvalid = numpy.array([ True ] + ([ False ]*9))
validity = numpy.isnan(lons)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of fwd equator lons: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
validity = numpy.isnan(lats)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of fwd equator lats: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
if not numpy.allclose(lons[1:], expectlons[1:]):
raise ValueError("Valid fwd equator lons: expect: %s, found: %s" %\
(str(expectlons[1:]), str(lons[1:])))
if not numpy.allclose(lats[1:], 0.0):
raise ValueError("Valid fwd equator lats: expect: all zeros, found: %s" %\
str(lats[1:]))
print("Fwd equator: PASS")
print()
lons, lats = lonlatfwdpt(0.0, -90.0, 0.0, tenten, 2.0)
# First longitude could be anything, but this algorithm gives 0.0
expectlats = 90.0 - 2.0 * tenten
# The last great circle is not well-defined
expvalid = numpy.array(([ False ]*9) + [ True ])
validity = numpy.isnan(lons)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of fwd prime meridian lons: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
validity = numpy.isnan(lats)
if not numpy.allclose(validity, expvalid):
raise ValueError("Validity of fwd prime meridian lats: expect: %s, found: %s" % \
(str(expvalid), str(validity)))
# First longitude could be anything so ignore it
# Others should be either 180 or -180 (equivalent longitudes)
poslons = lons[1:-1]
poslons[(poslons < 0.0)] += 360.0
if not numpy.allclose(poslons, 180.0):
raise ValueError("Valid fwd prime meridian lons: expect: all 180.0 or -180.0, found: %s" %\
str(poslons))
if not numpy.allclose(lats[:-1], expectlats[:-1]):
raise ValueError("Valid fwd prime meridian lats: expect: %s, found: %s" %\
(str(expectlats[:-1]), str(lats[:-1])))
print("Fwd prime meridian: PASS")
print()
lons, lats = lonlatfwdpt(0.0, 0.0, 45.0, 45.0, (2.0, 3.0, 4.0, 5.0))
expectlons = [ 135.0, 180.0, -135.0, -45.0 ]
expectlats = [ 45.0, 0.0, -45.0, -45.0 ]
if not numpy.allclose(lons, expectlons):
raise ValueError("Fwd diagonal lons: expect: %s, found: %s" %\
(str(expectlons), str(lons)))
if not numpy.allclose(lats, expectlats):
raise ValueError("Fwd diagonal lats: expect: %s, found: %s" %\
(str(expectlats), str(lats)))
print("Fwd diagonal: PASS")
print()
# Test equidistscatter
lons, lats = equidistscatter(0.0, 0.0, 0.0, 0.0, 1.0)
if (lons.shape != (1,)) or (lons[0] != 0.0) or \
(lats.shape != (1,)) or (lats[0] != 0.0):
raise ValueError("Equidistscatter single-point FAIL; \n" \
" expect: ([0.0],[0.0]), \n" \
" found (%s,%s)" % (str(lons), str(lats)))
print("Equidistscatter single-point PASS")
print()
lons, lats = equidistscatter(0.0, 90.0, 90.0, 90.0, 1.0)
if (lons.shape != (1,)) or (lons[0] != 45.0) or \
(lats.shape != (1,)) or (lats[0] != 90.0):
raise ValueError("Equidistscatter pole-point FAIL; \n" \
" expect: ([45.0],[90.0]), \n" \
" found (%s,%s)" % (str(lons), str(lats)))
print("Equidistscatter pole-point PASS")
print()
lons, lats = equidistscatter(0.0, 0.0, 90.0, 0.0, 1.0)
if not numpy.all( lats == 0.0 ):
raise ValueError("Equidistscatter equatorial FAIL; \n" \
" expect: all zero latitudes, \n" \
" found %s" % str(lats))
deltas = lons[1:] - lons[:-1]
if not numpy.all( deltas >= 1.0 ):
raise ValueError("Equidistscatter equatorial FAIL; \n" \
" expect: longitudes monotonic increasing by at least 1.0 degrees, \n" \
" found %s" % str(lons))
if not numpy.all( deltas < 1.0001 ):
raise ValueError("Equidistscatter equatorial FAIL; \n" \
" expect: longitudes monotonic increasing by less than 1.0001 degrees, \n" \
" found %s" % str(lons))
print("Equidistscatter equatorial PASS")
print()
lons, lats = equidistscatter(0.0, 0.0, 0.0, 90.0, 1.0)
if not numpy.all( lons == 0.0 ):
raise ValueError("Equidistscatter meridional FAIL; \n" \
" expect: all zero longitudes, \n" \
" found %s" % str(lons))
deltas = lats[1:] - lats[:-1]
if not numpy.all( deltas >= 1.0 ):
raise ValueError("Equidistscatter meridional FAIL; \n" \
" expect: latitudes monotonic increasing by at least 1.0 degrees, \n" \
" found %s" % str(lats))
if not numpy.all( deltas < 1.0001 ):
raise ValueError("Equidistscatter meridional FAIL; \n" \
" expect: latitudes monotonic increasing by less than 1.0001 degrees, \n" \
" found %s" % str(lats))
print("Equidistscatter meridional PASS")
print()
lons, lats = equidistscatter(0.0, 0.0, 90.0, 90.0, 5.0, 15.0)
nndists = [ ]
for j in range(len(lons)):
gcdists = lonlatdistance(lons[j], lats[j], lons, lats)
gcdists[j] = 180.0
if not numpy.all( gcdists >= 5.0 ):
raise ValueError("Equidistscatter region FAIL; \n" \
" expect distances[%d] >= 2.0, \n" \
" found %s" % (j, str(gcdists)))
nndists.append(gcdists.min())
nndists = numpy.array(nndists)
if not numpy.all( nndists < 10.0 ):
raise ValueError("Equidistscatter region FAIL; \n" \
" expect nearest neighbor distances < 10.0, \n" \
" found %s" % str(nndists))
print("Nearest neighbor distances: \n" \
" min = %f, max = %f, mean = %f, stdev = %f" % \
(nndists.min(), nndists.max(), nndists.mean(), nndists.std()))
print("Equidistscatter region PASS")
print()
the-stack_0_14329
from django.conf import settings
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from proso.django.models import disable_for_loaddata
from proso_flashcards.models import Term, Context
class ExtendedTerm(Term):
extra_info = models.TextField()
def to_json(self, nested=False):
json = Term.to_json(self, nested)
json["extra-info"] = self.extra_info
return json
@staticmethod
def load_data(data, term):
if 'extra-info' in data:
term.extra_info = data["extra-info"]
def dump_data(self, term):
if self.extra_info:
term["extra-info"] = self.extra_info
class ExtendedContext(Context):
extra_info = models.TextField()
def to_json(self, **kwargs):
json = Context.to_json(self, **kwargs)
return json
@staticmethod
def load_data(data, context):
if 'extra-info' in data:
context.extra_info = data["extra-info"]
def dump_data(self, context):
if self.extra_info:
context["extra-info"] = self.extra_info
settings.PROSO_FLASHCARDS["term_extension"] = ExtendedTerm
settings.PROSO_FLASHCARDS["context_extension"] = ExtendedContext
@receiver(pre_save, sender=ExtendedTerm)
@receiver(pre_save, sender=ExtendedContext)
@disable_for_loaddata
def create_items(sender, instance, **kwargs):
pre_save.send(sender=sender.__bases__[0], instance=instance)
the-stack_0_14331
# https://github.com/wolny/pytorch-3dunet/tree/master/pytorch3dunet/unet3d
import argparse
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
import torch.nn.functional as F
import os
import random
import torch.utils.data
import torchvision.utils as vutils
import torch.backends.cudnn as cudnn
import torch
import torch.nn as nn
import importlib
import torch.nn as nn
from model.unet3d.buildingblocks import Encoder, Decoder, DoubleConv, ExtResNetBlock
from model.unet3d.utils import number_of_features_per_level
class Abstract3DUNet(nn.Module):
"""
Base class for standard and residual UNet.
Args:
in_channels (int): number of input channels
out_channels (int): number of output segmentation masks;
Note that the out_channels might correspond either to
different semantic classes or to different binary segmentation masks.
It's up to the user of the class to interpret the out_channels and
use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class)
or BCEWithLogitsLoss (two-class) respectively)
f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number
of feature maps is given by the geometric progression: f_maps ^ k, k=1,2,3,4
final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the
final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used
to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model.
basic_module: basic model for the encoder/decoder (DoubleConv, ExtResNetBlock, ....)
layer_order (string): determines the order of layers
in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d.
See `SingleConv` for more info
f_maps (int, tuple): if int: number of feature maps in the first conv layer of the encoder (default: 64);
if tuple: number of feature maps at each level
num_groups (int): number of groups for the GroupNorm
num_levels (int): number of levels in the encoder/decoder path (applied only if f_maps is an int)
is_segmentation (bool): if True (semantic segmentation problem) Sigmoid/Softmax normalization is applied
after the final convolution; if False (regression problem) the normalization layer is skipped at the end
testing (bool): if True (testing mode) the `final_activation` (if present, i.e. `is_segmentation=true`)
will be applied as the last operation during the forward pass; if False the model is in training mode
and the `final_activation` (even if present) won't be applied; default: False
conv_kernel_size (int or tuple): size of the convolving kernel in the basic_module
pool_kernel_size (int or tuple): the size of the window
conv_padding (int or tuple): add zero-padding added to all three sides of the input
"""
def __init__(self, in_channels, out_channels, final_sigmoid, basic_module, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=4, is_segmentation=True, testing=False,
conv_kernel_size=3, pool_kernel_size=2, conv_padding=1, **kwargs):
super(Abstract3DUNet, self).__init__()
self.testing = testing
if isinstance(f_maps, int):
f_maps = number_of_features_per_level(f_maps, num_levels=num_levels)
# create encoder path consisting of Encoder modules. Depth of the encoder is equal to `len(f_maps)`
encoders = []
for i, out_feature_num in enumerate(f_maps):
if i == 0:
encoder = Encoder(in_channels, out_feature_num,
apply_pooling=False, # skip pooling in the first encoder
basic_module=basic_module,
conv_layer_order=layer_order,
conv_kernel_size=conv_kernel_size,
num_groups=num_groups,
padding=conv_padding)
else:
# TODO: adapt for anisotropy in the data, i.e. use proper pooling kernel to make the data isotropic after 1-2 pooling operations
encoder = Encoder(f_maps[i - 1], out_feature_num,
basic_module=basic_module,
conv_layer_order=layer_order,
conv_kernel_size=conv_kernel_size,
num_groups=num_groups,
pool_kernel_size=pool_kernel_size,
padding=conv_padding)
encoders.append(encoder)
self.encoders = nn.ModuleList(encoders)
# create decoder path consisting of the Decoder modules. The length of the decoder is equal to `len(f_maps) - 1`
decoders = []
reversed_f_maps = list(reversed(f_maps))
for i in range(len(reversed_f_maps) - 1):
if basic_module == DoubleConv:
in_feature_num = reversed_f_maps[i] + reversed_f_maps[i + 1]
else:
in_feature_num = reversed_f_maps[i]
out_feature_num = reversed_f_maps[i + 1]
# TODO: if non-standard pooling was used, make sure to use correct striding for transpose conv
# currently strides with a constant stride: (2, 2, 2)
decoder = Decoder(in_feature_num, out_feature_num,
basic_module=basic_module,
conv_layer_order=layer_order,
conv_kernel_size=conv_kernel_size,
num_groups=num_groups,
padding=conv_padding)
decoders.append(decoder)
self.decoders = nn.ModuleList(decoders)
# in the last layer a 1×1 convolution reduces the number of output
# channels to the number of labels
self.final_conv = nn.Conv3d(f_maps[0], out_channels, 1)
if is_segmentation:
# semantic segmentation problem
if final_sigmoid:
self.final_activation = nn.Sigmoid()
else:
self.final_activation = nn.Softmax(dim=1)
else:
# regression problem
self.final_activation = None
def forward(self, x):
# encoder part
encoders_features = []
for encoder in self.encoders:
x = encoder(x)
# reverse the encoder outputs to be aligned with the decoder
encoders_features.insert(0, x)
# remove the last encoder's output from the list
# !!remember: it's the 1st in the list
encoders_features = encoders_features[1:]
# decoder part
for decoder, encoder_features in zip(self.decoders, encoders_features):
# pass the output from the corresponding encoder and the output
# of the previous decoder
x = decoder(encoder_features, x)
x = self.final_conv(x)
# apply final_activation (i.e. Sigmoid or Softmax) only during prediction. During training the network outputs
# logits and it's up to the user to normalize it before visualising with tensorboard or computing validation metric
if self.testing and self.final_activation is not None:
x = self.final_activation(x)
return x
class UNet3D(Abstract3DUNet):
"""
3DUnet model from
`"3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation"
<https://arxiv.org/pdf/1606.06650.pdf>`.
Uses `DoubleConv` as a basic_module and nearest neighbor upsampling in the decoder
"""
def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=4, is_segmentation=True, conv_padding=1, **kwargs):
super(UNet3D, self).__init__(in_channels=in_channels, out_channels=out_channels, final_sigmoid=final_sigmoid,
basic_module=DoubleConv, f_maps=f_maps, layer_order=layer_order,
num_groups=num_groups, num_levels=num_levels, is_segmentation=is_segmentation,
conv_padding=conv_padding, **kwargs)
class ResidualUNet3D(Abstract3DUNet):
"""
Residual 3DUnet model implementation based on https://arxiv.org/pdf/1706.00120.pdf.
Uses ExtResNetBlock as a basic building block, summation joining instead
of concatenation joining and transposed convolutions for upsampling (watch out for block artifacts).
Since the model effectively becomes a residual net, in theory it allows for deeper UNet.
"""
def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=5, is_segmentation=True, conv_padding=1, **kwargs):
super(ResidualUNet3D, self).__init__(in_channels=in_channels, out_channels=out_channels,
final_sigmoid=final_sigmoid,
basic_module=ExtResNetBlock, f_maps=f_maps, layer_order=layer_order,
num_groups=num_groups, num_levels=num_levels,
is_segmentation=is_segmentation, conv_padding=conv_padding,
**kwargs)
class UNet2D(Abstract3DUNet):
"""
Just a standard 2D Unet. Arises naturally by specifying conv_kernel_size=(1, 3, 3), pool_kernel_size=(1, 2, 2).
"""
def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=4, is_segmentation=True, conv_padding=1, **kwargs):
if conv_padding == 1:
conv_padding = (0, 1, 1)
super(UNet2D, self).__init__(in_channels=in_channels,
out_channels=out_channels,
final_sigmoid=final_sigmoid,
basic_module=DoubleConv,
f_maps=f_maps,
layer_order=layer_order,
num_groups=num_groups,
num_levels=num_levels,
is_segmentation=is_segmentation,
conv_kernel_size=(1, 3, 3),
pool_kernel_size=(1, 2, 2),
conv_padding=conv_padding,
**kwargs)
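# --- Note added for clarity (hedged): how an integer `f_maps` expands per level ---
# When `f_maps` is an int, `number_of_features_per_level` is expected to build a
# doubling geometric progression, e.g. f_maps=64, num_levels=4 -> [64, 128, 256, 512];
# the decoder then mirrors this list in reverse. The exact helper output is an
# assumption based on its name and the docstrings above, not on code shown here.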
class First3D(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, dropout=False):
super(First3D, self).__init__()
layers = [
nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm3d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv3d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm3d(out_channels),
nn.ReLU(inplace=True)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout3d(p=dropout))
self.first = nn.Sequential(*layers)
def forward(self, x):
return self.first(x)
class Encoder3D(nn.Module):
def __init__(
self, in_channels, middle_channels, out_channels,
dropout=False, downsample_kernel=2
):
super(Encoder3D, self).__init__()
layers = [
nn.MaxPool3d(kernel_size=downsample_kernel),
nn.Conv3d(in_channels, middle_channels, kernel_size=3, padding=1),
nn.BatchNorm3d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv3d(middle_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm3d(out_channels)
# no final ReLU here, so the encoder output can take negative values
# nn.ReLU(inplace=True)
]
if dropout:
assert 0 <= dropout <= 1, 'dropout must be between 0 and 1'
layers.append(nn.Dropout3d(p=dropout))
self.encoder = nn.Sequential(*layers)
def forward(self, x):
return self.encoder(x)
class Discriminator(nn.Module):
def __init__(self, in_channels,conv_depths=(4,8, 16, 32,64,128,1)):
assert len(conv_depths) > 2, 'conv_depths must have at least 3 members'
super(Discriminator, self).__init__()
# defining encoder layers
encoder_layers = []
encoder_layers.append(First3D(in_channels, conv_depths[0], conv_depths[0]))
encoder_layers.extend([Encoder3D(conv_depths[i], conv_depths[i + 1], conv_depths[i + 1])
for i in range(len(conv_depths)-1)])
# encoder, center and decoder layers
self.encoder_layers = nn.Sequential(*encoder_layers)
def forward(self, x, return_all=False):
x_enc = [x]
for enc_layer in self.encoder_layers:
x_enc.append(enc_layer(x_enc[-1]))
return F.sigmoid(x_enc[-1])
class RecGAN(nn.Module):
def __init__(self):
super(RecGAN, self).__init__()
# create AE (3D-Unet)
# input 64*64*64*1 output 64*64*64*1
self.unet = ResidualUNet3D(1,1,final_sigmoid=True, f_maps=64, layer_order='gcr',num_groups=8, num_levels=5, is_segmentation=False, conv_padding=1)
# create discriminator (like the encoder)
self.discriminator = Discriminator(1)
def forward(self,X):
Y_rec = self.unet(X)
dis = self.discriminator(Y_rec)
return F.sigmoid(Y_rec),dis
if __name__ == '__main__':
input_data = torch.rand([1,1,64,64,64])
recgan = RecGAN()
output,dis = recgan(input_data)
print(output.shape)
print(dis.shape)
the-stack_0_14332
#!/usr/bin/env python3
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# ====================================
"""Runtime module. Contains runtime base class and language specific runtime classes."""
import signal
import subprocess
import sys
import time
import os
import serializerfactory
import subprocessfactory
import tracer
from runbook import *
from workerexception import *
json = serializerfactory.get_serializer(sys.version_info)
def find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
_, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if os.path.isfile(executable):
return executable
if path is None:
path = os.environ.get('PATH', None)
if path is None:
try:
path = os.confstr("CS_PATH")
except (AttributeError, ValueError):
# os.confstr() or CS_PATH is not available
path = os.defpath
# bpo-35755: Don't use os.defpath if the PATH environment variable is
# set to an empty string
# PATH='' doesn't match, whereas PATH=':' looks in the current directory
if not path:
return None
paths = path.split(os.pathsep)
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
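# --- Illustrative usage of find_executable (hedged example, not in the original file) ---
# The command names below are examples only; results depend on the host's PATH.
#
#   find_executable("ls")            # POSIX hosts: a path such as '/bin/ls', else None
#   find_executable("powershell")    # Windows hosts: searches PATH for 'powershell.exe'
#   find_executable("no-such-tool")  # None when nothing on PATH matches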
class Runtime(object):
"""Runtime base class."""
def __init__(self, job_data, runbook):
"""
:type job_data : jrdsclient.JobData"
:type runbook : Runbook
"""
# should be overwritten by language runtime
self.execution_alias = None
self.base_cmd = None
# used for actual runtime
self.runbook = runbook
self.runbook_subprocess = None
self.job_data = job_data
def initialize(self):
self.runbook.write_to_disk()
def start_runbook_subprocess(self):
"""Creates the runbook subprocess based on the script language and using properties set by the derived class.
Requires self.base_cmd & self.runbook_file_path to be set by derived class.
"""
cmd = self.base_cmd + [self.runbook.runbook_file_path]
job_parameters = self.job_data.parameters
if job_parameters is not None and len(job_parameters) > 0:
for parameter in job_parameters:
tracer.log_debug_trace("Parameter is: \n" + str(parameter))
if self.runbook.definition_kind_str == "PowerShell" and parameter["Name"]:
# Handle named parameters for PowerShell arriving out of order
cmd += ["-%s" % parameter["Name"]]
cmd += [str(json.loads(parameter["Value"]))]
# Start from a copy of the current process env vars and add the sandbox-specific ones below
env = os.environ.copy()
env.update({"AUTOMATION_JOB_ID": str(self.job_data.job_id),
"AUTOMATION_ACTIVITY_ID": str(tracer.u_activity_id),
"PYTHONPATH": str(configuration.get_source_directory_path()),
"HOME": str(os.getcwd())}) # windows env have to be str (not unicode)
self.runbook_subprocess = subprocessfactory.create_subprocess(cmd=cmd,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def kill_runbook_subprocess(self):
"""Attempts to kill the runbook subprocess.
This method will attempt to kill the runbook subprocess up to [max_attempt_count] times and will return if successful.
Throws:
SandboxRuntimeException : If runbook subprocess is still alive after [max_attempt_count].
"""
attempt_count = 0
max_attempt_count = 3
while attempt_count < max_attempt_count:
if self.runbook_subprocess is not None and self.runbook_subprocess.poll() is None:
os.kill(self.runbook_subprocess.pid, signal.SIGTERM)
runbook_proc_is_alive = self.is_process_alive(self.runbook_subprocess)
if runbook_proc_is_alive is False:
return
attempt_count += 1
time.sleep(attempt_count)
else:
return
raise SandboxRuntimeException()
@staticmethod
def is_process_alive(process):
"""Checks if the given process is still alive.
Returns:
boolean : True if the process [pid] is alive, False otherwise.
"""
if process.poll() is None:
return True
else:
return False
def is_runtime_supported(self):
"""Validates that the OS supports the language runtime by testing the executable file path.
Returns:
True : If executable exist.
False : Otherwise.
"""
if find_executable(self.execution_alias) is None:
return False
else:
return True
class PowerShellRuntime(Runtime):
"""PowerShell runtime derived class."""
def __init__(self, job_data, runbook):
Runtime.__init__(self, job_data, runbook)
self.execution_alias = "pwsh"
if linuxutil.is_posix_host() is False:
self.execution_alias = "powershell"
self.base_cmd = [self.execution_alias, "-File"]
class Python2Runtime(Runtime):
"""Python 2 runtime derived class."""
def __init__(self, job_data, runbook):
Runtime.__init__(self, job_data, runbook)
self.execution_alias = "python2"
if get_default_python_interpreter_major_version() == 2:
self.execution_alias = "python"
self.base_cmd = [self.execution_alias]
class Python3Runtime(Runtime):
"""Python 3 runtime derived class."""
def __init__(self, job_data, runbook):
Runtime.__init__(self, job_data, runbook)
self.execution_alias = "python3"
if get_default_python_interpreter_major_version() == 3:
self.execution_alias = "python3"
self.base_cmd = [self.execution_alias]
class BashRuntime(Runtime):
"""Bash runtime derived class."""
def __init__(self, job_data, runbook):
Runtime.__init__(self, job_data, runbook)
self.execution_alias = "bash"
self.base_cmd = [self.execution_alias]
def get_default_python_interpreter_major_version():
"""Return the default "python" alias interpreter version.
Returns:
int, the interpreter major version
None, if the default interpreter version cannot be detected
"""
cmd = ["python3", "-c", "import sys;print(sys.version[0])"] # need to use print() for python3 compatibility
p = subprocessfactory.create_subprocess(cmd=cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
default_interpreter_version, error = p.communicate()
if p.returncode == 0:
return int(default_interpreter_version.decode().strip())
else:
return None
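# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A caller holding job_data and a runbook might dispatch on the runbook kind roughly
# like this. The kind strings below mirror the classes above, but the exact dispatch
# table and kind names are assumptions for illustration only.
#
#   runtime_by_kind = {
#       "PowerShell": PowerShellRuntime,
#       "Python2": Python2Runtime,
#       "Python3": Python3Runtime,
#       "Bash": BashRuntime,
#   }
#   runtime = runtime_by_kind[runbook.definition_kind_str](job_data, runbook)
#   if runtime.is_runtime_supported():
#       runtime.initialize()
#       runtime.start_runbook_subprocess()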
the-stack_0_14333
# Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import torch
from base.learners.skill_discovery.base import BaseSkillDiscoveryLearner
class BaseSMMLearner(BaseSkillDiscoveryLearner):
AGENT_TYPE = 'SMM'
def __init__(self, skill_n, **kwargs):
self.skill_n = int(skill_n)
# At least trigger the default usage for im and density modules
if 'im_params' not in kwargs:
kwargs['im_params'] = {}
if 'density_params' not in kwargs:
kwargs['density_params'] = {}
super().__init__(**kwargs)
self.im_type = 'reverse_mi'
self.density_type = 'vae'
def relabel_episode(self):
super().relabel_episode()
# Add density model reward
self._add_density_reward()
def relabel_batch(self, batch):
batch = super().relabel_batch(batch)
# Compute reward from density model
with torch.no_grad():
new_density_rew = self.density.novelty(batch)
# Make sure that weights for density rewards are not None
density_nu = self.density_nu if self.density_nu is not None else 0.
# Detach density rewards from computation graph
new_density_rew = new_density_rew.detach()
batch['reward'] = batch['reward'] + density_nu * new_density_rew
batch['density_model_reward'] = new_density_rew
return batch
def _compute_novelty(self, batched_episode):
return self.density.novelty(batched_episode)
def _add_density_reward(self):
if self.density is not None:
for ep in self._compress_me:
batched_episode = {key: torch.stack([e[key] for e in ep]) for key in ep[0].keys()}
novelty = self._compute_novelty(batched_episode)
if self.density_scale:
self.train()
_ = self._density_bn(novelty.view(-1, 1))
self.eval()
novelty = novelty / torch.sqrt(self._density_bn.running_var[0])
for e, s in zip(ep, novelty):
e['reward'] += (self.density_nu * s.detach())
e['density_model_reward'] = s.detach()
def get_density_loss(self, batch):
return self.density(batch)
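# --- Hedged note on the relabeling arithmetic above (toy numbers for illustration) ---
# relabel_batch applies: reward' = reward + density_nu * novelty (with novelty detached).
# For example, reward = [0.0, 1.0], novelty = [0.5, 0.2], density_nu = 0.1 gives
# reward' = [0.05, 1.02]. The concrete values are made up; only the formula comes
# from the code above.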
the-stack_0_14334
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 24 11:01:42 2020
@author: amarmore
"""
# Everything related to the segmentation of the autosimilarity.
import numpy as np
import math
from scipy.sparse import diags
import musicae.model.errors as err
import warnings
def get_autosimilarity(an_array, transpose = False, normalize = False):
"""
Encapsulates the autosimilarity generation of a matrix.
Parameters
----------
an_array : numpy array
The array/matrix whose autosimilarity is to be computed.
transpose : boolean, optional
Whether the array has to be transposed before computing the autosimilarity.
The default is False.
normalize : boolean, optional
Whether to normalize the autosimilarity.
The default is False.
Returns
-------
numpy array
The autosimilarity of this array.
"""
if type(an_array) is list:
this_array = np.array(an_array)
else:
this_array = an_array
if transpose:
this_array = this_array.T
if normalize:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide") # Avoiding to show the warning, as it's handled, not te confuse the user.
this_array = np.array([list(i/np.linalg.norm(i)) for i in this_array.T]).T
this_array = np.where(np.isnan(this_array), 1e-10, this_array) # Replace null lines so that best-path retrieval does not fail
return this_array.T@this_array
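# --- Illustrative example (hedged; the 2x4 feature matrix below is made up) ---
# With normalize=True every column is scaled to unit norm, so the diagonal of the
# autosimilarity is 1 and each off-diagonal entry is the cosine similarity between
# two bars (columns of the input).
#
#   feats = np.array([[1.0, 0.0, 1.0, 2.0],
#                     [0.0, 1.0, 1.0, 0.0]])
#   autosim = get_autosimilarity(feats, transpose=False, normalize=True)
#   autosim.shape   # -> (4, 4), one row/column per bar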
def compute_all_kernels(max_size, convolution_type = "full"):
"""
Precomputes all kernels of size 0 ([0]) to max_size, and feed them to the Dynamic Progamming algorithm.
Parameters
----------
max_size : integer
The maximal size (included) for kernels.
convolution_type: string
The type of convolution, made explicit below.
Possibilities are :
- "full" : squared matrix entirely composed of one, except on the diagonal where it's zero.
The associated convolution cost for a segment (b_1, b_2) will be
.. math::
c_{b_1,b_2} = \\frac{1}{b_2 - b_1 + 1}\\sum_{i,j = 0, i \\ne j}^{n - 1} a_{i + b_1, j + b_1}
- "eight_bands" : squared matrix where the only nonzero values are ones on the
8 subdiagonals surrounding the main diagonal.
The associated convolution cost for a segment (b_1, b_2) will be
.. math::
c_{b_1,b_2} = \\frac{1}{b_2 - b_1 + 1}\\sum_{i,j = 0, 1 \\leq |i - j| \\leq 4}^{n - 1} a_{i + b_1, j + b_1}
- "mixed" : sum of both previous kernels, i.e. values are zero on the diagonal,
2 on the 8 subdiagonals surrounding the main diagonal, and 1 elsewhere.
The associated convolution cost for a segment (b_1, b_2) will be
.. math::
c_{b_1,b_2} = \\frac{1}{b_2 - b_1 + 1}\\left(2\\sum_{i,j = 0, 1 \\leq |i - j| \\leq 4}^{n - 1} a_{i + b_1, j + b_1} + \\sum_{i,j = 0, |i - j| > 4}^{n - 1} a_{i + b_1, j + b_1}\\right)
Returns
-------
kernels : array of arrays (which are kernels)
All the kernels, of size 0 ([0]) to max_size.
"""
kernels = [[0]]
for p in range(1,max_size + 1):
if p < 4:
kern = np.ones((p,p)) - np.identity(p)
else:
if convolution_type == "full":
# Full kernel (except for the diagonal)
kern = np.ones((p,p)) - np.identity(p)
elif convolution_type == "eight_bands":
# Diagonal where only the eight subdiagonals surrounding the main diagonal is one
k = np.array([np.ones(p-4),np.ones(p-3),np.ones(p-2),np.ones(p-1),np.zeros(p),np.ones(p-1),np.ones(p-2),np.ones(p-3),np.ones(p-4)])
offset = [-4,-3,-2,-1,0,1,2,3,4]
kern = diags(k,offset).toarray()
elif convolution_type == "mixed":
# Sum of both previous kernels
k = np.array([np.ones(p-4),np.ones(p-3),np.ones(p-2),np.ones(p-1),np.zeros(p),np.ones(p-1),np.ones(p-2),np.ones(p-3),np.ones(p-4)])
offset = [-4,-3,-2,-1,0,1,2,3,4]
kern = np.ones((p,p)) - np.identity(p) + diags(k,offset).toarray()
else:
raise err.InvalidArgumentValueException(f"Convolution type not understood: {convolution_type}.")
kernels.append(kern)
return kernels
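# --- Illustrative check (hedged; added for clarity) ---
#   kernels = compute_all_kernels(8, convolution_type="full")
#   len(kernels)   # -> 9: the dummy [0] kernel plus sizes 1..8
#   kernels[3]     # -> 3x3 kernel of ones with a zero diagonal:
#                  #    [[0., 1., 1.],
#                  #     [1., 0., 1.],
#                  #     [1., 1., 0.]]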
def convolutionnal_cost(cropped_autosimilarity, kernels):
"""
The convolution measure on this part of the autosimilarity matrix.
Parameters
----------
cropped_autosimilarity : list of list of floats or numpy array (matrix representation)
The part of the autosimilarity whose convolution measure is to be computed.
kernels : list of arrays
Acceptable kernels.
Returns
-------
float
The convolution measure.
"""
p = len(cropped_autosimilarity)
kern = kernels[p]
#return np.mean(np.multiply(kern,cropped_autosimilarity))
return np.sum(np.multiply(kern,cropped_autosimilarity)) / p**2
def convolution_entire_matrix_computation(autosimilarity_array, kernels, kernel_size = 8):
"""
Computes the convolution measure on the entire autosimilarity matrix, with a defined and fixed kernel size.
Parameters
----------
autosimilarity_array : list of list of floats or numpy array (matrix representation)
The autosimilarity matrix.
kernels : list of arrays
All acceptable kernels.
kernel_size : integer
The size of the kernel for this measure.
Returns
-------
cost : list of float
List of convolution measures, at each bar of the autosimilarity.
"""
cost = np.zeros(len(autosimilarity_array))
for i in range(kernel_size, len(autosimilarity_array)):
cost[i] = convolutionnal_cost(autosimilarity_array[i - kernel_size:i,i - kernel_size:i], kernels)
return cost
def dynamic_convolution_computation(autosimilarity, min_size = 1, max_size = 36, penalty_weight = 1, penalty_func = "modulo8", convolution_type = "mixed"):
"""
Dynamic programming algorithm, computing a maximization of a cost, sum of segments' costs on the autosimilarity.
This cost is a combination of
- the convolutionnal cost on the segment, with a dynamic size,
- a penalty cost, function of the size of the segment, to enforce specific sizes (with prior knowledge),
The penalty cost is computed in the function "penalty_cost_from_arg()".
See this function for further details.
It returns the optimal segmentation according to this cost.
This algorithm is also described in [1].
Parameters
----------
autosimilarity : list of list of float (list of columns)
The autosimilarity to segment.
min_size : integer, optional
The minimal length of segments.
The default is 1.
max_size : integer, optional
The maximal length of segments.
The default is 36.
penalty_weight : float, optional
The ponderation parameter for the penalty function
penalty_func : string
The type of penalty function to use.
See "penalty_cost_from_arg()" for further details.
convolution_type : string
The type of convolution we want to use in this computation.
See "compute_all_kernels()" for a detailed list of possibilities.
Raises
------
ToDebugException
If the program fails, generally meaning that the autosimilarity is incorrect.
Returns
-------
list of tuples
The segments, as a list of tuples (start, end).
integer
Global cost (the minimal among all).
References
----------
[1] Marmoret, A., Cohen, J., Bertin, N., & Bimbot, F. (2020, October).
Uncovering Audio Patterns in Music with Nonnegative Tucker Decomposition for Structural Segmentation.
In ISMIR 2020-21st International Society for Music Information Retrieval.
"""
costs = [-math.inf for i in range(len(autosimilarity))]
segments_best_starts = [None for i in range(len(autosimilarity))]
segments_best_starts[0] = 0
costs[0] = 0
kernels = compute_all_kernels(max_size, convolution_type = convolution_type)
conv_eight = convolution_entire_matrix_computation(autosimilarity, kernels)
for current_idx in range(1, len(autosimilarity)): # Parse all indexes of the autosimilarity
for possible_start_idx in possible_segment_start(current_idx, min_size = min_size, max_size = max_size):
if possible_start_idx < 0:
raise err.ToDebugException("Invalid value of start index.")
# Convolutionnal cost between the possible start of the segment and the current index (entire segment)
conv_cost = convolutionnal_cost(autosimilarity[possible_start_idx:current_idx,possible_start_idx:current_idx], kernels)
segment_length = current_idx - possible_start_idx
penalty_cost = penalty_cost_from_arg(penalty_func, segment_length)
this_segment_cost = conv_cost * segment_length - penalty_cost * penalty_weight * np.max(conv_eight)
# Note: conv_eight is not normalized by its size (not a problem in itself as size is constant, but generally not specified in formulas).
# Avoiding errors, as segment_cost are initially set to -inf.
if possible_start_idx == 0:
if this_segment_cost > costs[current_idx]:
costs[current_idx] = this_segment_cost
segments_best_starts[current_idx] = 0
else:
if costs[possible_start_idx] + this_segment_cost > costs[current_idx]:
costs[current_idx] = costs[possible_start_idx] + this_segment_cost
segments_best_starts[current_idx] = possible_start_idx
segments = [(segments_best_starts[len(autosimilarity) - 1], len(autosimilarity) - 1)]
precedent_frontier = segments_best_starts[len(autosimilarity) - 1] # Because a segment's start is the previous one's end.
while precedent_frontier > 0:
segments.append((segments_best_starts[precedent_frontier], precedent_frontier))
precedent_frontier = segments_best_starts[precedent_frontier]
if precedent_frontier == None:
raise err.ToDebugException("Well... The dynamic programming algorithm took an impossible path, so it failed. Understand why.") from None
return segments[::-1], costs[-1]
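# --- Hedged usage sketch (names and output below are illustrative, not from the code) ---
# Typical pipeline: build a normalized autosimilarity from barwise features, then segment it.
#
#   autosim = get_autosimilarity(barwise_features, transpose=True, normalize=True)
#   segments, cost = dynamic_convolution_computation(autosim, penalty_weight=1,
#                                                    penalty_func="modulo8",
#                                                    convolution_type="eight_bands")
#   segments[:2]   # e.g. [(0, 8), (8, 16)] for a song opening with two 8-bar segments
#
# `barwise_features` is a placeholder name for a (features x bars) matrix.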
def penalty_cost_from_arg(penalty_func, segment_length):
"""
Returns a penalty cost, function of the size of the segment.
The penalty function has to be specified, and is bound to evolve in the near future,
so this docstring won't explain it.
Instead, you'll have to read the code, sorry! It is pretty straightforward though.
The ``modulo'' functions are based on empirical prior knowledge,
following the fact that pop music is generally composed of segments of 4 or 8 bars.
Parameters
----------
penalty_func : string
Identifier of the penalty function.
segment_length : integer
Size of the segment.
Returns
-------
float
The penalty cost.
"""
if penalty_func == "modulo4":
if segment_length %4 == 0:
return 0
elif segment_length %2 == 0:
return 1/2
else:
return 1
if penalty_func == "modulo8":
if segment_length == 8:
return 0
elif segment_length %4 == 0:
return 1/4
elif segment_length %2 == 0:
return 1/2
else:
return 1
if penalty_func == "moduloSmall8and4":
if segment_length > 12:
return 100
elif segment_length == 8:
return 0
elif segment_length == 4:
return 1/4
elif segment_length %2 == 0:
return 1/2
else:
return 1
if penalty_func == "sargentdemi":
return abs(segment_length - 8) ** (1/2)
if penalty_func == "sargentun":
return abs(segment_length - 8)
if penalty_func == "sargentdeux":
return abs(segment_length - 8) ** 2
else:
raise err.InvalidArgumentValueException(f"Penalty function not understood {penalty_func}.")
def possible_segment_start(idx, min_size = 1, max_size = None):
"""
Generates the list of all possible starts of segments given the index of its end.
Parameters
----------
idx: integer
The end of a segment.
min_size: integer
Minimal length of a segment.
max_size: integer
Maximal length of a segment.
Returns
-------
list of integers
All potentials starts of structural segments.
"""
if min_size < 1: # No segment should be allowed to be 0 size
min_size = 1
if max_size == None:
return range(0, idx - min_size + 1)
else:
if idx >= max_size:
return range(idx - max_size, idx - min_size + 1)
elif idx >= min_size:
return range(0, idx - min_size + 1)
else:
return []
# %% Novelty cost, deprecated, but could be used in comparison tests.
def novelty_cost(cropped_autosimilarity):
"""
Novelty measure on this part of the autosimilarity matrix.
The size of the kernel will be the size of the parameter matrix.
Parameters
----------
cropped_autosimilarity : list of list of floats or numpy array (matrix representation)
The part of the autosimilarity whose novelty measure is to be computed.
Raises
------
NotImplementedError
If the size of the autosimilarity is odd (the novelty kernel can't fit this matrix).
Returns
-------
float
The novelty measure.
"""
# Kernel is of the size of cropped_autosimilarity
if len(cropped_autosimilarity) == 0:
return 0
if len(cropped_autosimilarity) % 2 == 1:
raise NotImplementedError("Error")
#return (novelty_cost(cropped_autosimilarity[:-1, :-1]) + novelty_cost(cropped_autosimilarity[1:, 1:])) / 2
kernel_size = int(len(cropped_autosimilarity) / 2)
kernel = np.kron(np.array([[1,-1], [-1, 1]]), np.ones((kernel_size, kernel_size)))
return np.mean(kernel*cropped_autosimilarity)
def novelty_computation(autosimilarity_array, kernel_size):
"""
Computes the novelty measure of all of the autosimilarity matrix, with a defined and fixed kernel size.
Parameters
----------
autosimilarity_array : list of list of floats or numpy array (matrix representation)
The autosimilarity matrix.
kernel_size : integer
The size of the kernel.
Raises
------
NotImplementedError
If the kernel size is odd, can't compute the novelty measure.
Returns
-------
cost : list of float
List of novelty measures, at each bar of the autosimilarity.
"""
if kernel_size % 2 == 1:
raise NotImplementedError("The kernel should be even.") from None
cost = np.zeros(len(autosimilarity_array))
half_kernel = int(kernel_size / 2)
for i in range(half_kernel, len(autosimilarity_array) - half_kernel):
cost[i] = novelty_cost(autosimilarity_array[i - half_kernel:i + half_kernel,i - half_kernel:i + half_kernel])
return cost
# %% Related to the novelty computation, so deprecated.
def peak_picking(tab, window_size = 1):
"""
Returns the indexes of peaks of values in the given list of values.
A value is considered "peak" if it's a local maximum,
and if all values in the window (defined by 'window_size') before and after
are strictly monotonic.
Used for peak picking in the novelty measure.
Parameters
----------
tab : list of float
The list of values to study.
window_size : boolean, optional
Size of the window around a possible peak to be considered "peak",
ie number of consecutive values where the values should increase (before) and (decrease) after.
The default is 1.
Returns
-------
to_return : list of integers
The indexes where values are peaking.
"""
to_return = []
for current_idx in range(window_size, len(tab) - window_size):
if is_increasing(tab[current_idx - window_size:current_idx + 1]) and is_increasing(tab[current_idx:current_idx + window_size + 1][::-1]):
to_return.append(current_idx)
return to_return
def valley_picking(tab, window_size = 1):
"""
Returns the indexes of valleys of values in the desired list of values.
A value is considered "valley" if it's a local minimum,
and if all values in the window (defined by 'window_size') before and after
are strictly monotonic.
Used for peak picking in the novelty measure.
Parameters
----------
tab : list of float
The list of values to study.
window_size : boolean, optional
Size of the window around a possible valley to be considered "valley",
ie number of consecutive values where the values should decrease (before) and increase (after).
The default is 1.
Returns
-------
to_return : list of integers
The indexes where values are valleys.
"""
to_return = []
for current_idx in range(window_size, len(tab) - window_size):
if is_increasing(tab[current_idx - window_size:current_idx + 1][::-1]) and is_increasing(tab[current_idx:current_idx + window_size + 1]):
to_return.append(current_idx)
return to_return
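# --- Illustrative example of the two pickers above (hedged; toy values) ---
#   novelty = [0.0, 0.2, 1.0, 0.3, 0.1, 0.8, 0.2]
#   peak_picking(novelty, window_size=1)     # -> [2, 5]
#   valley_picking(novelty, window_size=1)   # -> [4]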
def is_increasing(tab):
"""
Tests if the tab values are increasing.
Used for peak picking in the novelty measure.
Parameters
----------
tab : list of float
The values.
Returns
-------
boolean
Whether the values are increasing or not.
"""
if len(tab) <= 1 or len(np.unique(tab)) == 1:
return False
for idx in range(len(tab) - 1):
if tab[idx] > tab[idx+1]:
return False
return True
def decreasing_peaks(data):
"""
Returns the peaks indexes of a list of values in their decreasing order of values.
Used for peak picking in the novelty measure.
Parameters
----------
data : list of float
The values.
Returns
-------
list of integers
The indexes of the peaks, sorted in their decreasing order of values.
"""
peaks_and_value = []
for idx in peak_picking(data, window_size = 1):
peaks_and_value.append((idx, data[idx]))
return sorted(peaks_and_value, key=lambda x:x[1], reverse = True)
def select_highest_peaks_thresholded_indexes(data, percentage = 0.33):
"""
Returns the peaks higher than a percentage of the maximal peak from a list of values.
Used for peak picking in the novelty measure.
Parameters
----------
data : list of floats
The values.
percentage : float, optional
The percentage of the maximal value for a peak to be valid.
The default is 0.33.
Returns
-------
list of integers
Indexes of the valid peaks.
"""
peaks = np.array(decreasing_peaks(data))
max_peak = peaks[0,1]
for idx, peak in enumerate(peaks):
if peak[1] < percentage * max_peak:
return [int(i) for i in sorted(peaks[:idx, 0])]
return [int(i) for i in sorted(peaks[:,0])]
def mean(val_a, val_b):
"""
A function returning the mean of both values.
This function is redeveloped so as to be called as choice_func in the function "values_as_slop()" (see below) in external projects.
Parameters
----------
val_a : float
First value.
val_b : float
Second value.
Returns
-------
float: mean of both values.
"""
return (val_a + val_b) / 2
def values_as_slop(value, choice_func = max):
"""
Compute peaks of a value (typically novelty measure)
as the difference between absolute peaks and absolute valleys.
Function choice_func determines the way of computing this gap.
Typically, max will compute peaks as the maximum gap between a peak and its two closest valleys,
whereas min will select the minimal gap.
This returns an array containing zeroes where there is no peak in absolute value,
and this new value as a gap computation where there were peaks before.
Parameters
----------
value : array of float
The absolute value of the measure.
choice_func : function name, optional
Type of the function selecting the difference between peaks and valleys.
Classical values are "max" for selecting the maximum gap between the peak and both its closest valleys,
"min" for the minimum of both gaps, and "mean" (called autosimilarity_segmentation.mean) for the mean of both gaps.
The default is max.
Returns
-------
peak_valley_slop : array of floats
The new values of peaks as gaps, and 0 everywhere else.
"""
peaks = peak_picking(value, window_size = 1)
valleys = valley_picking(value, window_size = 1)
peak_valley_slop = np.zeros(len(value))
for peak in peaks:
i = 0
while i < len(valleys) and valleys[i] < peak:
i+=1
if i == 0:
left_valley = 0
right_valley = valleys[i]
elif i == len(valleys):
left_valley = valleys[i - 1]
right_valley = 0
else:
left_valley = valleys[i - 1]
right_valley = valleys[i]
chosen_valley_value = choice_func(value[left_valley], value[right_valley])
peak_valley_slop[peak] = value[peak] - chosen_valley_value
return peak_valley_slop
the-stack_0_14335
from __future__ import print_function
from colorama import *
import webbrowser
import sys
import time
# Initialize colored output and set colors
init()
# Get settings from file
file = open('settings.txt', 'r')
settings = file.readlines()
file.close()
# Set timer in seconds
pomodoro = int(settings[1])*60
# Set URL to open
url = settings[4]
# Header
print(" ----------------- ")
print(Fore.GREEN + " MyPymodoro v1.0 " + Style.RESET_ALL)
print(" ----------------- ")
print(Fore.YELLOW + " http://dvt32.blogspot.com/\n" + Style.RESET_ALL)
# Time left information
print (" Timer started! Break coming up in " + Back.RED + str(pomodoro / 60) + " minutes" + Style.RESET_ALL + "!\n")
# Print time elapsed
for second in range(pomodoro):
print (" Time left until break: " +
Fore.YELLOW +
str(pomodoro / 60) + " minute(s), " +
str(pomodoro % 60) + " seconds" + " " +
Style.RESET_ALL,
end="\r")
sys.stdout.flush()
pomodoro -= 1
time.sleep(1)
# Load video after time is up
webbrowser.open(url)
the-stack_0_14336
"""
source: https://stackoverflow.com/questions/37117878/generating-a-filled-polygon-inside-a-numpy-array
"""
import numpy as np
import imageio
def polygon(a, vertices):
fill = np.ones(a.shape) * True
idx = np.indices(a.shape)
# loop over pairs of corner points
for k in range(vertices.shape[0]):
p1, p2 = vertices[k-1], vertices[k]
dist = p2 - p1
max_idx = (idx[0] - p1[0]) / dist[0] * dist[1] + p1[1]
sign = np.sign(dist[0])
check = idx[1] * sign <= max_idx * sign
fill = np.all([fill, check], axis=0)
a[fill] = 127
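# --- Note added for clarity (hedged reading of the loop above) ---
# For each directed edge (p1 -> p2) the expression builds the edge's line in index
# space and keeps only the pixels on its inner side (the sign flips with the edge
# direction), so `fill` ends up as the intersection of all half-planes, i.e. the
# interior of a convex polygon traversed clockwise.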
# clockwise!
vertices = np.array([[50,120], [80,380], [230,240]])
a = np.zeros((400, 400), dtype=np.uint8)
polygon(a, vertices)
imageio.imsave('triangle.png', a)
the-stack_0_14339
#________INDEX____________.
# |
# 3 functions |
# (6=3+2+1) |
# |
# -4 auxiliary |
# -1 main) |
# |
# (if __name__==__main__) |
#_________________________|
import sys
import re
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
from scipy.stats import dirichlet, beta, bernoulli
from sklearn.metrics import precision_score, recall_score, f1_score
from functools import partial
#-------------------------------------------------------
# First part: Four (4) auxiliary generic functions
#
# (1) "tags_dict" : mapping from numbers
# to composers' names
#
# (2) "four_plots" : compare four classifiers
# for a specific composer
# in a specific axes
# (humans, random, random forests &
# neural network). It's
# a dispatcher for 'plotter'
#
# (3) "plotter" : the actual plotter func
#
# (4) "measure_dispatcher": choose the comparison measure
#
#---------------------------------------------------------
# (1)
def tags_dict():
with open('tags.dat','r') as f:
tags_list = f.readlines()
tags = {}
key_pattern = re.compile('item:[ ]+([a-z]+)',re.I)
item_pattern = re.compile('key:[ ]+([0-9]+)',re.I)
for x in tags_list:
tags[re.search(
key_pattern, x
).group(1)] = re.search(
item_pattern, x
).group(1)
inverted_tags = {}
for k,i in tags.items(): inverted_tags[i] = k
return tags, inverted_tags
# (2)
def four_plots(name: str, axis, data: dict):
plotter('humans',name, axis, data['humans'])
plotter('network',name, axis, data['network'])
plotter('random',name, axis, data['random'])
plotter('forest',name, axis, data['forest'])
# insert here visual specs!
#---------------------------------------------
#ax.legend()
#ax.set_title(f'{name}')
props = dict(boxstyle='round', facecolor='lavender', alpha=0.3)
ax.text(0.5, 0.95, name, transform=ax.transAxes, fontsize=12,
verticalalignment='top', bbox=props)
ax.set_xlim(0,100)
ax.set_ylim(0,200)
ax.set_yticklabels([])
ax.set_yticks([0,100,200])
ax.set_xticks([0,25,50,75,100])
#---------------------------------------------
return
# (3)
def plotter(being: str, name: str, axis, data: dict):
# Color-blind friendly palette
colors = {'forest':'#d7191c',
'network':'#fdae61',
'random':'#abd9e9',
'humans':'#2c7bb6',}
# Beta Distribution generation from the measured predictive-quality
ypred = beta.rvs(
1+data[f'total {name}'][0] * data[name][0],
1+data[f'total {name}'][0] * (1 - data[name][0]),
size=4000,
)
# plot with Seaborn that binnarizes automatically
sns.distplot(100*ypred, bins=None, hist=True, kde=False,
label = f'{being}', ax=axis, color=colors[being])
return
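# --- Note added for clarity (hedged reading of the Beta construction above) ---
# With n = data[f'total {name}'][0] observations and empirical rate p = data[name][0],
# the samples are drawn from Beta(1 + n*p, 1 + n*(1-p)): a uniform Beta(1, 1) prior
# updated by roughly n*p "successes" and n*(1-p) "failures". For example (toy numbers),
# p = 0.8 over n = 500 answers gives Beta(401, 101), concentrated near 80%.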
# (4)
def measure_dispatcher(measure: str,ytrue: np.ndarray,
ypred:np.ndarray, optrand = 0):
"""
possible measures:
'recall' == TP / (TP + FN)
'precision' == TP / (TP + FP)
'f1' == 2 * TP / (2*TP + FP + FN)
'accuracy' = (TP + TN) / (TN + FN + TP + FP)
"""
# Random ytrue,ypred & Tags
if optrand:
ytrue_rand, ypred_rand = np.split(
np.argmax(
dirichlet.rvs(
alpha = [1,1]*4,
size = 2*optrand
),
1,
).reshape( #split 2
2,-1,
),
2,0,
)
ytrue_rand = ytrue_rand.reshape(-1,)
ypred_rand = ypred_rand.reshape(-1,)
tags = {int(x):y for x,y in tags_dict()[1].items()}
# Recall
def recall(ytrue, ypred):
nonlocal tags
precision = recall_score(ytrue, ypred, average=None)
data = {}
for x in tags.keys():
data[tags[x]] = [precision[x]]
#data[f'total {tags[x]}'] = [np.unique(
# ypred,
# return_counts=True,
# )[1][x]]
data[f'total {tags[x]}'] = [500]
return data
# Precision
def precision(ytrue, ypred):
nonlocal tags
precision = precision_score(ytrue, ypred, average=None)
data = {}
for x in tags.keys():
data[tags[x]] = [precision[x]]
data[f'total {tags[x]}'] = [np.unique(
ypred,
return_counts=True,
)[1][x]]
return data
# F1
def f1(ytrue, ypred):
nonlocal tags
precision = f1_score(ytrue, ypred, average=None)
data = {}
for x in tags.keys():
data[tags[x]] = [precision[x]]
data[f'total {tags[x]}'] = [np.unique(
ypred,
return_counts=True,
)[1][x]]
return data
# Accuracy
def accuracy(ytrue, ypred):
nonlocal tags
data = {}
for x in tags.keys():
temp = []
for i,y in enumerate(ytrue):
if ypred[i]==y and y==x: temp.append(1)
elif ypred[i]==x: temp.append(0)
elif y==x: temp.append(0)
elif y!=x and ypred[i]!=x: temp.append(1)
(data[tags[x]],
data[f'total {tags[x]}']) = ([sum(temp)/len(temp)],
[len(temp)])
return data
func = {'accuracy': accuracy, 'f1':f1,
'precision': precision, 'recall':recall,}
if optrand:
return func[measure](ytrue_rand, ypred_rand)
else:
return func[measure](ytrue, ypred)
#------------------------------------------------------------------
# M A I N
#------------------------------------------------------------------
def main(measure):
# Define the measure!
#
# (with TP: True Positive
# TN: True Negative
# ...etc)
#
# 'recall' == TP / (TP + FN)
#
# 'precision' == TP / (TP + FP)
#
# 'f1' == 2 * TP / (2*TP + FP + FN)
#
# 'accuracy' = (TP + TN) / (TN + FN + TP + FP)
#____________________________________________
expand = partial(measure_dispatcher, measure)
#--------------------------------------------
# 1) HUMAN answers are loaded
#humans = pd.read_csv('human-predictions.csv').iloc[:,1:]
a=pd.read_csv(
'human-predictions.csv'
).applymap(
lambda x: int(
tags_dict()[0][x]
)
).to_numpy()
humans = pd.DataFrame(expand(a[:,0],a[:,1]))
# 2) Random
samples = 6000
random = expand(range(samples), range(samples), samples)
# 3)
if True:
#----R-A-N-D-O-M--F-O-R-E-S-T---c-l-a-s-s-i-f-i-e-r----.
# choose: |
# 'val' == validation set (more points) |
# |
# 'test' == testing set (more comparable, only |
# over the audio-samples|
# humans answered on) |
name = 'val'
#------------------------------------------------------/
forest = pd.read_csv(name+'-forest-predictions.csv')
forest = expand(forest.iloc[:,0].to_numpy(),
forest.iloc[:,1].to_numpy())
# 4)
if True:
#----------N-E-U-R-A-L--N-E-T-W-O-R-K------------------.
# choose: |
# 'val' == validation set (more points) |
# |
# 'test' == testing set (more comparable, only |
# over the audio-samples|
# humans answered on) |
name = 'val'
#------------------------------------------------------/
network = pd.read_csv(name+'-network-predictions.csv')
network = expand(network.iloc[:,0].to_numpy(),
network.iloc[:,1].to_numpy())
return {'random':random, 'humans':humans,
'network':network, 'forest':forest}
if __name__=='__main__':
# Define a figure with 2x4=8 subplots
nrow = 4; ncol = 2;
alpha = 8
fig, axs = plt.subplots(nrows=nrow, ncols=ncol,
figsize=(alpha*2,alpha),dpi=200)
# Extract composers names from the quiz data
names = ['Scarlatti', 'Sor', 'Bach',
'Vivaldi','Stravinsky',
'Haendel', 'Liszt', 'Haydn', ]
# Generate a dictionary with results
try:
metric = sys.argv[1]
except IndexError:
metric = 'f1'
data = main(metric)
# Plot one composers' classification result in each subplot
for i,ax in enumerate(axs.reshape(-1)):
try:
four_plots(names[i], ax, data)
except IndexError: pass
#------------------------------------------
# Visual specs, Save & View!
#------------------------------------------
fig.text(0.5, 0.89, '(Probability Density Function of the True Positive Rate'\
' conditioned to a positive observation)' ,ha='center',fontsize=11)
fig.text(0.5,0.92, '$PDF(Recall)$', fontsize=14.5,ha='center')
fig.text(0.5, 0.04, 'Probability of a positive being True Positive', ha='center')
fig.text(0.1, 0.5, 'Probability Density', va='center', rotation='vertical')
fig.set_facecolor('ivory')
for i,ax in enumerate(axs.reshape(-1)):
if i==7: ax.legend()
#------------------------------------------
plt.savefig('../RESULTS/RESULTS.png', facecolor='ivory')
the-stack_0_14340
_base_ = [
'../swin/cascade_mask_rcnn_swin_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco.py'
]
model = dict(
backbone=dict(
type='CBSwinTransformer',
),
neck=dict(
type='CBFPN',
),
test_cfg = dict(
rcnn=dict(
score_thr=0.001,
nms=dict(type='soft_nms'),
)
)
)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from HTC
data_root = "datasets/objects365/"
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Resize',
img_scale=[(1600, 400), (1600, 1400)],
#img_scale=[(256,256), (256,256)],
multiscale_mode='range',
keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1600, 1400),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
samples_per_gpu = 1
data = dict(
    samples_per_gpu=samples_per_gpu,
    train=dict(
        ann_file=data_root + 'annotations/instances_train.json',
        img_prefix=data_root + 'train/',
        pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(lr=0.0001*(samples_per_gpu/2))
runner=dict(max_epochs=20)
load_from = "/home/ubuntu/efs/pretrained_weight/cascade_mask_rcnn_cbv2_swin_small_patch4_window7_mstrain_400-1400_adamw_3x_coco.pth" |
the-stack_0_14341 | def strASCII(s):
    """Return the concatenated 8-bit binary ASCII representation of a string."""
    c = []
    for x in s:
        c.append(format(ord(x), 'b').zfill(8))
    c = "".join(c)
    return c
def ascistr(l, r):
    """Join two binary strings and decode them back to text, 8 bits per character."""
    joi = [l, r]
    joi = ''.join(joi)
    # split the combined bit string into 8-bit chunks
    joi = list(map("".join, zip(*[iter(joi)] * 8)))
    #print joi
    for x in range(len(joi)):
        joi[x] = chr(int(joi[x], 2))
    joi = ''.join(joi)
    return joi
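
# Illustrative round-trip check (a small sketch added for clarity; the split
# point below is arbitrary -- ascistr simply rejoins its two arguments):
if __name__ == '__main__':
    bits = strASCII("hi")              # '0110100001101001'
    half = len(bits) // 2
    assert ascistr(bits[:half], bits[half:]) == "hi"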
|
the-stack_0_14343 | from django.conf import settings
from django.db import models
class PGPManager(models.Manager):
use_for_related_fields = True
use_in_migrations = True
@staticmethod
def _get_pgp_symmetric_decrypt_sql(field):
"""Decrypt sql for symmetric fields using the cast sql if required."""
sql = """pgp_sym_decrypt("{0}"."{1}", '{2}')"""
if hasattr(field, 'cast_sql'):
sql = field.cast_sql % sql
return sql.format(
field.model._meta.db_table,
field.name,
settings.PGCRYPTO_KEY,
)
@staticmethod
def _get_pgp_public_key_decrypt_sql(field):
"""Decrypt sql for public key fields using the cast sql if required."""
sql = """pgp_pub_decrypt("{0}"."{1}", dearmor('{2}'))"""
if hasattr(field, 'cast_sql'):
sql = field.cast_sql % sql
return sql.format(
field.model._meta.db_table,
field.name,
settings.PRIVATE_PGP_KEY,
)
def get_queryset(self, *args, **kwargs):
"""Decryption in queryset through meta programming."""
# importing here otherwise there's a circular reference issue
from pgcrypto.mixins import PGPSymmetricKeyFieldMixin, PGPPublicKeyFieldMixin
skip_decrypt = kwargs.pop('skip_decrypt', None)
qs = super().get_queryset(*args, **kwargs)
# The Django admin skips this process because it's extremely slow
if not skip_decrypt:
select_sql = {}
encrypted_fields = []
for field in self.model._meta.get_fields():
if isinstance(field, PGPSymmetricKeyFieldMixin):
select_sql[field.name] = self._get_pgp_symmetric_decrypt_sql(field)
encrypted_fields.append(field.name)
elif isinstance(field, PGPPublicKeyFieldMixin):
select_sql[field.name] = self._get_pgp_public_key_decrypt_sql(field)
encrypted_fields.append(field.name)
# Django queryset.extra() is used here to add decryption sql to query.
qs = qs.defer(
*encrypted_fields
).extra(
select=select_sql
)
return qs
|
the-stack_0_14345 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
#
"""
File name: image.py
Author: dhilipsiva <[email protected]>
Date created: 2017-02-10
"""
from PIL import ImageFilter
from PIL import Image
size = (128, 128)
im = Image.open("corgi.jpg")
# print(im.format, im.size, im.mode)
# im.thumbnail(size)
# im.show()
box = (1400, 500, 2000, 1000)
region = im.crop(box)
region = region.transpose(Image.ROTATE_90)
# region = region.transpose(Image.FLIP_LEFT_RIGHT)
# region.resize((128, 128))
# region = region.filter(ImageFilter.DETAIL)
region = region.filter(ImageFilter.BLUR)
region.show()
# out = im.rotate(45) # degrees counter-clockwise
|
the-stack_0_14346 | '''
2016 - 2017 ACSL American Computer Science League
SENIOR DIVISION
Contest #2 ASCENDING STRINGS
'''
from unittest import TestCase
def atFirst(snum):
    '''
    First thoughts (kept for reference): alternately peel one digit off the
    front and one off the back of the string. Superseded by parsNums below.
    '''
    h, r = 0, len(snum)
    res = []
    while r > h:
        # forward
        res.append(int(snum[h]))
        h += 1
        if r <= h: break
        # backward
        res.append(int(snum[r - 1]))
        r -= 1
    return res
def parsNums(snum):
def cutforth(snum, h, r, maxn):
if h >= r: return None, h
l = 1
n = int(snum[h : h + l])
if not maxn: return n, h + 1
while n <= maxn and h + l < r:
l += 1
n = int(snum[h : h + l])
return n if h + l <= r and n > maxn else None, h + l
def cutback(snum, h, r, maxn):
if h >= r: return None, r
l = 1
n = int(snum[r - 1 : r - l - 1 : -1])
if not maxn: return n, r - 1
while n <= maxn and h + l < r:
l += 1
n = int(snum[r - 1 : r - l - 1 : -1])
return n if h + l <= r and n > maxn else None, r - l
h, r = 0, len(snum)
res = []
n = None
while r > h:
# forward, backward
n, h = cutforth(snum, h, r, n)
if n == None: break
res.append(n)
n, r = cutback(snum, h, r, n)
if n == None: break
res.append(n)
return res
if __name__ == "__main__":
t = TestCase()
t.assertCountEqual([3, 8, 14, 35, 159], parsNums('31415926538'))
t.assertCountEqual([3, 5, 14, 62, 159], parsNums('314159265'))
t.assertCountEqual([2, 7, 16], parsNums('201617'))
t.assertCountEqual([1, 9, 23, 87, 456], parsNums('123456789'))
t.assertCountEqual([1, 4, 22, 44, 333], parsNums('1223334444'))
t.assertCountEqual([2, 8, 71, 281, 828], parsNums('2718281828'))
t.assertCountEqual([1, 12, 22, 23], parsNums('12233221'))
t.assertCountEqual([5, 50], parsNums('5005'))
t.assertCountEqual([2, 5], parsNums('250'))
t.assertCountEqual([9], parsNums('9'))
print('OK!')
|
the-stack_0_14348 | import numpy as np
import os
import torch
class Dataset(torch.utils.data.Dataset):
'Characterizes a dataset for PyTorch'
def __init__(self, trajs, device, steps=20):
'Initialization'
dim = trajs[0].shape[1]
self.x = []
self.x_n = np.zeros((0, dim))
for i in range(steps):
tr_i_all = np.zeros((0,dim))
for tr_i in trajs:
_trj = tr_i[i:i-steps,:]
tr_i_all = np.concatenate((tr_i_all, _trj), 0)
self.x_n = np.concatenate((self.x_n, tr_i[-1:,:]),0)
self.x.append(tr_i_all)
self.x = torch.from_numpy(np.array(self.x)).float().to(device)
self.x_n = torch.from_numpy(np.array(self.x_n)).float().to(device)
self.len_n = self.x_n.shape[0]
self.len = self.x.shape[1]
self.steps_length = steps
self.step = steps - 1 |
the-stack_0_14350 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
# the name of the project
name = 'nbformat'
#-----------------------------------------------------------------------------
# Minimal Python version sanity check
#-----------------------------------------------------------------------------
import sys
#-----------------------------------------------------------------------------
# get on with it
#-----------------------------------------------------------------------------
import os
from glob import glob
from distutils.core import setup
pjoin = os.path.join
here = os.path.abspath(os.path.dirname(__file__))
pkg_root = pjoin(here, name)
packages = []
for d, _, _ in os.walk(pjoin(here, name)):
if os.path.exists(pjoin(d, '__init__.py')):
packages.append(d[len(here)+1:].replace(os.path.sep, '.'))
package_data = {
'nbformat.tests' : [
'*.ipynb',
],
'nbformat.v3' : [
'nbformat.v3*.schema.json',
],
'nbformat.v4' : [
'nbformat.v4*.schema.json',
],
}
version_ns = {}
with open(pjoin(here, name, '_version.py')) as f:
exec(f.read(), {}, version_ns)
setup_args = dict(
name = name,
version = version_ns['__version__'],
scripts = glob(pjoin('scripts', '*')),
packages = packages,
package_data = package_data,
include_package_data = True,
description = "The Jupyter Notebook format",
long_description= """
This package contains the base implementation of the Jupyter Notebook format,
and Python APIs for working with notebooks.
""",
author = 'Jupyter Development Team',
author_email = '[email protected]',
url = 'http://jupyter.org',
license = 'BSD',
python_requires = '>=3.5',
platforms = "Linux, Mac OS X, Windows",
keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],
classifiers = [
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
if 'develop' in sys.argv or any(a.startswith('bdist') for a in sys.argv):
import setuptools
setuptools_args = {}
install_requires = setuptools_args['install_requires'] = [
'ipython_genutils',
'traitlets>=4.1',
'jsonschema>=2.4,!=2.5.0',
'jupyter_core',
]
extras_require = setuptools_args['extras_require'] = {
'fast': ['fastjsonschema'],
'test': ['check-manifest', 'fastjsonschema', 'testpath', 'pytest', 'pytest-cov'],
}
if 'setuptools' in sys.modules:
setup_args.update(setuptools_args)
setup_args['entry_points'] = {
'console_scripts': [
'jupyter-trust = nbformat.sign:TrustNotebookApp.launch_instance',
]
}
setup_args.pop('scripts', None)
if __name__ == '__main__':
setup(**setup_args)
|
the-stack_0_14351 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************
# @Time : 2018/9/9 15:52
# @Author : Xiang Ling
# @Lab : nesa.zju.edu.cn
# @File : defenses.py
# **************************************
import os
from abc import ABCMeta
from abc import abstractmethod
class Defense(object):
__metaclass__ = ABCMeta
def __init__(self, model=None, defense_name=None):
self.model = model
self.defense_name = defense_name
defense_dir = '../DefenseEnhancedModels/{}'.format(self.defense_name)
if self.defense_name not in os.listdir('../DefenseEnhancedModels/'):
os.mkdir(defense_dir)
print('creating the {} folder for storing the {} defense'.format(defense_dir, self.defense_name))
else:
print('the storing {} folder is already existing'.format(defense_dir))
@abstractmethod
def defense(self):
print("abstract method of 'Defenses' is not implemented")
raise NotImplementedError
|
the-stack_0_14353 | import argparse
import configparser
from collections import defaultdict
import itertools
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
import os
import random
import time
import numpy as np
import chainer
if chainer.backends.cuda.available:
import cupy as xp
else:
xp = np
from chainercv.utils import non_maximum_suppression
from PIL import ImageDraw, Image
from coco_dataset import get_coco_dataset
from mpii_dataset import get_mpii_dataset
from model import PoseProposalNet
from network_resnet import ResNet50
from utils import parse_size
COLOR_MAP = {}
DIRECTED_GRAPHS = [[]]
DATA_MODULE = None
def get_feature(model, image):
global inference_time
start = time.time()
image = xp.asarray(image)
processed_image = model.feature_layer.prepare(image)
resp, conf, x, y, w, h, e = model.predict(xp.expand_dims(processed_image, axis=0))
resp = chainer.backends.cuda.to_cpu(resp.array)
conf = chainer.backends.cuda.to_cpu(conf.array)
w = chainer.backends.cuda.to_cpu(w.array)
h = chainer.backends.cuda.to_cpu(h.array)
x = chainer.backends.cuda.to_cpu(x.array)
y = chainer.backends.cuda.to_cpu(y.array)
e = chainer.backends.cuda.to_cpu(e.array)
resp = np.squeeze(resp, axis=0)
conf = np.squeeze(conf, axis=0)
x = np.squeeze(x, axis=0)
y = np.squeeze(y, axis=0)
w = np.squeeze(w, axis=0)
h = np.squeeze(h, axis=0)
e = np.squeeze(e, axis=0)
inference_time=time.time() - start
logger.info('inference time {:.5f}'.format(inference_time))
return resp, conf, x, y, w, h, e
def estimate(model, image, detection_thresh=0.15, min_num_keypoints=-1):
feature_map = get_feature(model, image)
return get_humans_by_feature(model, feature_map, detection_thresh, min_num_keypoints)
def get_humans_by_feature(model, feature_map, detection_thresh=0.15, min_num_keypoints=-1):
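    # Decode the feature map into per-person keypoint boxes:
    #   1) threshold the root (instance) channel and run NMS to pick person cells,
    #   2) from each person cell greedily follow every directed limb graph,
    #      taking the child cell with the strongest edge response inside the
    #      local grid and stopping a chain once the response drops below threshold.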
resp, conf, x, y, w, h, e = feature_map
start = time.time()
delta = resp * conf
K = len(model.keypoint_names)
outW, outH = model.outsize
ROOT_NODE = 0 # instance
start = time.time()
rx, ry = model.restore_xy(x, y)
rw, rh = model.restore_size(w, h)
ymin, ymax = ry - rh / 2, ry + rh / 2
xmin, xmax = rx - rw / 2, rx + rw / 2
bbox = np.array([ymin, xmin, ymax, xmax])
bbox = bbox.transpose(1, 2, 3, 0)
root_bbox = bbox[ROOT_NODE]
score = delta[ROOT_NODE]
candidate = np.where(score > detection_thresh)
score = score[candidate]
root_bbox = root_bbox[candidate]
selected = non_maximum_suppression(
bbox=root_bbox, thresh=0.3, score=score)
root_bbox = root_bbox[selected]
logger.info('detect instance {:.5f}'.format(time.time() - start))
start = time.time()
humans = []
e = e.transpose(0, 3, 4, 1, 2)
ei = 0 # index of edges which contains ROOT_NODE as begin
# alchemy_on_humans
for hxw in zip(candidate[0][selected], candidate[1][selected]):
human = {ROOT_NODE: bbox[(ROOT_NODE, hxw[0], hxw[1])]} # initial
for graph in DIRECTED_GRAPHS:
eis, ts = graph
i_h, i_w = hxw
for ei, t in zip(eis, ts):
index = (ei, i_h, i_w) # must be tuple
u_ind = np.unravel_index(np.argmax(e[index]), e[index].shape)
j_h = i_h + u_ind[0] - model.local_grid_size[1] // 2
j_w = i_w + u_ind[1] - model.local_grid_size[0] // 2
if j_h < 0 or j_w < 0 or j_h >= outH or j_w >= outW:
break
if delta[t, j_h, j_w] < detection_thresh:
break
human[t] = bbox[(t, j_h, j_w)]
i_h, i_w = j_h, j_w
if min_num_keypoints <= len(human) - 1:
humans.append(human)
logger.info('alchemy time {:.5f}'.format(time.time() - start))
logger.info('num humans = {}'.format(len(humans)))
return humans
def draw_humans(keypoint_names, edges, pil_image, humans, mask=None, visbbox=True):
"""
This is what happens when you use alchemy on humans...
note that image should be PIL object
"""
start = time.time()
drawer = ImageDraw.Draw(pil_image)
for human in humans:
for k, b in human.items():
if mask:
fill = (255, 255, 255) if k == 0 else None
else:
fill = None
ymin, xmin, ymax, xmax = b
if k == 0: # human instance
# adjust size
t = 1
xmin = int(xmin * t + xmax * (1 - t))
xmax = int(xmin * (1 - t) + xmax * t)
ymin = int(ymin * t + ymax * (1 - t))
ymax = int(ymin * (1 - t) + ymax * t)
if mask:
resized = mask.resize(((xmax - xmin), (ymax - ymin)))
pil_image.paste(resized, (xmin, ymin), mask=resized)
else:
drawer.rectangle(xy=[xmin, ymin, xmax, ymax],
fill=fill,
outline=COLOR_MAP[keypoint_names[k]])
else:
if visbbox:
drawer.rectangle(xy=[xmin, ymin, xmax, ymax],
fill=fill,
outline=COLOR_MAP[keypoint_names[k]])
else:
r = 2
x = (xmin + xmax) / 2
y = (ymin + ymax) / 2
drawer.ellipse((x - r, y - r, x + r, y + r),
fill=COLOR_MAP[keypoint_names[k]])
for s, t in edges:
if s in human and t in human:
by = (human[s][0] + human[s][2]) / 2
bx = (human[s][1] + human[s][3]) / 2
ey = (human[t][0] + human[t][2]) / 2
ex = (human[t][1] + human[t][3]) / 2
drawer.line([bx, by, ex, ey],
fill=COLOR_MAP[keypoint_names[s]], width=3)
logger.info('draw humans {: .5f}'.format(time.time() - start))
return pil_image
def create_model(args, config):
global DIRECTED_GRAPHS, COLOR_MAP
dataset_type = config.get('dataset', 'type')
if dataset_type == 'mpii':
import mpii_dataset as x_dataset
elif dataset_type == 'coco':
import coco_dataset as x_dataset
else:
raise Exception('Unknown dataset {}'.format(dataset_type))
KEYPOINT_NAMES = x_dataset.KEYPOINT_NAMES
EDGES = x_dataset.EDGES
DIRECTED_GRAPHS = x_dataset.DIRECTED_GRAPHS
COLOR_MAP = x_dataset.COLOR_MAP
model = PoseProposalNet(
model_name=config.get('model_param', 'model_name'),
insize=parse_size(config.get('model_param', 'insize')),
keypoint_names=KEYPOINT_NAMES,
edges=np.array(EDGES),
local_grid_size=parse_size(config.get('model_param', 'local_grid_size')),
parts_scale=parse_size(config.get(dataset_type, 'parts_scale')),
instance_scale=parse_size(config.get(dataset_type, 'instance_scale')),
width_multiplier=config.getfloat('model_param', 'width_multiplier'),
)
logger.info('input size = {}'.format(model.insize))
logger.info('output size = {}'.format(model.outsize))
try:
result_dir = args.model
except:
result_dir = args
chainer.serializers.load_npz(
os.path.join(result_dir, 'bestmodel.npz'),
model
)
logger.info('cuda enable {}'.format(chainer.backends.cuda.available))
logger.info('ideep enable {}'.format(chainer.backends.intel64.is_ideep_available()))
if chainer.backends.cuda.available:
logger.info('gpu mode')
model.to_gpu()
elif chainer.backends.intel64.is_ideep_available():
logger.info('Indel64 mode')
model.to_intel64()
return model
def load_config(args):
config = configparser.ConfigParser()
config_path = os.path.join(args.model, 'src', 'config.ini')
logger.info(config_path)
config.read(config_path, 'UTF-8')
return config
def predict(args):
config = load_config(args)
detection_thresh = config.getfloat('predict', 'detection_thresh')
min_num_keypoints = config.getint('predict', 'min_num_keypoints')
dataset_type = config.get('dataset', 'type')
logger.info('loading {}'.format(dataset_type))
if dataset_type == 'mpii':
_, test_set = get_mpii_dataset(
insize=parse_size(config.get('model_param', 'insize')),
image_root=config.get(dataset_type, 'images'),
annotations=config.get(dataset_type, 'annotations'),
train_size=config.getfloat(dataset_type, 'train_size'),
min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
seed=config.getint('training_param', 'seed'),
)
elif dataset_type == 'coco':
test_set = get_coco_dataset(
insize=parse_size(config.get('model_param', 'insize')),
image_root=config.get(dataset_type, 'val_images'),
annotations=config.get(dataset_type, 'val_annotations'),
min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
)
else:
raise Exception('Unknown dataset {}'.format(dataset_type))
model = create_model(args, config)
# choose specific image
idx = random.choice(range(len(test_set)))
idx = 50
image = test_set.get_example(idx)['image']
humans = estimate(
model,
image.astype(np.float32),
detection_thresh,
min_num_keypoints,
)
pil_image = Image.fromarray(image.transpose(1, 2, 0).astype(np.uint8))
pil_image = draw_humans(
keypoint_names=model.keypoint_names,
edges=model.edges,
pil_image=pil_image,
humans=humans,
visbbox=config.getboolean('predict', 'visbbox')
)
#pil_image.save('result.png', 'PNG')
    pil_image.save('result_' + 'X'.join((str(model.insize[0]), str(model.insize[1]))) + '_idx_' + str(idx) + '_time_' + str(round(inference_time, 3)) + 's.png', 'PNG')
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('model', help='path/to/model', type=str)
return parser.parse_args()
def main():
args = parse_arguments()
predict(args)
if __name__ == '__main__':
main()
|
the-stack_0_14355 | import scipy as sp
from timer import timer
def entropy(values):
"""A slow way to calculate the entropy of the input values"""
values = sp.asarray(values).flatten()
#calculate the probablility of a value in a vector
vUni = sp.unique(values)
lenval = float(values.size)
FreqData = sp.zeros(vUni.shape, dtype=float)
for i in xrange(FreqData.size):
FreqData[i] = sum(values==vUni[i])/lenval
return -sum([FreqData[i]*sp.math.log(FreqData[i],2) for i in xrange(FreqData.size)])
def entropy2(values):
"""Calculate the entropy of vector values.
values will be flattened to a 1d ndarray."""
values = sp.asarray(values).flatten()
p = sp.diff(sp.c_[0,sp.diff(sp.sort(values)).nonzero(), values.size])/float(values.size)
H = (p*sp.log2(p)).sum()
return -H
def chebyshev2(values, degree=1):
"""Calculate the Chebyshev Polynomials using previous results"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
for i in range(2,degree):
for x in range(len(values)):
A[i,x] = 2*values[x]*A[i-1,x]-A[i-2,x]
return A
def chebyshev2_lc(values, degree=1):
"""Calculate the Chebyshev Polynomials using previous results"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
for i in range(2,degree):
A[i,:] = [2*x for x in values]*A[i-1,:]-A[i-2,:]
return A
def chebyshev_sp(values, degree=1):
"""Calculate the Chebyshev Polynomials using the scipy functions"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
for i in range(2,degree):
A[i,:] = sp.cos(i*sp.arccos(values))
return A
def chebyshev_vec(values, degree=1):
"""Calculate the Chebyshev Polynobials
This implementation uses sp.vectorize to vectorize python's math functions)"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
cos = sp.vectorize(sp.math.cos)
acos = sp.vectorize(sp.math.acos)
for i in range(2,degree):
A[i,:] = cos(i*acos(values))
return A
def chebyshev_lc(values, degree=1):
"""Calculate the Chebyshev Polynomials using list comprehensions"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
for i in range(2,degree):
A[i,:] = [sp.math.cos(y) for y in [i*sp.math.acos(x) for x in values]]
return A
def chebyshev(values, degree=1):
"""Calculate the Chebyshev Polynomial using
    Tn(x) = cos(n*arccos(x))"""
values = sp.asarray(values)
A = sp.zeros((degree, len(values)))
A[0,:]=1
try:
A[1,:]=values
except IndexError:
return A
for i in range(2,degree):
for x in values:
A[i,:] = sp.math.cos(i*sp.math.acos(x))
return A
if __name__ == '__main__':
from timer import timer
testvals = sp.linspace(-1,1,500)
funcs = [chebyshev, chebyshev_lc, chebyshev_vec, chebyshev_sp, chebyshev2, chebyshev2_lc]
with timer(loops=5) as t:
for f in funcs:
t.time(f, testvals, 100)
t.printTimes()
|
the-stack_0_14356 | """
Django settings for {{ project_name }} project.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
import django_heroku
from decouple import config
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
ENVIRONMENT = config('ENVIRONMENT', default='local')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY', default='SECRET_KEY')
# SECURITY WARNING: define the correct hosts in production!
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=lambda v: [
s.strip() for s in v.split(',')], default='*')
INSTALLED_APPS = [
'home',
'search',
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'wagtail.contrib.modeladmin',
'wagtail.contrib.styleguide',
'modelcluster',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'django.contrib.sites',
]
SITE_ID = 1
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.core.middleware.SiteMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(conn_max_age=500, ssl_require=True))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATIC_ROOT = os.path.join(PROJECT_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Activate Django-Heroku.
django_heroku.settings(locals())
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
DATA_UPLOAD_MAX_NUMBER_FIELDS = 100000
# Wagtail settings
WAGTAIL_SITE_NAME = "{{ project_name }}"
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = config('BASE_URL', default='BASE_URL')
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
if ENVIRONMENT != 'local':
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
USER_AGENTS_CACHE = None
|
the-stack_0_14360 | import sys
import time
from typing import Any, List, Optional
import tempfile
import pytest
import inspect
import requests
from fastapi import (Cookie, Depends, FastAPI, Header, Query, Request,
APIRouter, BackgroundTasks)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
import ray
from ray import serve
from ray.serve.http_util import make_fastapi_class_based_view
def test_fastapi_function(serve_instance):
app = FastAPI()
@app.get("/{a}")
def func(a: int):
return {"result": a}
@serve.deployment(name="f")
@serve.ingress(app)
class FastAPIApp:
pass
FastAPIApp.deploy()
resp = requests.get("http://localhost:8000/f/100")
assert resp.json() == {"result": 100}
resp = requests.get("http://localhost:8000/f/not-number")
assert resp.status_code == 422 # Unprocessable Entity
assert resp.json()["detail"][0]["type"] == "type_error.integer"
def test_ingress_prefix(serve_instance):
app = FastAPI()
@app.get("/{a}")
def func(a: int):
return {"result": a}
@serve.deployment(route_prefix="/api")
@serve.ingress(app)
class App:
pass
App.deploy()
resp = requests.get("http://localhost:8000/api/100")
assert resp.json() == {"result": 100}
def test_class_based_view(serve_instance):
app = FastAPI()
@app.get("/other")
def hello():
return "hello"
@serve.deployment(name="f")
@serve.ingress(app)
class A:
def __init__(self):
self.val = 1
@app.get("/calc/{i}")
def b(self, i: int):
return i + self.val
@app.post("/calc/{i}")
def c(self, i: int):
return i - self.val
def other(self, msg: str):
return msg
A.deploy()
# Test HTTP calls.
resp = requests.get("http://localhost:8000/f/calc/41")
assert resp.json() == 42
resp = requests.post("http://localhost:8000/f/calc/41")
assert resp.json() == 40
resp = requests.get("http://localhost:8000/f/other")
assert resp.json() == "hello"
# Test handle calls.
handle = A.get_handle()
assert ray.get(handle.b.remote(41)) == 42
assert ray.get(handle.c.remote(41)) == 40
assert ray.get(handle.other.remote("world")) == "world"
def test_make_fastapi_cbv_util():
app = FastAPI()
class A:
@app.get("/{i}")
def b(self, i: int):
pass
# before, "self" is treated as a query params
assert app.routes[-1].endpoint == A.b
assert app.routes[-1].dependant.query_params[0].name == "self"
assert len(app.routes[-1].dependant.dependencies) == 0
make_fastapi_class_based_view(app, A)
# after, "self" is treated as a dependency instead of query params
assert app.routes[-1].endpoint == A.b
assert len(app.routes[-1].dependant.query_params) == 0
assert len(app.routes[-1].dependant.dependencies) == 1
self_dep = app.routes[-1].dependant.dependencies[0]
assert self_dep.name == "self"
assert inspect.isfunction(self_dep.call)
assert "get_current_servable" in str(self_dep.call)
def test_fastapi_features(serve_instance):
app = FastAPI(openapi_url="/my_api.json")
@app.on_event("startup")
def inject_state():
app.state.state_one = "app.state"
@app.middleware("http")
async def add_process_time_header(request: Request, call_next):
start_time = time.time()
response = await call_next(request)
process_time = time.time() - start_time
response.headers["X-Process-Time"] = str(process_time)
return response
class Nested(BaseModel):
val: int
class BodyType(BaseModel):
name: str
price: float = Field(None, gt=1.0, description="High price!")
nests: Nested
class RespModel(BaseModel):
ok: bool
vals: List[Any]
file_path: str
async def yield_db():
yield "db"
async def common_parameters(q: Optional[str] = None):
return {"q": q}
@app.exception_handler(ValueError)
async def custom_handler(_: Request, exc: ValueError):
return JSONResponse(
status_code=500,
content={
"custom_error": "true",
"message": str(exc)
})
def run_background(background_tasks: BackgroundTasks):
path = tempfile.mktemp()
def write_to_file(p):
with open(p, "w") as f:
f.write("hello")
background_tasks.add_task(write_to_file, path)
return path
app.add_middleware(CORSMiddleware, allow_origins="*")
@app.get("/{path_arg}", response_model=RespModel, status_code=201)
async def func(
path_arg: str,
query_arg: str,
body_val: BodyType,
backgrounds_tasks: BackgroundTasks,
do_error: bool = False,
query_arg_valid: Optional[str] = Query(None, min_length=3),
cookie_arg: Optional[str] = Cookie(None),
user_agent: Optional[str] = Header(None),
commons: dict = Depends(common_parameters),
db=Depends(yield_db),
):
if do_error:
raise ValueError("bad input")
path = run_background(backgrounds_tasks)
return RespModel(
ok=True,
vals=[
path_arg,
query_arg,
body_val.price,
body_val.nests.val,
do_error,
query_arg_valid,
cookie_arg,
user_agent.split("/")[0], # returns python-requests
commons,
db,
app.state.state_one,
],
file_path=path,
)
router = APIRouter(prefix="/prefix")
@router.get("/subpath")
def router_path():
return "ok"
app.include_router(router)
@serve.deployment(name="fastapi")
@serve.ingress(app)
class Worker:
pass
Worker.deploy()
url = "http://localhost:8000/fastapi"
resp = requests.get(f"{url}/")
assert resp.status_code == 404
assert "x-process-time" in resp.headers
resp = requests.get(f"{url}/my_api.json")
assert resp.status_code == 200
assert resp.json() # it returns a well-formed json.
resp = requests.get(f"{url}/docs")
assert resp.status_code == 200
assert "<!DOCTYPE html>" in resp.text
resp = requests.get(f"{url}/redoc")
assert resp.status_code == 200
assert "<!DOCTYPE html>" in resp.text
resp = requests.get(f"{url}/path_arg")
assert resp.status_code == 422 # Malformed input
resp = requests.get(
f"{url}/path_arg",
json={
"name": "serve",
"price": 12,
"nests": {
"val": 1
}
},
params={
"query_arg": "query_arg",
"query_arg_valid": "at-least-three-chars",
"q": "common_arg",
})
assert resp.status_code == 201, resp.text
assert resp.json()["ok"]
assert resp.json()["vals"] == [
"path_arg",
"query_arg",
12.0,
1,
False,
"at-least-three-chars",
None,
"python-requests",
{
"q": "common_arg"
},
"db",
"app.state",
]
assert open(resp.json()["file_path"]).read() == "hello"
resp = requests.get(
f"{url}/path_arg",
json={
"name": "serve",
"price": 12,
"nests": {
"val": 1
}
},
params={
"query_arg": "query_arg",
"query_arg_valid": "at-least-three-chars",
"q": "common_arg",
"do_error": "true"
})
assert resp.status_code == 500
assert resp.json()["custom_error"] == "true"
resp = requests.get(f"{url}/prefix/subpath")
assert resp.status_code == 200
resp = requests.get(
f"{url}/docs",
headers={
"Access-Control-Request-Method": "GET",
"Origin": "https://googlebot.com"
})
assert resp.headers["access-control-allow-origin"] == "*", resp.headers
def test_fast_api_mounted_app(serve_instance):
app = FastAPI()
subapp = FastAPI()
@subapp.get("/hi")
def hi():
return "world"
app.mount("/mounted", subapp)
@serve.deployment(route_prefix="/api")
@serve.ingress(app)
class A:
pass
A.deploy()
assert requests.get(
"http://localhost:8000/api/mounted/hi").json() == "world"
def test_fastapi_init_lifespan_should_not_shutdown(serve_instance):
app = FastAPI()
@app.on_event("shutdown")
async def shutdown():
1 / 0
@serve.deployment
@serve.ingress(app)
class A:
def f(self):
return 1
A.deploy()
# Without a proper fix, the actor won't be initialized correctly.
# Because it will crash on each startup.
assert ray.get(A.get_handle().f.remote()) == 1
def test_fastapi_duplicate_routes(serve_instance):
app = FastAPI()
@serve.deployment(route_prefix="/api/v1")
@serve.ingress(app)
class App1:
@app.get("/")
def func_v1(self):
return "first"
@serve.deployment(route_prefix="/api/v2")
@serve.ingress(app)
class App2:
@app.get("/")
def func_v2(self):
return "second"
@app.get("/ignored")
def ignored():
pass
App1.deploy()
App2.deploy()
resp = requests.get("http://localhost:8000/api/v1")
assert resp.json() == "first"
resp = requests.get("http://localhost:8000/api/v2")
assert resp.json() == "second"
for version in ["v1", "v2"]:
resp = requests.get(f"http://localhost:8000/api/{version}/ignored")
assert resp.status_code == 404
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows")
@pytest.mark.parametrize("route_prefix", [None, "/", "/subpath"])
def test_doc_generation(serve_instance, route_prefix):
app = FastAPI()
@serve.deployment(route_prefix=route_prefix)
@serve.ingress(app)
class App:
@app.get("/")
def func1(self, arg: str):
return "hello"
App.deploy()
if route_prefix is None:
prefix = "/App"
else:
prefix = route_prefix
if not prefix.endswith("/"):
prefix += "/"
r = requests.get(f"http://localhost:8000{prefix}openapi.json")
assert r.status_code == 200
assert len(r.json()["paths"]) == 1
assert "/" in r.json()["paths"]
assert len(r.json()["paths"]["/"]) == 1
assert "get" in r.json()["paths"]["/"]
r = requests.get(f"http://localhost:8000{prefix}docs")
assert r.status_code == 200
@serve.deployment(route_prefix=route_prefix)
@serve.ingress(app)
class App:
@app.get("/")
def func1(self, arg: str):
return "hello"
@app.post("/hello")
def func2(self, arg: int):
return "hello"
App.deploy()
r = requests.get(f"http://localhost:8000{prefix}openapi.json")
assert r.status_code == 200
assert len(r.json()["paths"]) == 2
assert "/" in r.json()["paths"]
assert len(r.json()["paths"]["/"]) == 1
assert "get" in r.json()["paths"]["/"]
assert "/hello" in r.json()["paths"]
assert len(r.json()["paths"]["/hello"]) == 1
assert "post" in r.json()["paths"]["/hello"]
r = requests.get(f"http://localhost:8000{prefix}docs")
assert r.status_code == 200
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|
the-stack_0_14362 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('settings', '0003_settings_action_delete_confirm'),
]
operations = [
migrations.RemoveField(
model_name='settings',
name='id',
),
migrations.AlterField(
model_name='settings',
name='user',
field=models.OneToOneField(default=None, primary_key=True, to=settings.AUTH_USER_MODEL, related_name='settings', serialize=False),
),
]
|
the-stack_0_14363 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MicroPython documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 21 11:42:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Work out the port to generate the docs for
from collections import OrderedDict
micropy_port = os.getenv('MICROPY_PORT') or 'pyboard'
tags.add('port_' + micropy_port)
ports = OrderedDict((
('unix', 'unix'),
('pyboard', 'the pyboard'),
('wipy', 'the WiPy'),
('esp8266', 'the ESP8266'),
))
# The members of the html_context dict are available inside topindex.html
micropy_version = os.getenv('MICROPY_VERSION') or 'latest'
micropy_all_versions = (os.getenv('MICROPY_ALL_VERSIONS') or 'latest').split(',')
url_pattern = '%s/en/%%s/%%s' % (os.getenv('MICROPY_URL_PREFIX') or '/',)
html_context = {
'port':micropy_port,
'port_name':ports[micropy_port],
'port_version':micropy_version,
'all_ports':[
(port_id, url_pattern % (micropy_version, port_id))
for port_id, port_name in ports.items()
],
'all_versions':[
(ver, url_pattern % (ver, micropy_port))
for ver in micropy_all_versions
],
'downloads':[
('PDF', url_pattern % (micropy_version, 'micropython-%s.pdf' % micropy_port)),
],
}
# Specify a custom master document based on the port name
master_doc = micropy_port + '_' + 'index'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx_selective_exclude.modindex_exclude',
'sphinx_selective_exclude.eager_only',
'sphinx_selective_exclude.search_auto_exclude',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#master_doc = 'index'
# General information about the project.
project = 'MicroPython'
copyright = '2014-2017, Damien P. George, Paul Sokolovsky, and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.8'
# The full version, including alpha/beta/rc tags.
release = '1.8.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '../../logo/trans-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "topindex.html"}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MicroPython.tex', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'micropython', 'MicroPython Documentation',
['Damien P. George, Paul Sokolovsky, and contributors'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MicroPython', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'MicroPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Append the other ports' specific folders/files to the exclude pattern
exclude_patterns.extend([port + '*' for port in ports if port != micropy_port])
modules_port_specific = {
'pyboard': ['pyb'],
'wipy': ['wipy'],
'esp8266': ['esp'],
}
modindex_exclude = []
for p, l in modules_port_specific.items():
if p != micropy_port:
modindex_exclude += l
# Exclude extra modules per port
modindex_exclude += {
'esp8266': ['cmath', 'select'],
'wipy': ['cmath'],
}.get(micropy_port, [])
|
the-stack_0_14367 | import sys
import discord
token = sys.argv[1]
client = discord.Client()
@client.event
async def on_ready():
for server in client.guilds:
await server.leave()
await client.close()
client.run(token, bot=False)
|
the-stack_0_14368 | import hashlib
from scapy.all import IP, TCP, PcapReader, rdpcap, wrpcap
from tcp_reliable.packet_helper import getPacketTimestamp, changeTimestamp, writePcap, genKey
class Extractor:
def __init__(self, pcapConfig, BUFFER_SIZE):
self.pcapConfig = pcapConfig
self.BUFFER_SIZE = BUFFER_SIZE
def extract(self):
serverPcap = rdpcap(self.pcapConfig['CLIENT_PCAP_PATH_OUTPUT'])
output = []
lastTimestamp = 0
serverPcap.sort(key=self.getKeySort)
last_seq = 0
limit = 0
buff = [None]* self.BUFFER_SIZE
sol = []
for pkt in serverPcap:
if pkt[TCP].dport == self.pcapConfig['CLIENT_PORT']:
timestamp = getPacketTimestamp(pkt)[0]
if timestamp == None:
continue
seq = pkt[TCP].seq
if lastTimestamp != timestamp and limit < timestamp and seq!= last_seq:
# if count >= 179 and count <= 281:
# print('seq:', pkt[TCP].seq, 'timestamp', timestamp, 'value', timestamp%2,'last_tm:', lastTimestamp)
# text+=str(timestamp%2)
# print("seq:", seq, "timestamp:", timestamp, "bit:", timestamp%2)
output.append(timestamp%2)
idx = self.getBufferIdx(seq)
buff[idx] = timestamp%2
# print("******",len(sol)+1,"***** seq",seq,"*****","idx",idx,"******* bit:",timestamp%2)
if idx == 0 and timestamp%2 == 1:
has_none = False
for i in buff[1:]:
if i == None:
has_none = True
if not has_none:
sol.append(buff[1:])
buff = [None]* self.BUFFER_SIZE
lastTimestamp = timestamp
limit = max(limit, timestamp)
last_seq = seq
return sol
def getKeySort(self, pkt):
seq = pkt[TCP].seq
timestamp = getPacketTimestamp(pkt)[0]
if timestamp == None:
return int(str(seq)+'0')
return int(str(timestamp)+str(seq))
def genHashNumber(self, num):
return int(hashlib.sha256(str(num).encode()).hexdigest(), base=16)
def getBufferIdx(self, seq):
return self.genHashNumber(seq) % self.BUFFER_SIZE
# if __name__ == '__main__':
# readMessage() |
the-stack_0_14369 | """
Submodule for working with geochemical data.
"""
import logging
import pandas as pd
import numpy as np
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
from ..util.meta import update_docstring_references
from ..util import units
from . import parse
from . import transform
from . import norm
from .ind import (
common_elements,
common_oxides,
__common_elements__,
__common_oxides__,
REE,
)
# note that only some of these methods will be valid for series
@pd.api.extensions.register_series_accessor("pyrochem")
@pd.api.extensions.register_dataframe_accessor("pyrochem")
class pyrochem(object):
"""
Custom dataframe accessor for pyrolite geochemistry.
"""
def __init__(self, obj):
self._validate(obj)
self._obj = obj
@staticmethod
def _validate(obj):
pass
# pyrolite.geochem.ind functions
@property
def list_elements(self):
"""
Get the subset of columns which are element names.
Returns
--------
:class:`list`
Notes
-------
The list will have the same ordering as the source DataFrame.
"""
fltr = self._obj.columns.isin(__common_elements__)
return self._obj.columns[fltr].tolist()
@property
def list_REE(self):
"""
Get the subset of columns which are Rare Earth Element names.
Returns
--------
:class:`list`
Notes
-------
The returned list will reorder REE based on atomic number.
"""
return [i for i in REE() if i in self._obj.columns]
@property
def list_oxides(self):
"""
Get the subset of columns which are oxide names.
Returns
--------
:class:`list`
Notes
-------
The list will have the same ordering as the source DataFrame.
"""
fltr = self._obj.columns.isin(__common_oxides__)
return self._obj.columns[fltr].tolist()
@property
def list_compositional(self):
return list(self.list_oxides + self.list_elements)
@property
def elements(self):
"""
Get an elemental subset of a DataFrame.
Returns
--------
:class:`pandas.Dataframe`
"""
return self._obj[self.list_elements]
@elements.setter
def elements(self, df):
self._obj.loc[:, self.list_elements] = df
@property
def REE(self):
"""
Get a Rare Earth Element subset of a DataFrame.
Returns
--------
:class:`pandas.Dataframe`
"""
return self._obj[self.list_REE]
@REE.setter
def REE(self, df):
self._obj.loc[:, self.list_REE] = df
@property
def oxides(self):
"""
Get an oxide subset of a DataFrame.
Returns
--------
:class:`pandas.Dataframe`
"""
return self._obj.loc[:, self.list_oxides]
@oxides.setter
def oxides(self, df):
self._obj.loc[:, self.list_oxides] = df
@property
def compositional(self):
"""
Get an oxide & elemental subset of a DataFrame.
Returns
--------
:class:`pandas.Dataframe`
"""
return self._obj.loc[:, self.list_compositional]
@compositional.setter
def compositional(self, df):
self._obj.loc[:, self.list_compositional] = df
# pyrolite.geochem.parse functions
def check_multiple_cation_inclusion(self, exclude=["LOI", "FeOT", "Fe2O3T"]):
"""
Returns cations which are present in both oxide and elemental form.
Parameters
-----------
exclude : :class:`list`, :code:`["LOI", "FeOT", "Fe2O3T"]`
List of components to exclude from the duplication check.
Returns
--------
:class:`set`
Set of elements for which multiple components exist in the dataframe.
"""
return parse.check_multiple_cation_inclusion(self._obj, exclude=exclude)
# pyrolite.geochem.transform functions
def to_molecular(self, renorm=True):
"""
Converts mass quantities to molar quantities.
Parameters
-----------
renorm : :class:`bool`, :code:`True`
Whether to renormalise the dataframe after converting to relative moles.
Notes
------
Does not convert units (i.e. mass% --> mol%; mass-ppm --> mol-ppm).
Returns
-------
:class:`pandas.DataFrame`
Transformed dataframe.
"""
self._obj = transform.to_molecular(self._obj, renorm=renorm)
return self._obj
def to_weight(self, renorm=True):
"""
Converts molar quantities to mass quantities.
Parameters
-----------
renorm : :class:`bool`, :code:`True`
Whether to renormalise the dataframe after converting to relative moles.
Notes
------
Does not convert units (i.e. mol% --> mass%; mol-ppm --> mass-ppm).
Returns
-------
:class:`pandas.DataFrame`
Transformed dataframe.
"""
self._obj = transform.to_weight(self._obj, renorm=renorm)
return self._obj
def devolatilise(
self, exclude=["H2O", "H2O_PLUS", "H2O_MINUS", "CO2", "LOI"], renorm=True
):
"""
Recalculates components after exclusion of volatile phases (e.g. H2O, CO2).
Parameters
-----------
exclude : :class:`list`
Components to exclude from the dataset.
renorm : :class:`bool`, :code:`True`
Whether to renormalise the dataframe after devolatilisation.
Returns
-------
:class:`pandas.DataFrame`
Transformed dataframe.
"""
self._obj = transform.devolatilise(self._obj, exclude=exclude, renorm=renorm)
return self._obj
def elemental_sum(
self, component=None, to=None, total_suffix="T", logdata=False, molecular=False
):
"""
Sums abundance for a cation to a single series, starting from a
        dataframe containing multiple components with a single set of units.
Parameters
----------
component : :class:`str`
Component indicating which element to aggregate.
to : :class:`str`
Component to cast the output as.
logdata : :class:`bool`, :code:`False`
Whether data has been log transformed.
molecular : :class:`bool`, :code:`False`
Whether to perform a sum of molecular data.
Returns
-------
:class:`pandas.Series`
Series with cation aggregated.
"""
return transform.elemental_sum(
self._obj,
component=component,
to=to,
total_suffix=total_suffix,
logdata=logdata,
molecular=molecular,
)
def aggregate_element(
self, to, total_suffix="T", logdata=False, renorm=False, molecular=False
):
"""
Aggregates cation information from oxide and elemental components to either a
single species or a designated mixture of species.
Parameters
----------
to : :class:`str` | :class:`~periodictable.core.Element` | :class:`~periodictable.formulas.Formula` | :class:`dict`
Component(s) to convert to. If one component is specified, the element will be
converted to the target species.
If more than one component is specified with proportions in a dictionary
(e.g. :code:`{'FeO': 0.9, 'Fe2O3': 0.1}`), the components will be split as a
fraction of the elemental sum.
renorm : :class:`bool`, :code:`True`
Whether to renormalise the dataframe after recalculation.
total_suffix : :class:`str`, 'T'
Suffix of 'total' variables. E.g. 'T' for FeOT, Fe2O3T.
logdata : :class:`bool`, :code:`False`
Whether the data has been log transformed.
molecular : :class:`bool`, :code:`False`
Whether to perform a sum of molecular data.
Notes
-------
This won't convert units, so need to start from single set of units.
Returns
-------
:class:`pandas.Series`
Series with cation aggregated.
"""
return transform.aggregate_element(
self._obj,
to,
total_suffix=total_suffix,
logdata=logdata,
renorm=renorm,
molecular=molecular,
)
def recalculate_Fe(
self, to="FeOT", renorm=False, total_suffix="T", logdata=False, molecular=False
):
"""
Recalculates abundances of iron, and normalises a dataframe to contain either
a single species, or multiple species in certain proportions.
Parameters
-----------
to : :class:`str` | :class:`~periodictable.core.Element` | :class:`~periodictable.formulas.Formula` | :class:`dict`
Component(s) to convert to.
If one component is specified, all iron will be
converted to the target species.
If more than one component is specified with proportions in a dictionary
(e.g. :code:`{'FeO': 0.9, 'Fe2O3': 0.1}`), the components will be split as a
fraction of Fe.
renorm : :class:`bool`, :code:`False`
Whether to renormalise the dataframe after recalculation.
total_suffix : :class:`str`, 'T'
Suffix of 'total' variables. E.g. 'T' for FeOT, Fe2O3T.
logdata : :class:`bool`, :code:`False`
Whether the data has been log transformed.
molecular : :class:`bool`, :code:`False`
Flag that data is in molecular units, rather than weight units.
Returns
-------
:class:`pandas.DataFrame`
Transformed dataframe.
"""
self._obj = transform.recalculate_Fe(
self._obj,
to,
total_suffix=total_suffix,
logdata=logdata,
renorm=renorm,
molecular=molecular,
)
return self._obj
def get_ratio(self, ratio: str, alias: str = None, norm_to=None, molecular=False):
"""
Add a ratio of components A and B, given in the form of string 'A/B'.
        The returned series can be assigned an alias name.
Parameters
-----------
ratio : :class:`str`
            String description of ratio in the form A/B[_n].
alias : :class:`str`
Alternate name for ratio to be used as column name.
norm_to : :class:`str` | :class:`pyrolite.geochem.norm.Composition`, `None`
Reference composition to normalise to.
molecular : :class:`bool`, :code:`False`
Flag that data is in molecular units, rather than weight units.
Returns
-------
        :class:`pandas.Series`
            Series containing the ratio of the two components.
See Also
--------
:func:`~pyrolite.geochem.transform.add_MgNo`
"""
return transform.get_ratio(
self._obj, ratio, alias, norm_to=norm_to, molecular=molecular
)
def add_ratio(self, ratio: str, alias: str = None, norm_to=None, molecular=False):
"""
Add a ratio of components A and B, given in the form of string 'A/B'.
        The returned series can be assigned an alias name.
Parameters
-----------
ratio : :class:`str`
            String description of ratio in the form A/B[_n].
alias : :class:`str`
Alternate name for ratio to be used as column name.
norm_to : :class:`str` | :class:`pyrolite.geochem.norm.Composition`, `None`
Reference composition to normalise to.
molecular : :class:`bool`, :code:`False`
Flag that data is in molecular units, rather than weight units.
Returns
-------
:class:`pandas.DataFrame`
Dataframe with ratio appended.
See Also
--------
:func:`~pyrolite.geochem.transform.add_MgNo`
"""
r = self.get_ratio(ratio, alias, norm_to=norm_to, molecular=molecular)
self._obj[r.name] = r
return self._obj
def add_MgNo(
self, molecular=False, use_total_approx=False, approx_Fe203_frac=0.1, name="Mg#"
):
"""
Append the magnesium number to a dataframe.
Parameters
----------
molecular : :class:`bool`, :code:`False`
Whether the input data is molecular.
use_total_approx : :class:`bool`, :code:`False`
Whether to use an approximate calculation using total iron rather than just FeO.
approx_Fe203_frac : :class:`float`
Fraction of iron which is oxidised, used in approximation mentioned above.
name : :class:`str`
Name to use for the Mg Number column.
Returns
-------
:class:`pandas.DataFrame`
Dataframe with ratio appended.
See Also
--------
:func:`~pyrolite.geochem.transform.add_ratio`
"""
transform.add_MgNo(
self._obj,
molecular=molecular,
use_total_approx=use_total_approx,
approx_Fe203_frac=approx_Fe203_frac,
name=name,
)
return self._obj
def lambda_lnREE(
self,
norm_to="Chondrite_PON",
exclude=["Pm", "Eu"],
params=None,
degree=4,
append=[],
scale="ppm",
**kwargs
):
"""
Calculates orthogonal polynomial coefficients (lambdas) for a given set of REE data,
normalised to a specific composition [#localref_1]_. Lambda factors are given for the
radii vs. ln(REE/NORM) polynomial combination.
Parameters
------------
norm_to : :class:`str` | :class:`~pyrolite.geochem.norm.Composition` | :class:`numpy.ndarray`
Which reservoir to normalise REE data to (defaults to :code:`"Chondrite_PON"`).
exclude : :class:`list`, :code:`["Pm", "Eu"]`
            Which REE elements to exclude from the fit. You may also wish to exclude Ce
            for minerals in which Ce anomalies are common.
params : :class:`list`, :code:`None`
            Set of predetermined orthogonal polynomial parameters.
        degree : :class:`int`, 4
Maximum degree polynomial fit component to include.
append : :class:`list`, :code:`None`
Whether to append lambda function (i.e. :code:`["function"]`).
scale : :class:`str`
Current units for the REE data, used to scale the reference dataset.
References
-----------
.. [#localref_1] O’Neill HSC (2016) The Smoothness and Shapes of Chondrite-normalized
Rare Earth Element Patterns in Basalts. J Petrology 57:1463–1508.
doi: `10.1093/petrology/egw047 <https://dx.doi.org/10.1093/petrology/egw047>`__
See Also
---------
:func:`~pyrolite.geochem.ind.get_ionic_radii`
:func:`~pyrolite.util.math.lambdas`
:func:`~pyrolite.util.math.OP_constants`
:func:`~pyrolite.plot.REE_radii_plot`
"""
return transform.lambda_lnREE(
self._obj,
norm_to=norm_to,
exclude=exclude,
params=params,
degree=degree,
append=append,
scale=scale,
**kwargs
)
def convert_chemistry(self, to=[], logdata=False, renorm=False, molecular=False):
"""
Attempts to convert a dataframe with one set of components to another.
Parameters
-----------
to : :class:`list`
Set of columns to try to extract from the dataframe.
Can also include a dictionary for iron speciation.
See :func:`pyrolite.geochem.recalculate_Fe`.
logdata : :class:`bool`, :code:`False`
Whether chemical data has been log transformed. Necessary for aggregation
functions.
renorm : :class:`bool`, :code:`False`
Whether to renormalise the data after transformation.
molecular : :class:`bool`, :code:`False`
Flag that data is in molecular units, rather than weight units.
Returns
--------
:class:`pandas.DataFrame`
Dataframe with converted chemistry.
Todo
------
* Check for conflicts between oxides and elements
* Aggregator for ratios
* Implement generalised redox transformation.
        * Add check for dictionary components (e.g. Fe) in tests
"""
return transform.convert_chemistry(
self._obj, to=to, logdata=logdata, renorm=renorm, molecular=molecular
) # can't update the source nicely here, need to assign output
# pyrolite.geochem.norm functions
def normalize_to(self, reference=None, units=None, convert_first=False):
"""
Normalise a dataframe to a given reference composition.
Parameters
-----------
reference : :class:`str` | :class:`~pyrolite.geochem.norm.Composition` | :class:`numpy.ndarray`
Reference composition to normalise to.
units : :class:`str`
Units of the input dataframe, to convert the reference composition.
convert_first : :class:`bool`
            Whether to first convert the reference composition before normalisation.
This is useful where elements are presented as different components (e.g.
Ti, TiO2).
Returns
--------
:class:`pandas.DataFrame`
Dataframe with normalised chemistry.
Notes
------
This assumes that dataframes have a single set of units.
"""
if isinstance(reference, (str, norm.Composition)):
if not isinstance(reference, norm.Composition):
N = norm.get_reference_composition(reference)
else:
N = reference
if units is not None:
N.set_units(units)
if convert_first:
N.comp = transform.convert_chemistry(N.comp, self.list_compositional)
norm_abund = N[self.list_compositional]
else: # list, iterable, pd.Index etc
norm_abund = np.array(reference)
assert len(norm_abund) == len(self.list_compositional)
# this list should have the same ordering as the input dataframe
return self._obj[self.list_compositional].div(norm_abund)
def denormalize_from(self, reference=None, units=None):
"""
De-normalise a dataframe from a given reference composition.
Parameters
-----------
reference : :class:`str` | :class:`~pyrolite.geochem.norm.Composition` | :class:`numpy.ndarray`
Reference composition which the composition is normalised to.
units : :class:`str`
Units of the input dataframe, to convert the reference composition.
Returns
--------
:class:`pandas.DataFrame`
Dataframe with normalised chemistry.
Notes
------
This assumes that dataframes have a single set of units.
"""
if isinstance(reference, (str, norm.Composition)):
if not isinstance(reference, norm.Composition):
N = norm.get_reference_composition(reference)
else:
N = reference
if units is not None:
N.set_units(units)
N.comp = transform.convert_chemistry(N.comp, self.list_compositional)
norm_abund = N[self.list_compositional]
else: # list, iterable, pd.Index etc
norm_abund = np.array(reference)
assert len(norm_abund) == len(self.list_compositional)
return self._obj[self.list_compositional] * norm_abund
def scale(self, in_unit, target_unit="ppm"):
"""
Scale a dataframe from one set of units to another.
Parameters
-----------
in_unit : :class:`str`
Units to be converted from
target_unit : :class:`str`, :code:`"ppm"`
Units to scale to.
Returns
--------
:class:`pandas.DataFrame`
Dataframe with new scale.
"""
return self._obj * units.scale(in_unit, target_unit)
pyrochem.lambda_lnREE = update_docstring_references(
pyrochem.lambda_lnREE, ref="localref"
)
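
# ------------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes pyrolite is installed and that importing pyrolite.geochem registers
# the `pyrochem` accessor on pandas DataFrames; the column values are made up.
#
#   import pandas as pd
#   import pyrolite.geochem  # noqa: F401  (registers df.pyrochem)
#
#   df = pd.DataFrame({"SiO2": [52.1], "MgO": [7.9], "FeO": [9.1], "Fe2O3": [1.2]})
#   df = df.pyrochem.recalculate_Fe(to="FeOT")    # aggregate iron to a single FeOT column
#   df = df.pyrochem.add_MgNo()                   # append a Mg# column
#   mg_si = df.pyrochem.get_ratio("MgO/SiO2")     # ratio returned as a Series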
|
the-stack_0_14370 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import SitemapSpider
from scrapy.http.request import Request
import json
from urllib.parse import urlencode
class BibaSpider(scrapy.Spider):
name = 'biba'
allowed_domains = ['www.biba.in']
start_urls = [
'https://www.biba.in/new-arrivals',
'https://www.biba.in/mix-and-match',
'https://www.biba.in/suit-sets',
'https://www.biba.in/girls',
'https://www.biba.in/easy-stitch',
'https://www.biba.in/jewellery',
'https://www.biba.in/factory-outlet',
'https://www.biba.in/factory-outlet'
]
def parse(self, response):
ignore_paths = [
            '/registration/', '/careers', 'sitemap', 'privacy', 'terms-of-use', 'about-us',
            '/payments-options', 'help-faq', 'delivery-and-shipping-policy', 'business-enquiries',
'/returns-and-cancellation-policy', 'contact-us', 'trackorder', 'store-locator', '/faq/'
]
pages = len(response.xpath(".//*[@class='pager']/text()").extract())
if pages:
# print(pages)
pattern_match_text = r"var\sobjShowCaseInputs\s=\s({.*});"
try:
data = json.loads(response.xpath(".//script[@type='text/javascript']/text()").re_first(pattern_match_text))
for i in range(1, pages + 1):
data['PageNo'] = i
# print(data)
encoded_url = "https://www.biba.in/Handler/ProductShowcaseHandler.ashx?ProductShowcaseInput=" + \
json.dumps(data)
# print(encoded_url)
yield Request(
encoded_url,
callback=self.parse
)
except Exception as e:
print(e)
links = set([link for link in response.xpath(".//a/@href").extract()])
links = [link for ipath in ignore_paths for link in links
if (ipath not in str(link).lower().strip()) and self.allowed_domains[0] in link]
for link in links:
# print(link)
if '/p/' in link:
yield Request(link, callback=self.extract_items)
if self.allowed_domains[0] in link:
yield Request(link, callback=self.parse)
def extract_items(self, response):
pattern_match_text = r"MartJack\s=({.*})"
data = response.xpath(".//script[@type='text/javascript']/text()").re_first(pattern_match_text)
product = json.loads(data)['PageInfo']
if product['PageType'] == 'product':
product['url'] = response.url
yield product
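
# Illustrative note (not part of the original spider): with scrapy installed, the
# crawl can be started with, for example,
#
#   scrapy runspider biba.py -o products.json
#
# where "biba.py" is an assumed filename for this module.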
|
the-stack_0_14374 | # This file contains dictionaries used in the Dalvik Format.
# https://source.android.com/devices/tech/dalvik/dex-format#type-codes
TYPE_MAP_ITEM = {
0x0: "TYPE_HEADER_ITEM",
0x1: "TYPE_STRING_ID_ITEM",
0x2: "TYPE_TYPE_ID_ITEM",
0x3: "TYPE_PROTO_ID_ITEM",
0x4: "TYPE_FIELD_ID_ITEM",
0x5: "TYPE_METHOD_ID_ITEM",
0x6: "TYPE_CLASS_DEF_ITEM",
0x1000: "TYPE_MAP_LIST",
0x1001: "TYPE_TYPE_LIST",
0x1002: "TYPE_ANNOTATION_SET_REF_LIST",
0x1003: "TYPE_ANNOTATION_SET_ITEM",
0x2000: "TYPE_CLASS_DATA_ITEM",
0x2001: "TYPE_CODE_ITEM",
0x2002: "TYPE_STRING_DATA_ITEM",
0x2003: "TYPE_DEBUG_INFO_ITEM",
0x2004: "TYPE_ANNOTATION_ITEM",
0x2005: "TYPE_ENCODED_ARRAY_ITEM",
0x2006: "TYPE_ANNOTATIONS_DIRECTORY_ITEM",
}
# https://source.android.com/devices/tech/dalvik/dex-format#access-flags
ACCESS_FLAGS = {
0x1: 'public',
0x2: 'private',
0x4: 'protected',
0x8: 'static',
0x10: 'final',
0x20: 'synchronized',
0x40: 'bridge',
0x80: 'varargs',
0x100: 'native',
0x200: 'interface',
0x400: 'abstract',
0x800: 'strictfp',
0x1000: 'synthetic',
0x4000: 'enum',
0x8000: 'unused',
0x10000: 'constructor',
0x20000: 'synchronized',
}
# https://source.android.com/devices/tech/dalvik/dex-format#typedescriptor
TYPE_DESCRIPTOR = {
'V': 'void',
'Z': 'boolean',
'B': 'byte',
'S': 'short',
'C': 'char',
'I': 'int',
'J': 'long',
'F': 'float',
'D': 'double',
}
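

# Illustrative helper (an addition, not part of the original module): decode an
# access_flags bitmask into the human-readable modifier names defined above.
def describe_access_flags(value):
    """Return the ACCESS_FLAGS names whose bits are set in `value`."""
    return [name for flag, name in ACCESS_FLAGS.items() if value & flag]


# Example: describe_access_flags(0x9) -> ['public', 'static']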
|
the-stack_0_14384 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ProviderPaged(Paged):
"""
A paging container for iterating over a list of :class:`Provider <azure.mgmt.resource.resources.v2017_05_10.models.Provider>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Provider]'}
}
def __init__(self, *args, **kwargs):
super(ProviderPaged, self).__init__(*args, **kwargs)
|
the-stack_0_14387 | from ROAR.agent_module.agent import Agent
from pathlib import Path
from ROAR.control_module.pid_controller import PIDController
from ROAR.planning_module.local_planner.rl_local_planner import RLLocalPlanner
from ROAR.planning_module.behavior_planner.behavior_planner import BehaviorPlanner
from ROAR.planning_module.mission_planner.waypoint_following_mission_planner import WaypointFollowingMissionPlanner
from ROAR.utilities_module.data_structures_models import SensorsData
from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle
import logging
from ROAR.utilities_module.occupancy_map import OccupancyGridMap
from ROAR.perception_module.obstacle_from_depth import ObstacleFromDepth
from ROAR.planning_module.local_planner.simple_waypoint_following_local_planner import \
SimpleWaypointFollowingLocalPlanner
import numpy as np
from typing import Any
class RLLocalPlannerAgent(Agent):
def __init__(self, target_speed=40, **kwargs):
super().__init__(**kwargs)
self.target_speed = target_speed
self.logger = logging.getLogger("PID Agent")
self.route_file_path = Path(self.agent_settings.waypoint_file_path)
self.pid_controller = PIDController(agent=self, steering_boundary=(-1, 1), throttle_boundary=(0, 1))
self.mission_planner = WaypointFollowingMissionPlanner(agent=self)
# initiated right after mission plan
self.behavior_planner = BehaviorPlanner(agent=self)
self.local_planner = RLLocalPlanner(
agent=self,
controller=self.pid_controller)
self.traditional_local_planner = SimpleWaypointFollowingLocalPlanner(
agent=self,
controller=self.pid_controller,
mission_planner=self.mission_planner,
behavior_planner=self.behavior_planner,
closeness_threshold=1.5
)
self.absolute_maximum_map_size, self.map_padding = 1000, 40
self.occupancy_map = OccupancyGridMap(agent=self, threaded=True)
self.obstacle_from_depth_detector = ObstacleFromDepth(agent=self,threaded=True)
self.add_threaded_module(self.obstacle_from_depth_detector)
# self.add_threaded_module(self.occupancy_map)
        self.logger.debug(
            f"Waypoint Following Agent Initiated. Reading from "
            f"{self.route_file_path.as_posix()}")
def run_step(self, vehicle: Vehicle,
sensors_data: SensorsData) -> VehicleControl:
super(RLLocalPlannerAgent, self).run_step(vehicle=vehicle,
sensors_data=sensors_data)
self.traditional_local_planner.run_in_series()
self.transform_history.append(self.vehicle.transform)
option = "obstacle_coords" # ground_coords, point_cloud_obstacle_from_depth
if self.kwargs.get(option, None) is not None:
points = self.kwargs[option]
self.occupancy_map.update(points)
control = self.local_planner.run_in_series()
return control
def get_obs(self):
ch1 = self.occupancy_map.get_map(transform=self.vehicle.transform,
view_size=(100, 100))
ch1 = np.expand_dims((ch1 * 255).astype(np.uint8), -1)
ch2 = np.zeros(shape=(100, 100, 1))
ch3 = np.zeros(shape=ch2.shape)
obs = np.concatenate([ch1, ch2, ch3], axis=2)
print(np.shape(obs))
return obs
|
the-stack_0_14390 | # coding=utf-8
# Copyright 2019 SK T-Brain Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import requests
import hashlib
import torch
from transformers import BertModel, BertConfig
import gluonnlp as nlp
from .utils import download as _download
from .utils import tokenizer
pytorch_kobert = {
'url':
'https://kobert.blob.core.windows.net/models/kobert/pytorch/pytorch_kobert_2439f391a6.params',
'fname': 'pytorch_kobert_2439f391a6.params',
'chksum': '2439f391a6'
}
bert_config = {
'attention_probs_dropout_prob': 0.1,
'hidden_act': 'gelu',
'hidden_dropout_prob': 0.1,
'hidden_size': 768,
'initializer_range': 0.02,
'intermediate_size': 3072,
'max_position_embeddings': 512,
'num_attention_heads': 12,
'num_hidden_layers': 12,
'type_vocab_size': 2,
'vocab_size': 8002
}
def get_pytorch_kobert_model(ctx='cpu', cachedir='~/kobert/'):
# download model
model_info = pytorch_kobert
model_path = _download(model_info['url'],
model_info['fname'],
model_info['chksum'],
cachedir=cachedir)
# download vocab
vocab_info = tokenizer
vocab_path = _download(vocab_info['url'],
vocab_info['fname'],
vocab_info['chksum'],
cachedir=cachedir)
return get_kobert_model(model_path, vocab_path, ctx)
def get_kobert_model(model_file, vocab_file, ctx="cpu"):
bertmodel = BertModel(config=BertConfig.from_dict(bert_config))
bertmodel.load_state_dict(torch.load(model_file))
device = torch.device(ctx)
bertmodel.to(device)
bertmodel.eval()
vocab_b_obj = nlp.vocab.BERTVocab.from_sentencepiece(vocab_file,
padding_token='[PAD]')
return bertmodel, vocab_b_obj
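
# Illustrative usage sketch (not part of the original module). Downloading the
# pretrained weights requires network access, and the token ids below are arbitrary:
#
#   model, vocab = get_pytorch_kobert_model()
#   input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
#   attention_mask = torch.ones_like(input_ids)
#   outputs = model(input_ids, attention_mask=attention_mask)
#   last_hidden = outputs[0]   # shape: (batch, seq_len, 768)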
|
the-stack_0_14391 | import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers, models
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import os
import string
import numpy as np
from utils.convert import Convert
from conf.config import Config
def get_sample_data(path, size=500, height=70, width=130, cap_len=4, characters=""):
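    """Load `size` captcha images from img/<path>: each image is converted to
    grayscale, flattened and scaled to [0, 1] for `sample_x`, while the filename
    (minus ".png") is encoded into a target vector of length cap_len*len(characters)
    via Convert.convert_to_vector for `sample_y`."""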
sample_dir = os.path.join(os.getcwd(), "img", path)
file_names = os.listdir(sample_dir)
sample_x = np.zeros([size, height*width])
sample_y = np.zeros([size, cap_len*len(characters)])
for seq in range(size):
captcha_image = np.array(Image.open(os.path.join(sample_dir, file_names[seq])))
captcha_text = file_names[seq].split(".png")[0]
image = Convert.convert_to_gray(captcha_image)
sample_x[seq, :] = image.flatten() / 255
sample_y[seq, :] = Convert.convert_to_vector(captcha_text, cap_len, characters)
return sample_x, sample_y
if __name__ == "__main__":
batch_size = 128
epochs = 15
IMG_HEIGHT = 70
IMG_WIDTH = 130
env_config = Config.load_env()
captcha_str_length = env_config["captcha_length"]
chars = ""
if env_config["captcha_has_number"]:
chars += string.digits
if env_config["captcha_has_lowercase"]:
chars += string.ascii_lowercase
if env_config["captcha_has_uppercase"]:
chars += string.ascii_uppercase
train_dir = os.path.join(os.getcwd(), "img", "train")
test_dir = os.path.join(os.getcwd(), "img", "test")
sample_images, sample_labels = get_sample_data(train_dir, size=500, height=IMG_HEIGHT, width=IMG_WIDTH, cap_len=captcha_str_length, characters=chars)
    # Input shape assumed from IMG_HEIGHT/IMG_WIDTH above; a single channel since
    # the samples are converted to grayscale.
    input_layer = tf.keras.Input(shape=(IMG_HEIGHT, IMG_WIDTH, 1))
x = layers.Conv2D(32, 3, activation='relu')(input_layer)
x = layers.MaxPooling2D((2, 2))(x)
x = layers.Conv2D(64, 3, activation='relu')(x)
x = layers.MaxPooling2D((2, 2))(x)
x = layers.Conv2D(64, 3, activation='relu')(x)
x = layers.MaxPooling2D((2, 2))(x)
# x = layers.Flatten()(x)
# x = layers.Dense(1024, activation='relu')(x)
# # x = layers.Dropout(0.5)(x)
#
# x = layers.Dense(D * N_LABELS, activation='softmax')(x)
# x = layers.Reshape((D, N_LABELS))(x)
model = models.Model(inputs=input_layer, outputs=x)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
|
the-stack_0_14392 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RktCompilerLib(RacketPackage):
"""Stub package for packages which are currently part of core
Racket installation (but which may change in the future)."""
git = "ssh://[email protected]/racket/racket.git"
maintainers = ['elfprince13']
version('8.3', commit='cab83438422bfea0e4bd74bc3e8305e6517cf25f') # tag='v8.3'
depends_on('[email protected]', type=('build', 'run'), when='@8.3')
depends_on('[email protected]', type=('build', 'run'), when='@8.3')
depends_on('[email protected]', type=('build', 'run'), when='@8.3')
depends_on('[email protected]', type=('build', 'run'), when='@8.3')
name = 'compiler-lib'
pkgs = True
subdirectory = "pkgs/{0}".format(name)
|
the-stack_0_14394 | #! /usr/bin/env python3
"""Interfaces for launching and remotely controlling Web browsers."""
# Maintained by Georg Brandl.
import os
import shlex
import shutil
import sys
import subprocess
__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"]
class Error(Exception):
pass
_browsers = {} # Dictionary of available browser controllers
_tryorder = [] # Preference order of available browsers
def register(name, klass, instance=None, update_tryorder=1):
"""Register a browser connector and, optionally, connection."""
_browsers[name.lower()] = [klass, instance]
if update_tryorder > 0:
_tryorder.append(name)
elif update_tryorder < 0:
_tryorder.insert(0, name)
def get(using=None):
"""Return a browser launcher instance appropriate for the environment."""
if using is not None:
alternatives = [using]
else:
alternatives = _tryorder
for browser in alternatives:
if '%s' in browser:
# User gave us a command line, split it into name and args
browser = shlex.split(browser)
if browser[-1] == '&':
return BackgroundBrowser(browser[:-1])
else:
return GenericBrowser(browser)
else:
# User gave us a browser name or path.
try:
command = _browsers[browser.lower()]
except KeyError:
command = _synthesize(browser)
if command[1] is not None:
return command[1]
elif command[0] is not None:
return command[0]()
raise Error("could not locate runnable browser")
# Please note: the following definition hides a builtin function.
# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
# instead of "from webbrowser import *".
def open(url, new=0, autoraise=True):
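    """Display url using the default browser.

    If new is 0, the url is opened in the same browser window if possible.
    If new is 1, a new browser window is opened if possible.
    If new is 2, a new browser page ("tab") is opened if possible.
    If autoraise is True, the window is raised if possible (note that under
    many window managers this will occur regardless of the setting of this
    variable).
    """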
for name in _tryorder:
browser = get(name)
if browser.open(url, new, autoraise):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
def _synthesize(browser, update_tryorder=1):
"""Attempt to synthesize a controller base on existing controllers.
This is useful to create a controller when a user specifies a path to
an entry in the BROWSER environment variable -- we can copy a general
controller to operate using a specific installation of the desired
browser in this way.
If we can't create a controller in this way, or if there is no
executable for the requested browser, return [None, None].
"""
cmd = browser.split()[0]
if not shutil.which(cmd):
return [None, None]
name = os.path.basename(cmd)
try:
command = _browsers[name.lower()]
except KeyError:
return [None, None]
# now attempt to clone to fit the new name:
controller = command[1]
if controller and name.lower() == controller.basename:
import copy
controller = copy.copy(controller)
controller.name = browser
controller.basename = os.path.basename(browser)
register(browser, None, controller, update_tryorder)
return [None, controller]
return [None, None]
# General parent classes
class BaseBrowser(object):
"""Parent class for all browsers. Do not use directly."""
args = ['%s']
def __init__(self, name=""):
self.name = name
self.basename = name
def open(self, url, new=0, autoraise=True):
raise NotImplementedError
def open_new(self, url):
return self.open(url, 1)
def open_new_tab(self, url):
return self.open(url, 2)
class GenericBrowser(BaseBrowser):
"""Class for all browsers started with a command
and without remote functionality."""
def __init__(self, name):
if isinstance(name, str):
self.name = name
self.args = ["%s"]
else:
# name should be a list with arguments
self.name = name[0]
self.args = name[1:]
self.basename = os.path.basename(self.name)
def open(self, url, new=0, autoraise=True):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
p = subprocess.Popen(cmdline, close_fds=True)
return not p.wait()
except OSError:
return False
class BackgroundBrowser(GenericBrowser):
"""Class for all browsers which are to be started in the
background."""
def open(self, url, new=0, autoraise=True):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
p = subprocess.Popen(cmdline, close_fds=True,
start_new_session=True)
return (p.poll() is None)
except OSError:
return False
class UnixBrowser(BaseBrowser):
"""Parent class for all Unix browsers with remote functionality."""
raise_opts = None
background = False
redirect_stdout = True
# In remote_args, %s will be replaced with the requested URL. %action will
# be replaced depending on the value of 'new' passed to open.
# remote_action is used for new=0 (open). If newwin is not None, it is
# used for new=1 (open_new). If newtab is not None, it is used for
# new=3 (open_new_tab). After both substitutions are made, any empty
# strings in the transformed remote_args list will be removed.
remote_args = ['%action', '%s']
remote_action = None
remote_action_newwin = None
remote_action_newtab = None
def _invoke(self, args, remote, autoraise):
raise_opt = []
if remote and self.raise_opts:
# use autoraise argument only for remote invocation
autoraise = int(autoraise)
opt = self.raise_opts[autoraise]
if opt: raise_opt = [opt]
cmdline = [self.name] + raise_opt + args
if remote or self.background:
inout = subprocess.DEVNULL
else:
# for TTY browsers, we need stdin/out
inout = None
p = subprocess.Popen(cmdline, close_fds=True, stdin=inout,
stdout=(self.redirect_stdout and inout or None),
stderr=inout, start_new_session=True)
if remote:
# wait at most five seconds. If the subprocess is not finished, the
# remote invocation has (hopefully) started a new instance.
try:
rc = p.wait(5)
# if remote call failed, open() will try direct invocation
return not rc
except subprocess.TimeoutExpired:
return True
elif self.background:
if p.poll() is None:
return True
else:
return False
else:
return not p.wait()
def open(self, url, new=0, autoraise=True):
if new == 0:
action = self.remote_action
elif new == 1:
action = self.remote_action_newwin
elif new == 2:
if self.remote_action_newtab is None:
action = self.remote_action_newwin
else:
action = self.remote_action_newtab
else:
raise Error("Bad 'new' parameter to open(); " +
"expected 0, 1, or 2, got %s" % new)
args = [arg.replace("%s", url).replace("%action", action)
for arg in self.remote_args]
args = [arg for arg in args if arg]
success = self._invoke(args, True, autoraise)
if not success:
# remote invocation failed, try straight way
args = [arg.replace("%s", url) for arg in self.args]
return self._invoke(args, False, False)
else:
return True
class Mozilla(UnixBrowser):
"""Launcher class for Mozilla browsers."""
remote_args = ['%action', '%s']
remote_action = ""
remote_action_newwin = "-new-window"
remote_action_newtab = "-new-tab"
background = True
class Netscape(UnixBrowser):
"""Launcher class for Netscape browser."""
raise_opts = ["-noraise", "-raise"]
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = True
class Galeon(UnixBrowser):
"""Launcher class for Galeon/Epiphany browsers."""
raise_opts = ["-noraise", ""]
remote_args = ['%action', '%s']
remote_action = "-n"
remote_action_newwin = "-w"
background = True
class Chrome(UnixBrowser):
"Launcher class for Google Chrome browser."
remote_args = ['%action', '%s']
remote_action = ""
remote_action_newwin = "--new-window"
remote_action_newtab = ""
background = True
Chromium = Chrome
class Opera(UnixBrowser):
"Launcher class for Opera browser."
remote_args = ['%action', '%s']
remote_action = ""
remote_action_newwin = "--new-window"
remote_action_newtab = ""
background = True
class Elinks(UnixBrowser):
"Launcher class for Elinks browsers."
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = False
# elinks doesn't like its stdout to be redirected -
# it uses redirected stdout as a signal to do -dump
redirect_stdout = False
class Konqueror(BaseBrowser):
"""Controller for the KDE File Manager (kfm, or Konqueror).
See the output of ``kfmclient --commands``
for more information on the Konqueror remote-control interface.
"""
def open(self, url, new=0, autoraise=True):
# XXX Currently I know no way to prevent KFM from opening a new win.
if new == 2:
action = "newTab"
else:
action = "openURL"
devnull = subprocess.DEVNULL
try:
p = subprocess.Popen(["kfmclient", action, url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull)
except OSError:
# fall through to next variant
pass
else:
p.wait()
# kfmclient's return code unfortunately has no meaning as it seems
return True
try:
p = subprocess.Popen(["konqueror", "--silent", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
start_new_session=True)
except OSError:
# fall through to next variant
pass
else:
if p.poll() is None:
# Should be running now.
return True
try:
p = subprocess.Popen(["kfm", "-d", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
start_new_session=True)
except OSError:
return False
else:
return (p.poll() is None)
class Grail(BaseBrowser):
# There should be a way to maintain a connection to Grail, but the
# Grail remote control protocol doesn't really allow that at this
# point. It probably never will!
def _find_grail_rc(self):
import glob
import pwd
import socket
import tempfile
tempdir = os.path.join(tempfile.gettempdir(),
".grail-unix")
user = pwd.getpwuid(os.getuid())[0]
filename = os.path.join(tempdir, user + "-*")
maybes = glob.glob(filename)
if not maybes:
return None
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
for fn in maybes:
# need to PING each one until we find one that's live
try:
s.connect(fn)
except OSError:
# no good; attempt to clean it out, but don't fail:
try:
os.unlink(fn)
except OSError:
pass
else:
return s
def _remote(self, action):
s = self._find_grail_rc()
if not s:
return 0
s.send(action)
s.close()
return 1
def open(self, url, new=0, autoraise=True):
if new:
ok = self._remote("LOADNEW " + url)
else:
ok = self._remote("LOAD " + url)
return ok
#
# Platform support for Unix
#
# These are the right tests because all these Unix browsers require either
# a console terminal or an X display to run.
def register_X_browsers():
# use xdg-open if around
if shutil.which("xdg-open"):
register("xdg-open", None, BackgroundBrowser("xdg-open"))
# The default GNOME3 browser
if "GNOME_DESKTOP_SESSION_ID" in os.environ and shutil.which("gvfs-open"):
register("gvfs-open", None, BackgroundBrowser("gvfs-open"))
# The default GNOME browser
if "GNOME_DESKTOP_SESSION_ID" in os.environ and shutil.which("gnome-open"):
register("gnome-open", None, BackgroundBrowser("gnome-open"))
# The default KDE browser
if "KDE_FULL_SESSION" in os.environ and shutil.which("kfmclient"):
register("kfmclient", Konqueror, Konqueror("kfmclient"))
if shutil.which("x-www-browser"):
register("x-www-browser", None, BackgroundBrowser("x-www-browser"))
# The Mozilla browsers
for browser in ("firefox", "iceweasel", "iceape", "seamonkey"):
if shutil.which(browser):
register(browser, None, Mozilla(browser))
# The Netscape and old Mozilla browsers
for browser in ("mozilla-firefox",
"mozilla-firebird", "firebird",
"mozilla", "netscape"):
if shutil.which(browser):
register(browser, None, Netscape(browser))
# Konqueror/kfm, the KDE browser.
if shutil.which("kfm"):
register("kfm", Konqueror, Konqueror("kfm"))
elif shutil.which("konqueror"):
register("konqueror", Konqueror, Konqueror("konqueror"))
# Gnome's Galeon and Epiphany
for browser in ("galeon", "epiphany"):
if shutil.which(browser):
register(browser, None, Galeon(browser))
# Skipstone, another Gtk/Mozilla based browser
if shutil.which("skipstone"):
register("skipstone", None, BackgroundBrowser("skipstone"))
# Google Chrome/Chromium browsers
for browser in ("google-chrome", "chrome", "chromium", "chromium-browser"):
if shutil.which(browser):
register(browser, None, Chrome(browser))
# Opera, quite popular
if shutil.which("opera"):
register("opera", None, Opera("opera"))
# Next, Mosaic -- old but still in use.
if shutil.which("mosaic"):
register("mosaic", None, BackgroundBrowser("mosaic"))
# Grail, the Python browser. Does anybody still use it?
if shutil.which("grail"):
register("grail", Grail, None)
# Prefer X browsers if present
if os.environ.get("DISPLAY"):
register_X_browsers()
# Also try console browsers
if os.environ.get("TERM"):
if shutil.which("www-browser"):
register("www-browser", None, GenericBrowser("www-browser"))
# The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
if shutil.which("links"):
register("links", None, GenericBrowser("links"))
if shutil.which("elinks"):
register("elinks", None, Elinks("elinks"))
# The Lynx browser <http://lynx.isc.org/>, <http://lynx.browser.org/>
if shutil.which("lynx"):
register("lynx", None, GenericBrowser("lynx"))
# The w3m browser <http://w3m.sourceforge.net/>
if shutil.which("w3m"):
register("w3m", None, GenericBrowser("w3m"))
#
# Platform support for Windows
#
if sys.platform[:3] == "win":
class WindowsDefault(BaseBrowser):
def open(self, url, new=0, autoraise=True):
try:
os.startfile(url)
except OSError:
# [Error 22] No application is associated with the specified
# file for this operation: '<URL>'
return False
else:
return True
_tryorder = []
_browsers = {}
# First try to use the default Windows browser
register("windows-default", WindowsDefault)
# Detect some common Windows browsers, fallback to IE
iexplore = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"),
"Internet Explorer\\IEXPLORE.EXE")
for browser in ("firefox", "firebird", "seamonkey", "mozilla",
"netscape", "opera", iexplore):
if shutil.which(browser):
register(browser, None, BackgroundBrowser(browser))
#
# Platform support for MacOS
#
if sys.platform == 'darwin':
# Adapted from patch submitted to SourceForge by Steven J. Burr
class MacOSX(BaseBrowser):
"""Launcher class for Aqua browsers on Mac OS X
Optionally specify a browser name on instantiation. Note that this
will not work for Aqua browsers if the user has moved the application
package after installation.
If no browser is specified, the default browser, as specified in the
Internet System Preferences panel, will be used.
"""
def __init__(self, name):
self.name = name
def open(self, url, new=0, autoraise=True):
assert "'" not in url
# hack for local urls
if not ':' in url:
url = 'file:'+url
# new must be 0 or 1
new = int(bool(new))
if self.name == "default":
# User called open, open_new or get without a browser parameter
script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
else:
# User called get and chose a browser
if self.name == "OmniWeb":
toWindow = ""
else:
# Include toWindow parameter of OpenURL command for browsers
# that support it. 0 == new window; -1 == existing
toWindow = "toWindow %d" % (new - 1)
cmd = 'OpenURL "%s"' % url.replace('"', '%22')
script = '''tell application "%s"
activate
%s %s
end tell''' % (self.name, cmd, toWindow)
# Open pipe to AppleScript through osascript command
osapipe = os.popen("osascript", "w")
if osapipe is None:
return False
# Write script to osascript's stdin
osapipe.write(script)
rc = osapipe.close()
return not rc
class MacOSXOSAScript(BaseBrowser):
def __init__(self, name):
self._name = name
def open(self, url, new=0, autoraise=True):
if self._name == 'default':
script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
else:
script = '''
tell application "%s"
activate
open location "%s"
end
'''%(self._name, url.replace('"', '%22'))
osapipe = os.popen("osascript", "w")
if osapipe is None:
return False
osapipe.write(script)
rc = osapipe.close()
return not rc
# Don't clear _tryorder or _browsers since OS X can use above Unix support
# (but we prefer using the OS X specific stuff)
register("safari", None, MacOSXOSAScript('safari'), -1)
register("firefox", None, MacOSXOSAScript('firefox'), -1)
register("chrome", None, MacOSXOSAScript('chrome'), -1)
register("MacOSX", None, MacOSXOSAScript('default'), -1)
# OK, now that we know what the default preference orders for each
# platform are, allow user to override them with the BROWSER variable.
if "BROWSER" in os.environ:
_userchoices = os.environ["BROWSER"].split(os.pathsep)
_userchoices.reverse()
# Treat choices in same way as if passed into get() but do register
# and prepend to _tryorder
for cmdline in _userchoices:
if cmdline != '':
cmd = _synthesize(cmdline, -1)
if cmd[1] is None:
register(cmdline, None, GenericBrowser(cmdline), -1)
cmdline = None # to make del work if _userchoices was empty
del cmdline
del _userchoices
# what to do if _tryorder is now empty?
def main():
import getopt
usage = """Usage: %s [-n | -t] url
-n: open new window
-t: open new tab""" % sys.argv[0]
try:
opts, args = getopt.getopt(sys.argv[1:], 'ntd')
except getopt.error as msg:
print(msg, file=sys.stderr)
print(usage, file=sys.stderr)
sys.exit(1)
new_win = 0
for o, a in opts:
if o == '-n': new_win = 1
elif o == '-t': new_win = 2
if len(args) != 1:
print(usage, file=sys.stderr)
sys.exit(1)
url = args[0]
open(url, new_win)
print("\a")
if __name__ == "__main__":
main()
|
the-stack_0_14397 | import argparse
import imp
import os
import re
from functools import wraps
from operator import methodcaller
import orca
from flask import (
Flask, abort, jsonify, request, render_template, redirect, url_for)
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from six import StringIO
app = Flask(__name__)
_GROUPBY_AGG_MAP = {
'sum': methodcaller('sum'),
'mean': methodcaller('mean'),
'median': methodcaller('median'),
'std': methodcaller('std'),
'size': methodcaller('size')
}
def import_file(filename):
"""
Import a file that will trigger the population of Orca.
Parameters
----------
filename : str
"""
pathname, filename = os.path.split(filename)
modname = re.match(
r'(?P<modname>\w+)\.py', filename).group('modname')
file, path, desc = imp.find_module(modname, [pathname])
try:
imp.load_module(modname, file, path, desc)
finally:
file.close()
def check_is_table(func):
"""
Decorator that will check whether the "table_name" keyword argument
to the wrapped function matches a registered Orca table.
"""
@wraps(func)
def wrapper(**kwargs):
if not orca.is_table(kwargs['table_name']):
abort(404)
return func(**kwargs)
return wrapper
def check_is_column(func):
"""
Decorator that will check whether the "table_name" and "col_name"
keyword arguments to the wrapped function match a registered Orca
table and column.
"""
@wraps(func)
def wrapper(**kwargs):
table_name = kwargs['table_name']
col_name = kwargs['col_name']
if not orca.is_table(table_name):
abort(404)
if col_name not in orca.get_table(table_name).columns:
abort(404)
return func(**kwargs)
return wrapper
def check_is_injectable(func):
"""
Decorator that will check whether the "inj_name" keyword argument to
the wrapped function matches a registered Orca injectable.
"""
@wraps(func)
def wrapper(**kwargs):
name = kwargs['inj_name']
if not orca.is_injectable(name):
abort(404)
return func(**kwargs)
return wrapper
@app.route('/schema')
def schema():
"""
All tables, columns, steps, injectables and broadcasts registered with
Orca. Includes local columns on tables.
"""
tables = orca.list_tables()
cols = {t: orca.get_table(t).columns for t in tables}
steps = orca.list_steps()
injectables = orca.list_injectables()
broadcasts = orca.list_broadcasts()
return jsonify(
tables=tables, columns=cols, steps=steps, injectables=injectables,
broadcasts=broadcasts)
@app.route('/tables')
def list_tables():
"""
List all registered tables.
"""
tables = orca.list_tables()
return jsonify(tables=tables)
@app.route('/tables/<table_name>/info')
@check_is_table
def table_info(table_name):
"""
Return the text result of table.info(verbose=True).
"""
table = orca.get_table(table_name).to_frame()
buf = StringIO()
table.info(verbose=True, buf=buf)
info = buf.getvalue()
return info, 200, {'Content-Type': 'text/plain'}
@app.route('/tables/<table_name>/preview')
@check_is_table
def table_preview(table_name):
"""
    Returns the first five rows of a table as JSON. Includes all columns.
Uses Pandas' "split" JSON format.
"""
preview = orca.get_table(table_name).to_frame().head()
return (
preview.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'})
@app.route('/tables/<table_name>/describe')
@check_is_table
def table_describe(table_name):
"""
Return summary statistics of a table as JSON. Includes all columns.
Uses Pandas' "split" JSON format.
"""
desc = orca.get_table(table_name).to_frame().describe()
return (
desc.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'})
@app.route('/tables/<table_name>/definition')
@check_is_table
def table_definition(table_name):
"""
Get the source of a table function.
If a table is registered DataFrame and not a function then all that is
returned is {'type': 'dataframe'}.
If the table is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
"""
if orca.table_type(table_name) == 'dataframe':
return jsonify(type='dataframe')
filename, lineno, source = \
orca.get_raw_table(table_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html)
@app.route('/tables/<table_name>/csv')
@check_is_table
def table_csv(table_name):
"""
Returns a table as text/csv using Pandas default csv output.
"""
csv = orca.get_table(table_name).to_frame().to_csv()
return csv, 200, {'Content-Type': 'text/csv'}
@app.route('/tables/<table_name>/groupbyagg')
@check_is_table
def table_groupbyagg(table_name):
"""
Perform a groupby on a table and return an aggregation on a single column.
This depends on some request parameters in the URL.
"column" and "agg" must always be present, and one of "by" or "level"
must be present. "column" is the table column on which aggregation will
be performed, "agg" is the aggregation that will be performed, and
"by"/"level" define how to group the data.
Supported "agg" parameters are: mean, median, std, sum, and size.
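
    For example (illustrative table and column names):
    /tables/households/groupbyagg?column=income&agg=mean&by=zone_id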
"""
table = orca.get_table(table_name)
# column to aggregate
column = request.args.get('column', None)
if not column or column not in table.columns:
abort(400)
# column or index level to group by
by = request.args.get('by', None)
level = request.args.get('level', None)
if (not by and not level) or (by and level):
abort(400)
# aggregation type
agg = request.args.get('agg', None)
if not agg or agg not in _GROUPBY_AGG_MAP:
abort(400)
column = table.get_column(column)
# level can either be an integer level number or a string level name.
# try converting to integer, but if that doesn't work
# we go ahead with the string.
if level:
try:
level = int(level)
except ValueError:
pass
gby = column.groupby(level=level)
else:
by = table.get_column(by)
gby = column.groupby(by)
result = _GROUPBY_AGG_MAP[agg](gby)
return (
result.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'})
@app.route('/tables/<table_name>/columns')
@check_is_table
def list_table_columns(table_name):
"""
List columns for a specific table.
"""
return jsonify(columns=orca.get_table(table_name).columns)
@app.route('/tables/<table_name>/columns/<col_name>/preview')
@check_is_column
def column_preview(table_name, col_name):
"""
Return the first ten elements of a column as JSON in Pandas'
"split" format.
"""
col = orca.get_table(table_name).get_column(col_name).head(10)
return (
col.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'})
@app.route('/tables/<table_name>/columns/<col_name>/definition')
@check_is_column
def column_definition(table_name, col_name):
"""
Get the source of a column function.
If a column is a registered Series and not a function then all that is
returned is {'type': 'series'}.
If the column is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
"""
col_type = orca.get_table(table_name).column_type(col_name)
if col_type != 'function':
return jsonify(type=col_type)
filename, lineno, source = \
orca.get_raw_column(table_name, col_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html)
@app.route('/tables/<table_name>/columns/<col_name>/describe')
@check_is_column
def column_describe(table_name, col_name):
"""
Return summary statistics of a column as JSON.
Uses Pandas' "split" JSON format.
"""
col_desc = orca.get_table(table_name).get_column(col_name).describe()
return (
col_desc.to_json(orient='split'),
200,
{'Content-Type': 'application/json'})
@app.route('/tables/<table_name>/columns/<col_name>/csv')
@check_is_column
def column_csv(table_name, col_name):
"""
Return a column as CSV using Pandas' default CSV output.
"""
csv = orca.get_table(table_name).get_column(col_name).to_csv(path_or_buf=None)
return csv, 200, {'Content-Type': 'text/csv'}
@app.route('/injectables')
def list_injectables():
"""
List all registered injectables.
"""
return jsonify(injectables=orca.list_injectables())
@app.route('/injectables/<inj_name>/repr')
@check_is_injectable
def injectable_repr(inj_name):
"""
Returns the type and repr of an injectable. JSON response has
"type" and "repr" keys.
"""
i = orca.get_injectable(inj_name)
return jsonify(type=str(type(i)), repr=repr(i))
@app.route('/injectables/<inj_name>/definition')
@check_is_injectable
def injectable_definition(inj_name):
"""
Get the source of an injectable function.
If an injectable is a registered Python variable and not a function
then all that is returned is {'type': 'variable'}.
If the column is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
"""
inj_type = orca.injectable_type(inj_name)
if inj_type == 'variable':
return jsonify(type='variable')
else:
filename, lineno, source = \
orca.get_injectable_func_source_data(inj_name)
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html)
@app.route('/broadcasts')
def list_broadcasts():
"""
List all registered broadcasts as a list of objects with
keys "cast" and "onto".
"""
casts = [{'cast': b[0], 'onto': b[1]} for b in orca.list_broadcasts()]
return jsonify(broadcasts=casts)
@app.route('/broadcasts/<cast_name>/<onto_name>/definition')
def broadcast_definition(cast_name, onto_name):
"""
Return the definition of a broadcast as an object with keys
"cast", "onto", "cast_on", "onto_on", "cast_index", and "onto_index".
These are the same as the arguments to the ``broadcast`` function.
"""
if not orca.is_broadcast(cast_name, onto_name):
abort(404)
b = orca.get_broadcast(cast_name, onto_name)
return jsonify(
cast=b.cast, onto=b.onto, cast_on=b.cast_on, onto_on=b.onto_on,
cast_index=b.cast_index, onto_index=b.onto_index)
@app.route('/steps')
def list_steps():
"""
List all registered Orca steps.
"""
return jsonify(steps=orca.list_steps())
@app.route('/steps/<step_name>/definition')
def step_definition(step_name):
"""
Get the source of a step function. Returned object has keys
"filename", "lineno", "text" and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
"""
if not orca.is_step(step_name):
abort(404)
filename, lineno, source = \
orca.get_step(step_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(filename=filename, lineno=lineno, text=source, html=html)
@app.route('/ui')
def ui():
return render_template('ui.html')
@app.route('/')
def root():
return redirect(url_for('ui'))
def parse_args(args=None):
parser = argparse.ArgumentParser(
description=(
'Start a Flask server that has HTTP endpoints that provide data '
'about an Orca configuration and data registered with Orca.'))
parser.add_argument(
'-d', '--debug', action='store_true',
help='Enable Flask\'s debug mode')
parser.add_argument(
'-H', '--host', type=str, help='Hostname on which to run the server')
parser.add_argument(
'-p', '--port', type=int, help='Port on which to run server')
parser.add_argument('filename', type=str, help='File with Orca config')
return parser.parse_args(args)
def main(args=None):
args = parse_args(args)
import_file(args.filename)
app.run(host=args.host, port=args.port, debug=args.debug)
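

# Illustrative invocation (not part of the original module). Assuming
# "simulation.py" is a script that registers tables/steps with Orca, the server
# could be started and queried roughly like this:
#
#   main(["-H", "127.0.0.1", "-p", "8765", "simulation.py"])
#
#   $ curl http://127.0.0.1:8765/schema
#   $ curl http://127.0.0.1:8765/tables/households/preview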
|
the-stack_0_14398 | import turtle
# -- Function Definitions
def draw_board(x, y, size):
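    """Draw an 8x8 checkerboard of alternating red and black squares,
    with its top-left corner at (x, y) and squares of side length `size`."""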
color = "red"
turtle.color("red")
start = 1
turtle.penup()
turtle.goto(x, y)
turtle.pendown()
for n in range(8):
for n in range(8):
if start == 0:
if color == "red":
color = "black"
turtle.color("black")
elif color == "black":
color = "red"
turtle.color("red")
else:
start = 0
turtle.begin_fill()
for n in range(4):
turtle.forward(size)
turtle.right(90)
turtle.end_fill()
turtle.forward(size)
if color == "red":
turtle.color("black")
color="black"
elif color == "black":
turtle.color("red")
color="red"
turtle.right(90)
turtle.forward(size)
turtle.right(90)
turtle.forward(size * 8)
turtle.right(180)
def main():
turtle.speed(0)
draw_board(-100, 100, 50)
def gotomain():
main()
gotomain()
|
the-stack_0_14401 | #!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
from setuptools.extension import Extension
from codecs import open
import os
import re
import sys
from Cython.Build import cythonize
here = os.path.abspath(os.path.dirname(__file__))
sys.path.append(here)
import versioneer # noqa: E402
import cuda_ext # noqa: E402
CLASSIFIERS = """
Development Status :: 4 - Beta
Intended Audience :: Science/Research
License :: OSI Approved :: MIT License
Programming Language :: Python :: 3
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: Implementation :: CPython
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
MINIMUM_VERSIONS = {
"numpy": "1.13",
"requests": "2.18",
"jax": "0.2.10",
}
CONSOLE_SCRIPTS = [
"veros = veros.cli.veros:cli",
"veros-run = veros.cli.veros_run:cli",
"veros-copy-setup = veros.cli.veros_copy_setup:cli",
"veros-resubmit = veros.cli.veros_resubmit:cli",
"veros-create-mask = veros.cli.veros_create_mask:cli",
]
PACKAGE_DATA = ["setups/*/assets.json", "setups/*/*.npy", "setups/*/*.png"]
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
def parse_requirements(reqfile):
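    """Read a requirements file, relaxing exact pins to ranges.

    For example (illustrative), a line "numpy==1.21" becomes "numpy<=1.21,>=1.13"
    because numpy has an entry in MINIMUM_VERSIONS; packages without an entry only
    have their "==" pin relaxed to "<=".
    """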
requirements = []
with open(os.path.join(here, reqfile), encoding="utf-8") as f:
for line in f:
line = line.strip()
pkg = re.match(r"(\w+)\b.*", line).group(1)
if pkg in MINIMUM_VERSIONS:
line = "".join([line, ",>=", MINIMUM_VERSIONS[pkg]])
line = line.replace("==", "<=")
requirements.append(line)
return requirements
INSTALL_REQUIRES = parse_requirements("requirements.txt")
EXTRAS_REQUIRE = {
"test": ["pytest", "pytest-cov", "pytest-forked", "codecov", "xarray"],
}
EXTRAS_REQUIRE["jax"] = parse_requirements("requirements_jax.txt")
def get_extensions(require_cython_ext, require_cuda_ext):
cuda_info = cuda_ext.cuda_info
extension_modules = {
"veros.core.special.tdma_cython_": ["tdma_cython_.pyx"],
"veros.core.special.tdma_cuda_": ["tdma_cuda_.pyx", "cuda_tdma_kernels.cu"],
}
def is_cuda_ext(sources):
return any(source.endswith(".cu") for source in sources)
extensions = []
for module, sources in extension_modules.items():
extension_dir = os.path.join(*module.split(".")[:-1])
kwargs = dict()
if is_cuda_ext(sources):
kwargs.update(
library_dirs=cuda_info["lib64"],
libraries=["cudart"],
runtime_library_dirs=cuda_info["lib64"],
include_dirs=cuda_info["include"],
)
ext = Extension(
name=module,
sources=[os.path.join(extension_dir, f) for f in sources],
extra_compile_args={
"gcc": [],
"nvcc": cuda_info["cflags"],
},
**kwargs,
)
extensions.append(ext)
extensions = cythonize(extensions, language_level=3, exclude_failures=True)
for ext in extensions:
is_required = (not is_cuda_ext(ext.sources) and require_cython_ext) or (
is_cuda_ext(ext.sources) and require_cuda_ext
)
if not is_required:
ext.optional = True
return extensions
cmdclass = versioneer.get_cmdclass()
cmdclass.update(build_ext=cuda_ext.custom_build_ext)
def _env_to_bool(envvar):
return os.environ.get(envvar, "").lower() in ("1", "true", "on")
extensions = get_extensions(
require_cython_ext=_env_to_bool("VEROS_REQUIRE_CYTHON_EXT"),
require_cuda_ext=_env_to_bool("VEROS_REQUIRE_CUDA_EXT"),
)
setup(
name="veros",
license="MIT",
author="Dion Häfner (NBI Copenhagen)",
author_email="[email protected]",
keywords="oceanography python parallel numpy multi-core geophysics ocean-model mpi4py jax",
description="The versatile ocean simulator, in pure Python, powered by JAX.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://veros.readthedocs.io",
python_requires=">=3.7",
version=versioneer.get_version(),
cmdclass=cmdclass,
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
ext_modules=extensions,
entry_points={"console_scripts": CONSOLE_SCRIPTS, "veros.setup_dirs": ["base = veros.setups"]},
package_data={"veros": PACKAGE_DATA},
classifiers=[c for c in CLASSIFIERS.split("\n") if c],
zip_safe=False,
)
|
the-stack_0_14402 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import peforth
import argparse
import sys
import time
import numpy as np
import tensorflow as tf
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def read_tensor_from_image_file(file_name, input_height=299, input_width=299,
input_mean=0, input_std=255):
input_name = "file_reader"
output_name = "normalized"
file_reader = tf.read_file(file_name, input_name)
if file_name.endswith(".png"):
image_reader = tf.image.decode_png(file_reader, channels = 3,
name='png_reader')
elif file_name.endswith(".gif"):
image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
name='gif_reader'))
elif file_name.endswith(".bmp"):
image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
else:
image_reader = tf.image.decode_jpeg(file_reader, channels = 3,
name='jpeg_reader')
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0);
resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
sess = tf.Session()
result = sess.run(normalized)
return result
def load_labels(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
# peforth.ok('11> ',loc=locals(),cmd=":> [0] value locals cr")
if __name__ == "__main__":
file_name = "tf_files/flower_photos/daisy/3475870145_685a19116d.jpg"
model_file = "tf_files/retrained_graph.pb"
label_file = "tf_files/retrained_labels.txt"
input_height = 224
input_width = 224
input_mean = 128
input_std = 128
input_layer = "input"
output_layer = "final_result"
parser = argparse.ArgumentParser()
parser.add_argument("--image", help="image to be processed")
parser.add_argument("--graph", help="graph/model to be executed")
parser.add_argument("--labels", help="name of file containing labels")
parser.add_argument("--input_height", type=int, help="input height")
parser.add_argument("--input_width", type=int, help="input width")
parser.add_argument("--input_mean", type=int, help="input mean")
parser.add_argument("--input_std", type=int, help="input std")
parser.add_argument("--input_layer", help="name of input layer")
parser.add_argument("--output_layer", help="name of output layer")
args = parser.parse_args()
if args.graph:
model_file = args.graph
if args.image:
file_name = args.image
if args.labels:
label_file = args.labels
if args.input_height:
input_height = args.input_height
if args.input_width:
input_width = args.input_width
if args.input_mean:
input_mean = args.input_mean
if args.input_std:
input_std = args.input_std
if args.input_layer:
input_layer = args.input_layer
if args.output_layer:
output_layer = args.output_layer
graph = load_graph(model_file)
t = read_tensor_from_image_file(file_name,
input_height=input_height,
input_width=input_width,
input_mean=input_mean,
input_std=input_std)
input_name = "import/" + input_layer
output_name = "import/" + output_layer
input_operation = graph.get_operation_by_name(input_name);
output_operation = graph.get_operation_by_name(output_name);
with tf.Session(graph=graph) as sess:
start = time.time()
results = sess.run(output_operation.outputs[0],
{input_operation.outputs[0]: t})
end=time.time()
results = np.squeeze(results)
top_k = results.argsort()[-5:][::-1]
labels = load_labels(label_file)
print('\nEvaluation time (1-image): {:.3f}s\n'.format(end-start))
for i in top_k:
print(labels[i], results[i])
peforth.ok('22> ',loc=locals(),cmd=":> [0] value locals cr")
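# Hedged usage sketch: an invocation built only from the argparse flags defined
# above. The script name and file paths are the in-script defaults and are
# assumptions, not verified assets.
#
#   python label_image.py \
#       --graph=tf_files/retrained_graph.pb \
#       --labels=tf_files/retrained_labels.txt \
#       --input_height=224 --input_width=224 --input_mean=128 --input_std=128 \
#       --input_layer=input --output_layer=final_result \
#       --image=tf_files/flower_photos/daisy/3475870145_685a19116d.jpg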
|
the-stack_0_14403 | import os
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import precision_score, recall_score
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from transformers import BertModel
class RNN_RNN(nn.Module):
def __init__(self, device, hps, loss_weights):
super(RNN_RNN, self).__init__()
self._hps = hps
self._device = device
self._loss_weight = loss_weights
now = datetime.now()
dt_str = now.strftime("%d-%m-%Y-%H-%M-%S")
comment = '__'.join([k + '_' + str(v) for k, v in self._hps.items()])
path = os.path.join(self._hps["path"], 'runs')
if not os.path.exists(path):
os.makedirs(path)
self._summary_writer = SummaryWriter(os.path.join(path, dt_str), comment=comment)
self._define_model()
self._optimizer = self._define_optimizer()
self._global_step = 0
def _define_model(self):
self._bert = BertModel.from_pretrained(self._hps['bert'], output_hidden_states=True)
self._emb_bert_dim = 768 * (4 if self._hps['emb_layers'] == 'concat' else 1)
self._fc_emb = nn.Linear(self._emb_bert_dim, self._hps['emb_dim'])
self._fc_emb_relu = nn.ReLU()
if self._hps['units'] == 'lstm':
self._net_1 = nn.LSTM(self._hps['emb_dim'], self._hps['hidden1'], bidirectional=True)
self._net_2 = nn.LSTM(self._hps['hidden1'] * 2, self._hps['hidden2'], bidirectional=False)
else:
self._net_1 = nn.GRU(self._hps['emb_dim'], self._hps['hidden1'], bidirectional=True)
self._net_2 = nn.GRU(self._hps['hidden1'] * 2, self._hps['hidden2'], bidirectional=False)
self._fc = nn.Linear(self._hps['hidden2'], 3)
self._dropout_emb = nn.Dropout(self._hps['prob_emb'])
self._dropout1 = nn.Dropout(self._hps['prob1'])
self._dropout2 = nn.Dropout(self._hps['prob2'])
self._softmax = nn.LogSoftmax(dim=1)
self._criterion = nn.NLLLoss(ignore_index=-1, reduction='mean', weight=self._loss_weight)
def _define_optimizer(self):
opt = torch.optim.SGD(self.parameters(), self._hps['lr'])
if self._hps['optimizer'] == 'ADAM':
opt = torch.optim.Adam(self.parameters(), self._hps['lr'], weight_decay=self._hps['weight'])
elif self._hps['optimizer'] == 'Adadelta':
opt = torch.optim.Adadelta(self.parameters(), self._hps['lr'], weight_decay=self._hps['weight'])
elif self._hps['optimizer'] == 'Adagrad':
opt = torch.optim.Adagrad(self.parameters(), self._hps['lr'], weight_decay=self._hps['weight'])
elif self._hps['optimizer'] == 'RSMProp':
opt = torch.optim.RMSprop(self.parameters(), self._hps['lr'], weight_decay=self._hps['weight'])
return opt
def _pointwise_max(self, tensors):
tensors = tensors.view(self._hps['msg_len'], -1, self._hps['batch_size'] * self._hps['conv_len'])
t_prev = tensors[0]
for t in tensors:
t_prev = torch.max(t_prev, t)
t_prev = t_prev.view(self._hps['batch_size'] * self._hps['conv_len'], -1)
return t_prev
def _extract_layer(self, hidden_states):
if self._hps['emb_layers'] == 'last':
return hidden_states[-1]
elif self._hps['emb_layers'] == 'second':
return hidden_states[-2]
elif self._hps['emb_layers'] == 'sum_all':
return torch.sum(torch.stack(hidden_states[1:]), dim=0) # exclude first layer (embedding)
elif self._hps['emb_layers'] == 'sum_four':
return torch.sum(torch.stack(hidden_states[-4:]), dim=0)
elif self._hps['emb_layers'] == 'concat':
return torch.cat(hidden_states[-4:], dim=2)
else:
return hidden_states[-1]
def _decode_softmax(self, pred, msk_conv):
pred = pred.view(self._hps['batch_size'] * self._hps['conv_len'], -1)
msk_conv = msk_conv.view(self._hps['batch_size'] * self._hps['conv_len'])
        indices = torch.nonzero(msk_conv, as_tuple=True)
        preds = pred[indices]
return list(map(lambda x: np.argmax(x), preds.tolist()))
def close_writer(self):
self._summary_writer.close()
def get_states(self):
return self.state_dict(), self._optimizer.state_dict(), self._global_step
def load_state(self, checkpoint):
self.load_model(checkpoint['state_dict'])
self._optimizer.load_state_dict(checkpoint['optimizer'])
self._global_step = checkpoint['step']
def forward(self, x, msk_conv, msk_msg):
"""
:param x: (batch, conv_len, msg_len+2)
:param msk_conv: (batch, conv_len)
:param msk_msg: (batch, conv_len, msg_len+2)
:return: (conv_len, batch, classes)
"""
input_ids = x.view(self._hps['batch_size'] * self._hps['conv_len'], -1) # batch * conv_len, sequence_length
attention_mask = msk_msg.view(self._hps['batch_size'] * self._hps['conv_len'], -1)
self._bert.eval()
with torch.no_grad():
bert_emb_out = self._bert(input_ids, attention_mask)
# word embeddings
bert_emb_states = bert_emb_out[2] # (batch * conv_len, sequence_length, hidden_size) for each layer (13)
bert_emb = self._extract_layer(bert_emb_states)
bert_emb = bert_emb[:, 1:-1, :] # discard special tokens
msk_msg = msk_msg[:, :, 1:-1]
if self._hps['emb_dim'] != self._emb_bert_dim:
bert_emb = self._fc_emb_relu(self._fc_emb(bert_emb)) # reduce dimensions
embed_x = self._dropout_emb(bert_emb)
# reshape x
embed_x = embed_x.reshape(self._hps['msg_len'], # msg_len
self._hps['batch_size'] * self._hps['conv_len'], # batch * conv_len
self._hps['emb_dim']) # hid_dim
# first net
net_in, _ = self._net_1(embed_x) # (msg_len, batch * conv_len, hidden)
msk_msg = msk_msg.reshape(self._hps['msg_len'], self._hps['batch_size'] * self._hps['conv_len']).unsqueeze(-1)
net_in = net_in * msk_msg # remove padding
dropped = self._dropout1(net_in)
first_net = self._pointwise_max(dropped)
first_net = first_net.view(self._hps['conv_len'], self._hps['batch_size'], self._hps['hidden1'] * 2)
# second net
net_out, _ = self._net_2(first_net) # conv_len, batch, hidden
second_net = self._dropout2(net_out)
# prediction
msgs = []
for msg in second_net:
out = self._fc(msg) # batch, classes
out = self._softmax(out)
msgs.append(out)
output = torch.stack(msgs) # conv_len, batch, classes
msk_conv = msk_conv.view(self._hps['conv_len'], self._hps['batch_size']).unsqueeze(-1)
output = output * msk_conv
return output
def fit(self, x, y, msk_conv, msk_msg):
"""
Train the model
:param x: input sequence (batch, conv_len, msg_len+2)
:param y: target sequence (batch, conv_len)
:param msk_conv: conversation mask (batch, conv_len)
:param msk_msg: message mask (batch, conv_len, msg_len+2)
:return: loss value, step
"""
self.train()
self._optimizer.zero_grad()
preds = self(x, msk_conv, msk_msg) # conv_len, batch, classes
# compute average loss
avg_loss = []
pred_y = preds.view(self._hps['batch_size'], self._hps['conv_len'], -1)
true_y = y.view(self._hps['batch_size'], self._hps['conv_len'])
for i in range(self._hps['batch_size']):
avg_loss.append(self._criterion(pred_y[i], true_y[i]))
loss = torch.mean(torch.stack(avg_loss))
loss_value = loss.item()
# optimization step
loss.backward()
if self._hps['clip'] != -1:
nn.utils.clip_grad_norm_(self.parameters(), self._hps['clip'])
self._optimizer.step()
# compute metrics
if self._global_step % self._hps['save'] == 0:
y_pred = self._decode_softmax(preds, msk_conv)
y_test = y.view(self._hps['batch_size'] * self._hps['conv_len']).tolist()
y_test = list(filter(lambda z: z != -1, y_test)) # ignore padding
parameters = [p for p in self.parameters() if p.grad is not None]
total_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach()).to(self._device) for p in parameters])) # L2 norm
prec = precision_score(y_test, y_pred, average='macro', zero_division=0)
rec = recall_score(y_test, y_pred, average='macro', zero_division=0)
self._summary_writer.add_scalar('Loss/train', loss_value, self._global_step)
self._summary_writer.add_scalar('Precision/train', prec, self._global_step)
self._summary_writer.add_scalar('Recall/train', rec, self._global_step)
self._summary_writer.add_scalar('Grad norm/train', total_norm, self._global_step)
if self._hps['emb_dim'] != self._emb_bert_dim:
self._summary_writer.add_histogram('fc_emb/bias', self._fc_emb.bias, self._global_step)
self._summary_writer.add_histogram('fc_emb/weight', self._fc_emb.weight, self._global_step)
self._summary_writer.add_histogram('net_1/bias', self._net_1.bias_hh_l0, self._global_step)
self._summary_writer.add_histogram('net_1/weight', self._net_1.weight_hh_l0, self._global_step)
self._summary_writer.add_histogram('net_2/bias', self._net_2.bias_hh_l0, self._global_step)
self._summary_writer.add_histogram('net_2/weight', self._net_2.weight_hh_l0, self._global_step)
self._summary_writer.add_histogram('fc/bias', self._fc.bias, self._global_step)
self._summary_writer.add_histogram('fc/weight', self._fc.weight, self._global_step)
self._summary_writer.flush()
self._global_step += 1
return loss_value, self._global_step
def valid(self, x, y, msk_conv, msk_msg):
"""
Validate the model
:param x: input sequence (batch, conv_len, msg_len+2)
:param y: target sequence (batch, conv_len)
:param msk_conv: conversation mask (batch, conv_len)
:param msk_msg: message mask (batch, conv_len, msg_len+2)
:return: loss value, step
"""
with torch.no_grad():
self.eval()
preds = self(x, msk_conv, msk_msg) # conv_len, batch, classes
# compute average loss
avg_loss = []
pred_y = preds.view(self._hps['batch_size'], self._hps['conv_len'], -1)
true_y = y.view(self._hps['batch_size'], self._hps['conv_len'])
for i in range(self._hps['batch_size']):
avg_loss.append(self._criterion(pred_y[i], true_y[i]))
loss = torch.mean(torch.stack(avg_loss))
loss_value = loss.item()
# compute metrics
if self._global_step % self._hps['save'] == 0:
y_pred = self._decode_softmax(preds, msk_conv)
y_test = y.view(self._hps['batch_size'] * self._hps['conv_len']).tolist()
y_test = list(filter(lambda z: z != -1, y_test)) # ignore padding
prec = precision_score(y_test, y_pred, average='macro', zero_division=0)
rec = recall_score(y_test, y_pred, average='macro', zero_division=0)
self._summary_writer.add_scalar('Loss/valid', loss_value, self._global_step)
self._summary_writer.add_scalar('Precision/valid', prec, self._global_step)
self._summary_writer.add_scalar('Recall/valid', rec, self._global_step)
self._summary_writer.flush()
self._global_step += 1
return loss_value, self._global_step
def predict(self, x, msk_conv, msk_msg, no_batch=False):
"""
Use the model for prediction
:param x: input sequence (batch, conv_len, msg_len+2)
:param msk_conv: conversation mask (batch, conv_len)
:param msk_msg: message mask (batch, conv_len, msg_len+2)
:param no_batch: true if there is only 1 batch
:return: [unpad_conv_len]
"""
if no_batch:
self._hps['batch_size'] = 1
with torch.no_grad():
self.eval()
preds = self(x, msk_conv, msk_msg) # conv_len, batch, classes
return self._decode_softmax(preds, msk_conv)
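# Hedged construction sketch (values are illustrative placeholders, not taken
# from the original project): the keys listed are the ones this class reads
# from its `hps` dict.
#
#   hps = {
#       "path": "./runs_out", "bert": "bert-base-uncased", "emb_layers": "last",
#       "emb_dim": 256, "units": "gru", "hidden1": 128, "hidden2": 64,
#       "prob_emb": 0.1, "prob1": 0.2, "prob2": 0.2,
#       "optimizer": "ADAM", "lr": 1e-3, "weight": 0.0, "clip": 1.0,
#       "batch_size": 4, "conv_len": 10, "msg_len": 64, "save": 100,
#   }
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   model = RNN_RNN(device, hps, loss_weights=torch.tensor([1.0, 1.0, 1.0])).to(device)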
|
the-stack_0_14405 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
from epicteller.core.dao.message import MessageDAO
from epicteller.core.model.message import TextMessageContent
from epicteller.core.util.enum import MessageType
async def main():
start = 0
limit = 1000
while messages := await MessageDAO.scan_messages(start, limit):
for message in messages:
print(f'message {message.id}', end='')
if message.type != MessageType.TEXT:
print(f'...skip, not text')
continue
assert isinstance(message.content, TextMessageContent)
print(f'[{message.content.text[:30]}]', end='')
replaced_text = message.content.text.replace('\r\n', '\n')
if replaced_text == message.content.text:
print(f'...skip, equally')
continue
content = TextMessageContent(text=replaced_text)
await MessageDAO.update_message(message.id, content=content.dict(), updated=message.created)
print(f'...replaced')
start += limit
print('Done')
if __name__ == '__main__':
asyncio.run(main())
|
the-stack_0_14407 | # -*- coding: utf-8 -*-
"""
Parse Excel table to output compact JSON.
Always outputs to terms.json
Usage:
python TermExtractor.py <inputfile.xlsx>
Dependencies:
pandas
"""
# Importing the libraries
import argparse
import json
import sys
from collections import namedtuple
import pandas as pd
# Requires filename to read from
parser = argparse.ArgumentParser()
parser.add_argument("inputfile")
args = parser.parse_args()
# Importing the excel data
try:
workbook = pd.read_excel(args.inputfile, sheet_name=None, encoding="utf-8")
except:
sys.exit("ERROR: file could not be read")
# Create list of named tuples for matching terms
termlist = []
Term = namedtuple("Term", "default yen alts")
for sheet in workbook.values():
height, width = sheet.shape
for y in range(0, height):
for x in range(0, width):
# Detect cells marked with # or % as the first letter
cell = sheet.iat[y, x]
if isinstance(cell, str):
# Regular term inclusion
if cell[0] == '#' or cell[0] == '%':
xpos = x + 1
default = cell[1:].strip()
# Grab primary replacement if valid
if (xpos < width) and (isinstance(sheet.iat[y, xpos], str)):
yen = sheet.iat[y, xpos].strip()
else:
yen = ""
alts = []
while True:
# Add optional alts
xpos += 1
if (xpos < width) and (isinstance(sheet.iat[y, xpos], str)):
alts.append(sheet.iat[y, xpos].strip())
else:
break
# Have at least one replacement, otherwise discard
if yen or alts:
termlist.append(Term(default=default, yen=yen, alts=alts))
# Automatically add a capitalized version only if starting with %
if cell[0] == '%':
xpos = x + 1
default = cell[1:].strip().capitalize()
# Grab primary replacement if valid
if (xpos < width) and (isinstance(sheet.iat[y, xpos], str)):
yen = sheet.iat[y, xpos].strip().capitalize()
else:
yen = ""
alts = []
while True:
# Add optional alts
xpos += 1
if (xpos < width) and (isinstance(sheet.iat[y, xpos], str)):
alts.append(sheet.iat[y, xpos].strip().capitalize())
else:
break
# Have at least one replacement, otherwise discard
if yen or alts:
termlist.append(Term(default=default, yen=yen, alts=alts))
# Sort by length, descending
termlist.sort(key=lambda term: len(getattr(term, "default")), reverse=True)
# Export JSON
jset = {}
# Add every term in form: "Default name": ["Default name", "yen name", "alts"]
for t in termlist:
# Add the Default name, then yen name. Repeat Default if no yen name is given
jset[t.default] = [t.default]
jset[t.default].append(t.yen if t.yen else t.default)
for a in t.alts:
jset[t.default].append(a)
try:
with open('terms.json', 'w', encoding="utf-8") as outfile:
json.dump(jset, outfile, indent=4, ensure_ascii=False)
except:
print("ERROR: Writing the file has failed!")
else:
print("terms.json written successfully")
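# Illustrative shape of the generated terms.json (entries are invented, not
# from any real spreadsheet), following the "Default name": [default, yen,
# alts...] layout assembled above:
#
#   {
#       "Magic Sword": ["Magic Sword", "Mahou no Ken", "Enchanted Blade"],
#       "Potion": ["Potion", "Potion"]
#   }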
|
the-stack_0_14408 | #!/bin/python3.5
# call it the regression testing file
# @DEVI-if you wanna pipe the output, run with python -u. buffered output
# screws up the output
import sys
import os
from test_LEB128 import test_signed_LEB128
from test_LEB128 import test_unsigned_LEB128
from leb128s import leb128sencodedecodeexhaustive
from leb128s import leb128uencodedecodeexhaustive
from abc import ABCMeta, abstractmethod
sys.path.append('../')
from utils import Colors
from argparser import *
from TBInit import *
total_test_cnt = int()
expected_pass_cnt = int()
expected_fail_cnt = int()
success = Colors.green + "SUCCESS: " + Colors.ENDC
fail = Colors.red + "FAIL: " + Colors.ENDC
# in order to keep the regression test script clean, the tests will need to
# inherit from this test class, implement the two virtual methods and then call
# it inside the main.
class Void_Spwner():
__metaclass__ = ABCMeta
def __init__(self):
pass
# this is the method that runs your tests
@abstractmethod
def Legacy(self):
pass
# this tells the class what name to use to display your test results
@abstractmethod
def GetName(self):
return(str())
def Spwn(self):
pid = os.fork()
# I don't have a bellybutton
if pid == 0:
self.Legacy()
sys.exit()
elif pid > 0:
cpid, status = os.waitpid(pid, 0)
if status == 0:
                print(success + self.GetName())
else:
                print(fail + self.GetName())
else:
# basically we couldnt fork a child
            print(fail + 'return code: ' + str(pid))
raise Exception("could not fork child")
def ObjectList():
obj_list = []
cwd = os.getcwd()
for file in os.listdir(cwd + "/testsuite"):
if file.endswith(".wasm"):
obj_list.append(cwd + "/testsuite/" + file)
return(obj_list)
################################################################################
class LEB128EncodeTest(Void_Spwner):
def Legacy(self):
test_unsigned_LEB128()
test_signed_LEB128()
def GetName(self):
return('leb128encodetest')
class LEB128Exhaustive(Void_Spwner):
def Legacy(self):
leb128sencodedecodeexhaustive()
leb128uencodedecodeexhaustive()
def GetName(self):
return('leb128exhaustive')
################################################################################
def main():
return_list = []
# LEB128 tests
leb128encodetest = LEB128EncodeTest()
leb128encodetest.Spwn()
# leb128s exhaustive
leb128sex = LEB128Exhaustive()
leb128sex.Spwn()
# parser test on the WASM testsuite
obj_list = ObjectList()
for testfile in obj_list:
pid = os.fork()
        # I don't have a bellybutton
if pid == 0:
# @DEVI-FIXME-pipe stdout and stderr to a file instead of the
# bitbucket
sys.stdout = open('/dev/null', 'w')
sys.stderr = open('/dev/null', 'w')
interpreter = PythonInterpreter()
module = interpreter.parse(testfile)
interpreter.appendmodule(module)
interpreter.dump_sections(module)
interpreter.runValidations()
vm = VM(interpreter.getmodules())
ms = vm.getState()
# interpreter.dump_sections(module)
DumpIndexSpaces(ms)
DumpLinearMems(ms.Linear_Memory, 1000)
sys.exit()
# the parent process
elif pid > 0:
# @DEVI-FIXME-we are intentionally blocking. later i will fix this
# so we can use multicores to run our reg tests faster.
cpid, status = os.waitpid(pid, 0)
return_list.append(status)
if status == 0:
print(success + testfile)
else:
print(fail + testfile)
else:
# basically we couldnt fork a child
            print(fail + 'return code: ' + str(pid))
raise Exception("could not fork child")
if __name__ == '__main__':
main()
|
the-stack_0_14411 | try: from Tkinter import *
except: from tkinter import *
win = Tk()
win.title('Reality - Game')
win.iconbitmap('C:\Windows\System32')
win.geometry('400x200+100+100')
from os import startfile as s
fungtion_0 = lambda : s('R프롤로그')
fungtion_1 = lambda : s('R1화')
fungtion_2 = lambda : s('R2화')
fungtion_3 = lambda : s('R3화')
fungtion_4 = lambda : s('R4화')
fungtion_5 = lambda : s('R5화')
fungtion_6 = lambda : s('R6화')
fungtion_7 = lambda : s('R7화')
fungtion_8 = lambda : s('R8화')
fungtion_9 = lambda : s('R에필로그')
btn_0 = Button(win, text = '프롤로그', command = fungtion_0)
btn_0.pack()
btn_1 = Button(win, text = '1화', command = fungtion_1)
btn_1.pack()
btn_2 = Button(win, text = '2화', command = fungtion_2)
btn_2.pack()
btn_3 = Button(win, text = '3화', command = fungtion_3)
btn_3.pack()
btn_4 = Button(win, text = '4화', command = fungtion_4)
btn_4.pack()
btn_5 = Button(win, text = '5화', command = fungtion_5)
btn_5.pack()
btn_6 = Button(win, text = '6화', command = fungtion_6)
btn_6.pack()
btn_7 = Button(win, text = '7화', command = fungtion_7)
btn_7.pack()
btn_8 = Button(win, text = '8화', command = fungtion_8)
btn_8.pack()
btn_9 = Button(win, text = '에필로그', command = fungtion_9)
btn_9.pack()
win.mainloop() |
the-stack_0_14412 | from gym.spaces import Discrete, Box, MultiDiscrete, Space
import numpy as np
import tree
from typing import Union, Optional
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils import force_tuple
from ray.rllib.utils.framework import try_import_tf, try_import_torch, \
TensorType
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class Random(Exploration):
"""A random action selector (deterministic/greedy for explore=False).
If explore=True, returns actions randomly from `self.action_space` (via
Space.sample()).
If explore=False, returns the greedy/max-likelihood action.
"""
def __init__(self, action_space: Space, *, model: ModelV2,
framework: Optional[str], **kwargs):
"""Initialize a Random Exploration object.
Args:
action_space (Space): The gym action space used by the environment.
framework (Optional[str]): One of None, "tf", "tfe", "torch".
"""
super().__init__(
action_space=action_space,
model=model,
framework=framework,
**kwargs)
self.action_space_struct = get_base_struct_from_space(
self.action_space)
@override(Exploration)
def get_exploration_action(self,
*,
action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: bool = True):
# Instantiate the distribution object.
if self.framework in ["tf2", "tf", "tfe"]:
return self.get_tf_exploration_action_op(action_distribution,
explore)
else:
return self.get_torch_exploration_action(action_distribution,
explore)
def get_tf_exploration_action_op(
self, action_dist: ActionDistribution,
explore: Optional[Union[bool, TensorType]]):
def true_fn():
batch_size = 1
req = force_tuple(
action_dist.required_model_output_shape(
self.action_space, self.model.model_config))
# Add a batch dimension?
if len(action_dist.inputs.shape) == len(req) + 1:
batch_size = tf.shape(action_dist.inputs)[0]
# Function to produce random samples from primitive space
# components: (Multi)Discrete or Box.
def random_component(component):
if isinstance(component, Discrete):
return tf.random.uniform(
shape=(batch_size, ) + component.shape,
maxval=component.n,
dtype=component.dtype)
elif isinstance(component, MultiDiscrete):
return tf.concat(
[
tf.random.uniform(
shape=(batch_size, 1),
maxval=n,
dtype=component.dtype) for n in component.nvec
],
axis=1)
elif isinstance(component, Box):
if component.bounded_above.all() and \
component.bounded_below.all():
return tf.random.uniform(
shape=(batch_size, ) + component.shape,
minval=component.low,
maxval=component.high,
dtype=component.dtype)
else:
return tf.random.normal(
shape=(batch_size, ) + component.shape,
dtype=component.dtype)
actions = tree.map_structure(random_component,
self.action_space_struct)
return actions
def false_fn():
return action_dist.deterministic_sample()
action = tf.cond(
pred=tf.constant(explore, dtype=tf.bool)
if isinstance(explore, bool) else explore,
true_fn=true_fn,
false_fn=false_fn)
# TODO(sven): Move into (deterministic_)sample(logp=True|False)
batch_size = tf.shape(tree.flatten(action)[0])[0]
logp = tf.zeros(shape=(batch_size, ), dtype=tf.float32)
return action, logp
def get_torch_exploration_action(self, action_dist: ActionDistribution,
explore: bool):
if explore:
req = force_tuple(
action_dist.required_model_output_shape(
self.action_space, self.model.model_config))
# Add a batch dimension?
if len(action_dist.inputs.shape) == len(req) + 1:
batch_size = action_dist.inputs.shape[0]
a = np.stack(
[self.action_space.sample() for _ in range(batch_size)])
else:
a = self.action_space.sample()
# Convert action to torch tensor.
action = torch.from_numpy(a).to(self.device)
else:
action = action_dist.deterministic_sample()
logp = torch.zeros(
(action.size()[0], ), dtype=torch.float32, device=self.device)
return action, logp
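# Hedged usage note: in RLlib this exploration class is normally selected
# through the trainer config rather than instantiated directly. The keys below
# reflect my understanding of that API and are assumptions, not taken from
# this file:
#
#   config = {
#       "explore": True,
#       "exploration_config": {"type": "Random"},
#   }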
|
the-stack_0_14416 | import logging
import time
from xml.etree.ElementTree import fromstring
import declxml as xml
import requests
from requests_cache import CachedSession
logger = logging.getLogger(__name__)
class BGGClient:
BASE_URL = "https://www.boardgamegeek.com/xmlapi2"
def __init__(self, cache=None, debug=False):
if not cache:
self.requester = requests.Session()
else:
self.requester = cache.cache
if debug:
logging.basicConfig(level=logging.DEBUG)
def collection(self, user_name, **kwargs):
params = kwargs.copy()
params["username"] = user_name
data = self._make_request("/collection?version=1", params)
collection = self._collection_to_games(data)
return collection
def plays(self, user_name):
params = {
"username": user_name,
"page": 1,
}
all_plays = []
data = self._make_request("/plays?version=1", params)
new_plays = self._plays_to_games(data)
while (len(new_plays) > 0):
all_plays = all_plays + new_plays
params["page"] += 1
data = self._make_request("/plays?version=1", params)
new_plays = self._plays_to_games(data)
return all_plays
def game_list(self, game_ids):
if not game_ids:
return []
# Split game_ids into smaller chunks to avoid "414 URI too long"
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
games = []
for game_ids_subset in chunks(game_ids, 100):
url = "/thing/?stats=1&id=" + ",".join([str(id_) for id_ in game_ids_subset])
data = self._make_request(url)
games += self._games_list_to_games(data)
return games
def _make_request(self, url, params={}, tries=0):
try:
response = self.requester.get(BGGClient.BASE_URL + url, params=params)
except requests.exceptions.ConnectionError:
if tries < 3:
time.sleep(2)
return self._make_request(url, params=params, tries=tries + 1)
raise BGGException("BGG API closed the connection prematurely, please try again...")
logger.debug("REQUEST: " + response.url)
logger.debug("RESPONSE: \n" + prettify_if_xml(response.text))
if response.status_code != 200:
# Handle 202 Accepted
if response.status_code == 202:
if tries < 10:
time.sleep(5)
return self._make_request(url, params=params, tries=tries + 1)
# Handle 504 Gateway Timeout
            if response.status_code == 504:
if tries < 3:
time.sleep(2)
return self._make_request(url, params=params, tries=tries + 1)
raise BGGException(
f"BGG returned status code {response.status_code} when "
f"requesting {response.url}"
)
tree = fromstring(response.text)
if tree.tag == "errors":
raise BGGException(
f"BGG returned errors while requesting {response.url} - " +
str([subnode.text for node in tree for subnode in node])
)
return response.text
def _plays_to_games(self, data):
def after_players_hook(_, status):
return status["name"]
plays_processor = xml.dictionary("plays", [
xml.array(
xml.dictionary('play', [
xml.integer(".", attribute="id", alias="playid"),
xml.dictionary('item', [
xml.string(".", attribute="name", alias="gamename"),
xml.integer(".", attribute="objectid", alias="gameid")
], alias='game'),
xml.array(
xml.dictionary('players/player', [
xml.string(".", attribute="name")
], required=False, alias='players', hooks=xml.Hooks(after_parse=after_players_hook))
)
], required=False, alias="plays")
)
])
plays = xml.parse_from_string(plays_processor, data)
plays = plays["plays"]
return plays
def _collection_to_games(self, data):
def after_status_hook(_, status):
return [tag for tag, value in status.items() if value == "1"]
game_in_collection_processor = xml.dictionary("items", [
xml.array(
xml.dictionary('item', [
xml.integer(".", attribute="objectid", alias="id"),
xml.string("name"),
xml.string("thumbnail", required=False, alias="image"),
xml.string("version/item/thumbnail", required=False, alias="image_version"),
xml.dictionary("status", [
xml.string(".", attribute="fortrade"),
xml.string(".", attribute="own"),
xml.string(".", attribute="preordered"),
xml.string(".", attribute="prevowned"),
xml.string(".", attribute="want"),
xml.string(".", attribute="wanttobuy"),
xml.string(".", attribute="wanttoplay"),
xml.string(".", attribute="wishlist"),
], alias='tags', hooks=xml.Hooks(after_parse=after_status_hook)),
xml.string("status", attribute="lastmodified", alias="lastmodified"),
xml.integer("numplays"),
], required=False, alias="items"),
)
])
collection = xml.parse_from_string(game_in_collection_processor, data)
collection = collection["items"]
return collection
def _games_list_to_games(self, data):
def numplayers_to_result(_, results):
result = {result["value"].lower().replace(" ", "_"): int(result["numvotes"]) for result in results}
if not result:
result = {'best': 0, 'recommended': 0, 'not_recommended': 0}
is_recommended = result['best'] + result['recommended'] > result['not_recommended']
if not is_recommended:
return "not_recommended"
is_best = result['best'] > 10 and result['best'] > result['recommended']
if is_best:
return "best"
return "recommended"
def suggested_numplayers(_, numplayers):
# Remove not_recommended player counts
numplayers = [players for players in numplayers if players["result"] != "not_recommended"]
# If there's only one player count, that's the best one
if len(numplayers) == 1:
numplayers[0]["result"] = "best"
# Just return the numbers
return [
(players["numplayers"], players["result"])
for players in numplayers
]
def log_item(_, item):
logger.debug("Successfully parsed: {} (id: {}).".format(item["name"], item["id"]))
return item
game_processor = xml.dictionary("items", [
xml.array(
xml.dictionary(
"item",
[
xml.integer(".", attribute="id"),
xml.string(".", attribute="type"),
xml.string("name[@type='primary']", attribute="value", alias="name"),
xml.string("description"),
xml.array(
xml.string(
"link[@type='boardgamecategory']",
attribute="value",
required=False
),
alias="categories",
),
xml.array(
xml.string(
"link[@type='boardgamemechanic']",
attribute="value",
required=False
),
alias="mechanics",
),
xml.array(
xml.dictionary(
"link[@type='boardgameexpansion']", [
xml.integer(".", attribute="id"),
xml.boolean(".", attribute="inbound", required=False),
],
required=False
),
alias="expansions",
),
xml.array(
xml.dictionary("poll[@name='suggested_numplayers']/results", [
xml.string(".", attribute="numplayers"),
xml.array(
xml.dictionary("result", [
xml.string(".", attribute="value"),
xml.integer(".", attribute="numvotes"),
], required=False),
hooks=xml.Hooks(after_parse=numplayers_to_result)
)
]),
alias="suggested_numplayers",
hooks=xml.Hooks(after_parse=suggested_numplayers),
),
xml.string(
"statistics/ratings/averageweight",
attribute="value",
alias="weight"
),
xml.string(
"statistics/ratings/ranks/rank[@friendlyname='Board Game Rank']",
attribute="value",
required=False,
alias="rank"
),
xml.string(
"statistics/ratings/usersrated",
attribute="value",
alias="usersrated"
),
xml.string(
"statistics/ratings/owned",
attribute="value",
alias="numowned"
),
xml.string(
"statistics/ratings/bayesaverage",
attribute="value",
alias="rating"
),
xml.string("playingtime", attribute="value", alias="playing_time"),
],
required=False,
alias="items",
hooks=xml.Hooks(after_parse=log_item),
)
)
])
games = xml.parse_from_string(game_processor, data)
games = games["items"]
return games
class CacheBackendSqlite:
def __init__(self, path, ttl):
self.cache = CachedSession(
cache_name=path,
backend="sqlite",
expire_after=ttl,
extension="",
fast_save=True,
allowable_codes=(200,)
)
class BGGException(Exception):
pass
def prettify_if_xml(xml_string):
import xml.dom.minidom
import re
xml_string = re.sub(r"\s+<", "<", re.sub(r">\s+", ">", re.sub(r"\s+", " ", xml_string)))
if not xml_string.startswith("<?xml"):
return xml_string
parsed = xml.dom.minidom.parseString(xml_string)
return parsed.toprettyxml()
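# Hedged usage sketch (the user name and cache path are placeholders):
#
#   client = BGGClient(cache=CacheBackendSqlite("bgg_cache", ttl=3600))
#   collection = client.collection("some_user", own=1)
#   games = client.game_list([item["id"] for item in collection])
#   plays = client.plays("some_user")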
|
the-stack_0_14417 | # Copyright 2011 OpenStack Foundation
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
from oslo_utils import uuidutils
import webob
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import utils as compute_utils
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova import network
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'floating_ips')
def _translate_floating_ip_view(floating_ip):
result = {
'id': floating_ip['id'],
'ip': floating_ip['address'],
'pool': floating_ip['pool'],
}
try:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
except (TypeError, KeyError, AttributeError):
result['fixed_ip'] = None
try:
result['instance_id'] = floating_ip['fixed_ip']['instance_uuid']
except (TypeError, KeyError, AttributeError):
result['instance_id'] = None
return {'floating_ip': result}
def _translate_floating_ips_view(floating_ips):
return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip']
for ip in floating_ips]}
def get_instance_by_floating_ip_addr(self, context, address):
snagiibfa = self.network_api.get_instance_id_by_floating_address
instance_id = snagiibfa(context, address)
if instance_id:
return common.get_instance(self.compute_api, context, instance_id)
def disassociate_floating_ip(self, context, instance, address):
try:
self.network_api.disassociate_floating_ip(context, instance, address)
except exception.Forbidden:
raise webob.exc.HTTPForbidden()
except exception.CannotDisassociateAutoAssignedFloatingIP:
msg = _('Cannot disassociate auto assigned floating ip')
raise webob.exc.HTTPForbidden(explanation=msg)
class FloatingIPController(object):
"""The Floating IPs API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
self.network_api = network.API()
super(FloatingIPController, self).__init__()
def show(self, req, id):
"""Return data about the given floating ip."""
context = req.environ['nova.context']
authorize(context)
try:
floating_ip = self.network_api.get_floating_ip(context, id)
except (exception.NotFound, exception.InvalidID):
msg = _("Floating ip not found for id %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
return _translate_floating_ip_view(floating_ip)
def index(self, req):
"""Return a list of floating ips allocated to a project."""
context = req.environ['nova.context']
authorize(context)
floating_ips = self.network_api.get_floating_ips_by_project(context)
return _translate_floating_ips_view(floating_ips)
def create(self, req, body=None):
context = req.environ['nova.context']
authorize(context)
pool = None
if body and 'pool' in body:
pool = body['pool']
try:
address = self.network_api.allocate_floating_ip(context, pool)
ip = self.network_api.get_floating_ip_by_address(context, address)
except exception.NoMoreFloatingIps:
if pool:
msg = _("No more floating ips in pool %s.") % pool
else:
msg = _("No more floating ips available.")
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.FloatingIpLimitExceeded:
if pool:
msg = _("IP allocation over quota in pool %s.") % pool
else:
msg = _("IP allocation over quota.")
raise webob.exc.HTTPForbidden(explanation=msg)
except exception.FloatingIpPoolNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return _translate_floating_ip_view(ip)
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
# get the floating ip object
try:
floating_ip = self.network_api.get_floating_ip(context, id)
except (exception.NotFound, exception.InvalidID):
msg = _("Floating ip not found for id %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
address = floating_ip['address']
# get the associated instance object (if any)
instance = get_instance_by_floating_ip_addr(self, context, address)
try:
self.network_api.disassociate_and_release_floating_ip(
context, instance, floating_ip)
except exception.Forbidden:
raise webob.exc.HTTPForbidden()
except exception.CannotDisassociateAutoAssignedFloatingIP:
msg = _('Cannot disassociate auto assigned floating ip')
raise webob.exc.HTTPForbidden(explanation=msg)
return webob.Response(status_int=202)
class FloatingIPActionController(wsgi.Controller):
def __init__(self, ext_mgr=None, *args, **kwargs):
super(FloatingIPActionController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.network_api = network.API()
self.ext_mgr = ext_mgr
@wsgi.action('addFloatingIp')
def _add_floating_ip(self, req, id, body):
"""Associate floating_ip to an instance."""
context = req.environ['nova.context']
authorize(context)
try:
address = body['addFloatingIp']['address']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Address not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
instance = common.get_instance(self.compute_api, context, id)
cached_nwinfo = compute_utils.get_nw_info_for_instance(instance)
if not cached_nwinfo:
LOG.warning(
_LW('Info cache is %r during associate') % instance.info_cache,
instance=instance)
msg = _('No nw_info cache associated with instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
fixed_ips = cached_nwinfo.fixed_ips()
if not fixed_ips:
msg = _('No fixed ips associated to instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
fixed_address = None
if self.ext_mgr.is_loaded('os-extended-floating-ips'):
if 'fixed_address' in body['addFloatingIp']:
fixed_address = body['addFloatingIp']['fixed_address']
for fixed in fixed_ips:
if fixed['address'] == fixed_address:
break
else:
msg = _('Specified fixed address not assigned to instance')
raise webob.exc.HTTPBadRequest(explanation=msg)
if not fixed_address:
try:
fixed_address = next(ip['address'] for ip in fixed_ips
if netaddr.valid_ipv4(ip['address']))
except StopIteration:
msg = _('Unable to associate floating ip %(address)s '
'to any fixed IPs for instance %(id)s. '
'Instance has no fixed IPv4 addresses to '
'associate.') % (
{'address': address, 'id': id})
raise webob.exc.HTTPBadRequest(explanation=msg)
if len(fixed_ips) > 1:
LOG.warning(_LW('multiple fixed_ips exist, using the first '
'IPv4 fixed_ip: %s'), fixed_address)
try:
self.network_api.associate_floating_ip(context, instance,
floating_address=address,
fixed_address=fixed_address)
except exception.FloatingIpAssociated:
msg = _('floating ip is already associated')
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.NoFloatingIpInterface:
msg = _('l3driver call to add floating ip failed')
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.FloatingIpNotFoundForAddress:
msg = _('floating ip not found')
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.format_message())
except Exception as e:
msg = _('Unable to associate floating ip %(address)s to '
'fixed ip %(fixed_address)s for instance %(id)s. '
'Error: %(error)s') % (
{'address': address, 'fixed_address': fixed_address,
'id': id, 'error': e})
LOG.exception(msg)
raise webob.exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.action('removeFloatingIp')
def _remove_floating_ip(self, req, id, body):
"""Dissociate floating_ip from an instance."""
context = req.environ['nova.context']
authorize(context)
try:
address = body['removeFloatingIp']['address']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Address not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
# get the floating ip object
try:
floating_ip = self.network_api.get_floating_ip_by_address(context,
address)
except exception.FloatingIpNotFoundForAddress:
msg = _("floating ip not found")
raise webob.exc.HTTPNotFound(explanation=msg)
# get the associated instance object (if any)
instance = get_instance_by_floating_ip_addr(self, context, address)
# disassociate if associated
if (instance and
floating_ip.get('fixed_ip_id') and
(uuidutils.is_uuid_like(id) and
[instance.uuid == id] or
[instance.id == id])[0]):
try:
disassociate_floating_ip(self, context, instance, address)
except exception.FloatingIpNotAssociated:
msg = _('Floating ip is not associated')
raise webob.exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
else:
msg = _("Floating ip %(address)s is not associated with instance "
"%(id)s.") % {'address': address, 'id': id}
raise webob.exc.HTTPConflict(explanation=msg)
class Floating_ips(extensions.ExtensionDescriptor):
"""Floating IPs support."""
name = "FloatingIps"
alias = "os-floating-ips"
namespace = "http://docs.openstack.org/compute/ext/floating_ips/api/v1.1"
updated = "2011-06-16T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-floating-ips',
FloatingIPController(),
member_actions={})
resources.append(res)
return resources
def get_controller_extensions(self):
controller = FloatingIPActionController(self.ext_mgr)
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
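# Hedged request sketch for the server actions handled above; the endpoint
# path and addresses are illustrative assumptions based on how these wsgi
# actions are wired, not taken from this module. Note that 'fixed_address' is
# only honoured when the os-extended-floating-ips extension is loaded.
#
#   POST /servers/{server_id}/action
#   {"addFloatingIp": {"address": "10.0.0.5", "fixed_address": "192.168.1.3"}}
#
#   POST /servers/{server_id}/action
#   {"removeFloatingIp": {"address": "10.0.0.5"}}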
|
the-stack_0_14418 | import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.core.dtypes.common import is_datetime64tz_dtype, needs_i8_conversion
import pandas as pd
import pandas._testing as tm
from pandas.tests.base.common import allow_na_ops
def test_unique(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
result = obj.unique()
# dict.fromkeys preserves the order
unique_values = list(dict.fromkeys(obj.values))
if isinstance(obj, pd.MultiIndex):
expected = pd.MultiIndex.from_tuples(unique_values)
expected.names = obj.names
tm.assert_index_equal(result, expected)
elif isinstance(obj, pd.Index):
expected = pd.Index(unique_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
expected = expected.normalize()
tm.assert_index_equal(result, expected)
else:
expected = np.array(unique_values)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_unique_null(null_obj, index_or_series_obj):
obj = index_or_series_obj
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif len(obj) < 1:
pytest.skip("Test doesn't make sense on empty data")
elif isinstance(obj, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
result = obj.unique()
unique_values_raw = dict.fromkeys(obj.values)
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated, whereas None wouldn't
unique_values_not_null = [val for val in unique_values_raw if not pd.isnull(val)]
unique_values = [null_obj] + unique_values_not_null
if isinstance(obj, pd.Index):
expected = pd.Index(unique_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
result = result.normalize()
expected = expected.normalize()
elif isinstance(obj, pd.CategoricalIndex):
expected = expected.set_categories(unique_values_not_null)
tm.assert_index_equal(result, expected)
else:
expected = np.array(unique_values, dtype=obj.dtype)
tm.assert_numpy_array_equal(result, expected)
def test_nunique(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
expected = len(obj.unique())
assert obj.nunique(dropna=False) == expected
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_nunique_null(null_obj, index_or_series_obj):
obj = index_or_series_obj
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif isinstance(obj, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
if isinstance(obj, pd.CategoricalIndex):
assert obj.nunique() == len(obj.categories)
assert obj.nunique(dropna=False) == len(obj.categories) + 1
else:
num_unique_values = len(obj.unique())
assert obj.nunique() == max(0, num_unique_values - 1)
assert obj.nunique(dropna=False) == max(0, num_unique_values)
@pytest.mark.parametrize(
"idx_or_series_w_bad_unicode", [pd.Index(["\ud83d"] * 2), pd.Series(["\ud83d"] * 2)]
)
def test_unique_bad_unicode(idx_or_series_w_bad_unicode):
# regression test for #34550
obj = idx_or_series_w_bad_unicode
result = obj.unique()
if isinstance(obj, pd.Index):
expected = pd.Index(["\ud83d"], dtype=object)
tm.assert_index_equal(result, expected)
else:
expected = np.array(["\ud83d"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_nunique_dropna(dropna):
# GH37566
s = pd.Series(["yes", "yes", pd.NA, np.nan, None, pd.NaT])
res = s.nunique(dropna)
    assert res == (1 if dropna else 5)
|
the-stack_0_14419 | """
Обрабатываем/отправляем сообщения согласно протоколу:
сообщения разделены нулевым байтом \0.
"""
import select
import socket
HOST = "127.0.0.1"
PORT = 9999
clients = {}
SEP = b"\0"
class Client:
def __init__(self, sock):
self.sock = sock
self._out_stream = bytes()
self._accumulated_data = bytes()
def send(self, message):
self._out_stream += message + SEP
def recv(self):
data = self.sock.recv(1)
if not data:
self.sock.close()
return None
self._accumulated_data += data
messages = []
while True:
if SEP in self._accumulated_data:
msg, rest = self._accumulated_data.split(SEP, 1)
self._accumulated_data = rest
messages.append(msg)
else:
break
return messages
def flush(self):
sent = self.sock.send(self._out_stream)
self._out_stream = self._out_stream[sent:]
return len(self._out_stream) == 0
def broadcast(poll, message):
for client in clients.values():
client.send(message)
poll.register(client.sock, select.POLLOUT)
def main():
listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_sock.bind((HOST, PORT))
listen_sock.listen(5)
poll = select.poll()
poll.register(listen_sock, select.POLLIN)
while True:
for fd, event in poll.poll():
            # the socket has an error or the connection was closed
if event & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
poll.unregister(fd)
client = clients[fd]
print('Client {} disconnected'.format(client.sock.getpeername()))
del clients[fd]
# слушающий сокет
elif fd == listen_sock.fileno():
client_sock, addr = listen_sock.accept()
client_sock.setblocking(0)
fd = client_sock.fileno()
clients[fd] = Client(client_sock)
poll.register(fd, select.POLLIN)
print('Connection from {}'.format(addr))
            # new data from the client
elif event & select.POLLIN:
client = clients[fd]
messages = client.recv()
if messages:
for message in messages:
broadcast(poll, message)
            # the client socket is ready for writing
elif event & select.POLLOUT:
client = clients[fd]
is_empty = client.flush()
if is_empty:
poll.modify(client.sock, select.POLLIN)
if __name__ == '__main__':
main()
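# Hedged companion sketch: a minimal client for the null-byte protocol handled
# above (HOST, PORT and SEP are the constants defined in this file; the message
# content is illustrative):
#
#   import socket
#   s = socket.create_connection((HOST, PORT))
#   s.sendall(b"hello" + SEP)       # every message must end with the \0 separator
#   print(s.recv(1024).split(SEP))  # broadcast data comes back \0-delimited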
|
the-stack_0_14420 | #! /usr/bin/env python
"""
Module containing functions for cubes frame registration.
"""
__author__ = 'C. A. Gomez Gonzalez, V. Christiaens, G. Ruane, R. Farkas'
__all__ = ['frame_shift',
'cube_shift',
'shift_fft',
'frame_center_radon',
'frame_center_satspots',
'cube_recenter_satspots',
'cube_recenter_radon',
'cube_recenter_dft_upsampling',
'cube_recenter_2dfit',
'cube_recenter_via_speckles']
import numpy as np
import warnings
from packaging import version
try:
import cv2
no_opencv = False
except ImportError:
msg = "Opencv python bindings are missing."
warnings.warn(msg, ImportWarning)
no_opencv = True
from hciplot import plot_frames
from scipy.ndimage import fourier_shift
from scipy.ndimage import shift
import skimage
from skimage.transform import radon
if version.parse(skimage.__version__) <= version.parse('0.17.0'):
from skimage.feature import register_translation as cc_center
else:
from skimage.registration import phase_cross_correlation as cc_center
from multiprocessing import cpu_count
from matplotlib import pyplot as plt
from . import frame_crop
from ..conf import time_ini, timing, Progressbar
from ..conf.utils_conf import vip_figsize, check_array
from ..conf.utils_conf import pool_map, iterable
from ..stats import frame_basic_stats
from ..var import (get_square, frame_center, get_annulus_segments,
fit_2dmoffat, fit_2dgaussian, fit_2dairydisk,
fit_2d2gaussian, cube_filter_lowpass, cube_filter_highpass)
from ..preproc import cube_crop_frames
def frame_shift(array, shift_y, shift_x, imlib='vip-fft',
                interpolation='lanczos4', border_mode='reflect'):
    """ Shifts a 2D array by shift_y, shift_x. Boundaries are filled according
    to ``border_mode`` ('reflect' by default; zeros only with 'constant').
Parameters
----------
array : numpy ndarray
Input 2d array.
shift_y, shift_x: float
Shifts in y and x directions.
imlib : {'opencv', 'ndimage-fourier', 'ndimage-interp', 'vip-fft'}, str opt
Library or method used for performing the image shift.
        'ndimage-fourier' performs a Fourier shift operation and better
        preserves the pixel values (and therefore the flux and photometry).
        Interpolation-based shifting ('opencv' and 'ndimage-interp') is faster
        than the Fourier shift. 'opencv' is recommended when speed is critical.
interpolation : str, optional
        Only used if `imlib` is set to 'opencv' or 'ndimage-interp'
        (Scipy.ndimage), where the images are shifted via interpolation.
        For Scipy.ndimage the options are: 'nearneig', 'bilinear',
        'biquadratic', 'bicubic', 'biquartic' or 'biquintic'. The 'nearneig'
        interpolation is the fastest and the 'biquintic' the slowest. The
        'nearneig' is the poorest option for interpolating noisy astronomical
        images.
        For Opencv the options are: 'nearneig', 'bilinear', 'bicubic' or
        'lanczos4'. The 'nearneig' interpolation is the fastest and the
        'lanczos4' the slowest but most accurate. 'lanczos4' is the default for
        Opencv and 'biquartic' for Scipy.ndimage.
border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'}
Points outside the boundaries of the input are filled accordingly.
With 'reflect', the input is extended by reflecting about the edge of
the last pixel. With 'nearest', the input is extended by replicating the
last pixel. With 'constant', the input is extended by filling all values
beyond the edge with zeros. With 'mirror', the input is extended by
reflecting about the center of the last pixel. With 'wrap', the input is
extended by wrapping around to the opposite edge. Default is 'reflect'.
Returns
-------
array_shifted : numpy ndarray
Shifted 2d array.
"""
check_array(array, dim=2)
image = array.copy()
if imlib == 'ndimage-fourier':
shift_val = (shift_y, shift_x)
array_shifted = fourier_shift(np.fft.fftn(image), shift_val)
array_shifted = np.fft.ifftn(array_shifted)
array_shifted = array_shifted.real
elif imlib == 'ndimage-interp':
if interpolation == 'nearneig':
order = 0
elif interpolation == 'bilinear':
order = 1
elif interpolation == 'biquadratic':
order = 2
elif interpolation == 'bicubic':
order = 3
elif interpolation == 'biquartic' or interpolation == 'lanczos4':
order = 4
elif interpolation == 'biquintic':
order = 5
else:
raise ValueError('Scipy.ndimage interpolation method not '
'recognized')
if border_mode not in ['reflect', 'nearest', 'constant', 'mirror',
'wrap']:
raise ValueError('`border_mode` not recognized')
array_shifted = shift(image, (shift_y, shift_x), order=order,
mode=border_mode)
elif imlib == 'opencv':
if no_opencv:
msg = 'Opencv python bindings cannot be imported. Install opencv or'
msg += ' set imlib to ndimage-fourier or ndimage-interp'
raise RuntimeError(msg)
if interpolation == 'bilinear':
intp = cv2.INTER_LINEAR
elif interpolation == 'bicubic':
intp = cv2.INTER_CUBIC
elif interpolation == 'nearneig':
intp = cv2.INTER_NEAREST
elif interpolation == 'lanczos4':
intp = cv2.INTER_LANCZOS4
else:
raise ValueError('Opencv interpolation method not recognized')
if border_mode == 'mirror':
bormo = cv2.BORDER_REFLECT_101 # gfedcb|abcdefgh|gfedcba
elif border_mode == 'reflect':
bormo = cv2.BORDER_REFLECT # fedcba|abcdefgh|hgfedcb
elif border_mode == 'wrap':
bormo = cv2.BORDER_WRAP # cdefgh|abcdefgh|abcdefg
elif border_mode == 'constant':
bormo = cv2.BORDER_CONSTANT # iiiiii|abcdefgh|iiiiiii
elif border_mode == 'nearest':
bormo = cv2.BORDER_REPLICATE # aaaaaa|abcdefgh|hhhhhhh
else:
raise ValueError('`border_mode` not recognized')
image = np.float32(image)
y, x = image.shape
M = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
array_shifted = cv2.warpAffine(image, M, (x, y), flags=intp,
borderMode=bormo)
elif imlib == 'vip-fft':
array_shifted = shift_fft(array, shift_x, shift_y)
else:
raise ValueError('Image transformation library not recognized')
return array_shifted
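# Hedged usage sketch for frame_shift (the array and shift values are
# illustrative; 'vip-fft' is the default imlib):
#
#   import numpy as np
#   frame = np.random.randn(101, 101)
#   shifted = frame_shift(frame, shift_y=0.5, shift_x=-1.25, imlib='vip-fft')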
def cube_shift(cube, shift_y, shift_x, imlib='vip-fft',
interpolation='lanczos4'):
""" Shifts the X-Y coordinates of a cube or 3D array by x and y values.
Parameters
----------
cube : numpy ndarray, 3d
Input cube.
shift_y, shift_x: float, list of floats or np.ndarray of floats
        Shifts in y and x directions for each frame. If a single value is
        given, all the frames will be shifted by the same amount.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
Returns
-------
cube_out : numpy ndarray, 3d
Cube with shifted frames.
"""
check_array(cube, dim=3)
nfr = cube.shape[0]
cube_out = np.zeros_like(cube)
if isinstance(shift_x, (int, float)):
shift_x = np.ones((nfr)) * shift_x
if isinstance(shift_y, (int, float)):
shift_y = np.ones((nfr)) * shift_y
for i in range(cube.shape[0]):
cube_out[i] = frame_shift(cube[i], shift_y[i], shift_x[i], imlib,
interpolation)
return cube_out
def frame_center_satspots(array, xy, subi_size=19, sigfactor=6, shift=False,
imlib='vip-fft', interpolation='lanczos4',
fit_type='moff', debug=False, verbose=True):
""" Finds the center of a frame with waffle/satellite spots (e.g. for
    VLT/SPHERE). The center is determined by centroiding the 4 spots via a 2d
    Gaussian fit and finding the intersection of the lines they create (see
    Notes). This method is very sensitive to the SNR of the satellite spots,
    therefore the background pixels are thresholded first. If the results are
    too extreme, the debug parameter allows you to inspect the fit in depth
    (you may need to adjust the sigfactor for the background pixel
    thresholding).
Parameters
----------
array : numpy ndarray, 2d
Image or frame.
xy : tuple of 4 tuples of 2 elements
Tuple with coordinates X,Y of the 4 satellite spots. When the spots are
in an X configuration, the order is the following: top-left, top-right,
bottom-left and bottom-right. When the spots are in an + (cross-like)
configuration, the order is the following: top, right, left, bottom.
subi_size : int, optional
Size of subimage where the fitting is done.
sigfactor : int, optional
The background pixels will be thresholded before fitting a 2d Gaussian
to the data using sigma clipped statistics. All values smaller than
(MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
noise.
shift : bool, optional
If True the image is shifted.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
fit_type: str, optional {'gaus','moff'}
Type of 2d fit to infer the centroid of the satellite spots.
debug : bool, optional
If True debug information is printed and plotted.
verbose : bool, optional
If True the intersection and shifts information is printed out.
Returns
-------
    array_rec
        Shifted image. *Only returned if ``shift=True``.*
    shifty, shiftx
        Shift Y,X to get to the true center.
    centy, centx
        Y,X centroids of the 4 satellite spots. *Only returned if
        ``shift=True``.*
Notes
-----
linear system:
    .. code-block:: none
A1 * x + B1 * y = C1
A2 * x + B2 * y = C2
Cramer's rule - solution can be found in determinants:
    .. code-block:: none
x = Dx/D
y = Dy/D
where D is main determinant of the system:
    .. code-block:: none
A1 B1
A2 B2
and Dx and Dy can be found from matrices:
    .. code-block:: none
C1 B1
C2 B2
and
    .. code-block:: none
A1 C1
A2 C2
C column consequently substitutes the coef. columns of x and y
L stores our coefs A, B, C of the line equations.
    .. code-block:: none
For D: L1[0] L1[1] for Dx: L1[2] L1[1] for Dy: L1[0] L1[2]
L2[0] L2[1] L2[2] L2[1] L2[0] L2[2]
"""
def line(p1, p2):
""" produces coefs A, B, C of line equation by 2 points
"""
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0] * p2[1] - p2[0] * p1[1])
return A, B, -C
def intersection(L1, L2):
""" finds intersection point (if any) of 2 lines provided by coefs
"""
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
return x, y
else:
return None
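    # Quick sanity check (illustrative): the diagonals of the unit square meet at its
    # center, i.e. intersection(line((0, 0), (1, 1)), line((0, 1), (1, 0))) == (0.5, 0.5)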
# --------------------------------------------------------------------------
check_array(array, dim=2)
if fit_type not in ['gaus','moff']:
raise TypeError('fit_type is not recognized')
if not isinstance(xy, (tuple, list)) or len(xy) != 4:
raise TypeError('Input waffle spot coordinates in wrong format (must '
                        'be a tuple of 4 tuples)')
cy, cx = frame_center(array)
centx = []
centy = []
subims = []
for i in range(len(xy)):
sim, y, x = get_square(array, subi_size, xy[i][1], xy[i][0],
position=True, verbose=False)
if fit_type=='gaus':
cent2dgy, cent2dgx = fit_2dgaussian(sim, crop=False, threshold=True,
sigfactor=sigfactor, debug=debug,
full_output=False)
else:
cent2dgy, cent2dgx = fit_2dmoffat(sim, crop=False, threshold=True,
sigfactor=sigfactor, debug=debug,
full_output=False)
centx.append(cent2dgx + x)
centy.append(cent2dgy + y)
subims.append(sim)
cent2dgx_1, cent2dgx_2, cent2dgx_3, cent2dgx_4 = centx
cent2dgy_1, cent2dgy_2, cent2dgy_3, cent2dgy_4 = centy
si1, si2, si3, si4 = subims
if debug:
plot_frames((si1, si2, si3, si4), colorbar=True)
print('Centroids X,Y:')
print(cent2dgx_1, cent2dgy_1)
print(cent2dgx_2, cent2dgy_2)
print(cent2dgx_3, cent2dgy_3)
print(cent2dgx_4, cent2dgy_4)
L1 = line([cent2dgx_1, cent2dgy_1], [cent2dgx_4, cent2dgy_4])
L2 = line([cent2dgx_2, cent2dgy_2], [cent2dgx_3, cent2dgy_3])
R = intersection(L1, L2)
msgerr = "Check that the order of the tuples in `xy` is correct and"
msgerr += " the satellite spots have good S/N"
if R is not None:
shiftx = cx - R[0]
shifty = cy - R[1]
if np.abs(shiftx) < cx * 2 and np.abs(shifty) < cy * 2:
if debug or verbose:
print('Intersection coordinates (X,Y):', R[0], R[1], '\n')
print('Shifts (X,Y): {:.3f}, {:.3f}'.format(shiftx, shifty))
if shift:
array_rec = frame_shift(array, shifty, shiftx, imlib=imlib,
interpolation=interpolation)
return array_rec, shifty, shiftx, centy, centx
else:
return shifty, shiftx
else:
raise RuntimeError("Too large shifts. " + msgerr)
else:
raise RuntimeError("Something went wrong, no intersection found. " +
msgerr)
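# Example usage (illustrative sketch; the spot coordinates below are placeholders, not
# real instrument values):
#     spots = ((41, 109), (109, 109), (41, 41), (109, 41))  # X configuration
#     shift_y, shift_x = frame_center_satspots(frame, spots, subi_size=19, shift=False)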
def shift_fft(array, xshift, yshift):
"""
Subpixel shifting of ``image`` using Fourier transformation.
Parameters
----------
array : 2d numpy ndarray
The image to be shifted.
xshift : float
Amount of desired shift in X direction.
yshift : float
Amount of desired shift in Y direction.
Returns
-------
shifted_array : 2d ndarray
Input ``image`` shifted by ``xshift`` and ``yshift``.
Notes
-----
based on ``LibAndromeda/oneralib/subpixel_shift.pro``, v1.3 2009/05/28
"""
npix = array.shape[0]
if npix != array.shape[1]:
raise ValueError("Input array must be square")
if npix%2:
cte = npix/2-0.5
else:
cte = npix/2
ramp = np.outer(np.ones(npix), np.arange(npix) - cte)
tilt = (-2*np.pi / npix) * (xshift*ramp + yshift*ramp.T)
fact = np.fft.fftshift(np.cos(tilt) + 1j*np.sin(tilt))
array_ft = np.fft.fft2(array) # no np.fft.fftshift applied!
shifted_array = np.fft.ifft2(array_ft * fact).real
return shifted_array
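# Example usage (illustrative sketch): subpixel shift of a square frame.
#     shifted = shift_fft(frame, xshift=0.25, yshift=-0.5)
# Note the (x, y) argument order here, unlike the (y, x) order used by frame_shift.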
def cube_recenter_satspots(array, xy, subi_size=19, sigfactor=6, plot=True,
fit_type='moff', lbda=None, debug=False, verbose=True,
full_output=False):
""" Function analog to frame_center_satspots but for image sequences. It
actually will call frame_center_satspots for each image in the cube. The
function also returns the shifted images (not recommended to use when the
shifts are of a few percents of a pixel) and plots the histogram of the
shifts and calculate its statistics. This is important to assess the
dispersion of the star center by using artificial waffle/satellite spots
(like those in VLT/SPHERE images) and evaluate the uncertainty of the
position of the center. The use of the shifted images is not recommended.
Parameters
----------
array : numpy ndarray, 3d
Input cube.
xy : tuple of 4 tuples of 2 elements
Tuple with coordinates X,Y of the 4 satellite spots. When the spots are
in an X configuration, the order is the following: top-left, top-right,
bottom-left and bottom-right. When the spots are in an + (cross-like)
configuration, the order is the following: top, right, left, bottom.
If wavelength vector is not provided, assumes all sat spots of the cube
are at a similar location. If wavelength is provided, only coordinates
of the sat spots in the first channel should be provided. The boxes
location in other channels will be scaled accordingly.
subi_size : int, optional
Size of subimage where the fitting is done.
sigfactor : int, optional
The background pixels will be thresholded before fitting a 2d Gaussian
to the data using sigma clipped statistics. All values smaller than
(MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
noise.
plot : bool, optional
Whether to plot the shifts.
fit_type: str, optional {'gaus','moff'}
Type of 2d fit to infer the centroid of the satellite spots.
lbda: 1d array or list, opt
Wavelength vector. If provided, the subimages will be scaled accordingly
to follow the motion of the satellite spots.
debug : bool, optional
If True debug information is printed and plotted (fit and residuals,
intersections and shifts). This has to be used carefully as it can
produce too much output and plots.
verbose : bool, optional
Whether to print to stdout the timing and additional info.
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
Returns
-------
array_rec
The shifted cube.
shift_y, shift_x
[full_output==True] Shifts Y,X to get to the true center for each image.
sat_y, sat_x
[full_output==True] Y,X positions of the satellite spots in each image.
Order: top-left, top-right, bottom-left and bottom-right.
"""
check_array(array, dim=3)
if verbose:
start_time = time_ini()
n_frames = array.shape[0]
shift_x = np.zeros((n_frames))
shift_y = np.zeros((n_frames))
sat_y = np.zeros([n_frames,4])
sat_x = np.zeros([n_frames,4])
array_rec = []
if lbda is not None:
cy, cx = frame_center(array[0])
final_xy = []
rescal = lbda/lbda[0]
for i in range(n_frames):
xy_new = []
for s in range(4):
xy_new.append((cx+rescal[i]*(xy[s][0]-cx),cy+rescal[i]*(xy[s][1]-cy)))
xy_new = tuple(xy_new)
final_xy.append(xy_new)
else:
final_xy = [xy for i in range(n_frames)]
if verbose:
print("Final xy positions for sat spots:", final_xy)
print('Looping through the frames, fitting the intersections:')
for i in Progressbar(range(n_frames), verbose=verbose):
res = frame_center_satspots(array[i], final_xy[i], debug=debug, shift=True,
subi_size=subi_size, sigfactor=sigfactor,
fit_type=fit_type, verbose=False)
array_rec.append(res[0])
shift_y[i] = res[1]
shift_x[i] = res[2]
sat_y[i] = res[3]
sat_x[i] = res[4]
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(shift_x, 'o-', label='Shifts in x', alpha=0.5)
plt.plot(shift_y, 'o-', label='Shifts in y', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(shift_x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(shift_y, bins=b, alpha=0.5, label=la + ' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if verbose:
msg1 = 'MEAN X,Y: {:.3f}, {:.3f}'
print(msg1.format(np.mean(shift_x), np.mean(shift_y)))
msg2 = 'MEDIAN X,Y: {:.3f}, {:.3f}'
print(msg2.format(np.median(shift_x), np.median(shift_y)))
msg3 = 'STDDEV X,Y: {:.3f}, {:.3f}'
print(msg3.format(np.std(shift_x), np.std(shift_y)))
array_rec = np.array(array_rec)
if full_output:
return array_rec, shift_y, shift_x, sat_y, sat_x
else:
return array_rec
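# Example usage (illustrative sketch; `cube` and the spot coordinates are placeholders):
#     spots = ((41, 109), (109, 109), (41, 41), (109, 41))
#     cube_reg, sy, sx, sat_y, sat_x = cube_recenter_satspots(cube, spots, plot=False,
#                                                             full_output=True)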
def frame_center_radon(array, cropsize=None, hsize=0.4, step=0.01,
mask_center=None, nproc=None, satspots_cfg=None,
full_output=False, verbose=True,
plot=True, debug=False):
""" Finding the center of a broadband (co-added) frame with speckles and
satellite spots elongated towards the star (center). We use the radon
transform implementation from scikit-image.
Parameters
----------
array : numpy ndarray
Input 2d array or image.
cropsize : None or odd int, optional
Size in pixels of the cropped central area of the input array that will
be used. It should be large enough to contain the bright elongated
speckle or satellite spots.
hsize : float, optional
Size of the box for the grid search. The frame is shifted to each
direction from the center in a hsize length with a given step.
step : float, optional
The step of the coordinates change.
mask_center : None or int, optional
If None the central area of the frame is kept. If int a centered zero
mask will be applied to the frame. By default the center isn't masked.
nproc : int, optional
Number of processes for parallel computing. If None the number of
processes will be set to cpu_count()/2.
satspots_cfg: None or str ('x' or '+'), opt
If satellite spots are present, provide a string corresponding to the
configuration of the satellite spots: either as a cross ('x') or as a
plus sign ('+'). Leave to None if no satellite spots present. Usually
the Radon transform centering works better if bright satellite spots
are present.
verbose : bool optional
Whether to print to stdout some messages and info.
plot : bool, optional
Whether to plot the radon cost function.
debug : bool, optional
Whether to print and plot intermediate info.
Returns
-------
[full_output=True] 2d np array
Radon cost function surface is returned if full_output set to True
optimy, optimx : float
Values of the Y, X coordinates of the center of the frame based on the
radon optimization. (always returned)
Notes
-----
Based on Pueyo et al. 2014: http://arxiv.org/abs/1409.6388
"""
from .cosmetics import frame_crop
if array.ndim != 2:
raise TypeError('Input array is not a frame or 2d array')
if verbose:
start_time = time_ini()
frame = array.copy()
ori_cent, _ = frame_center(frame)
if cropsize is not None:
if not cropsize%2:
raise TypeError("If not None, cropsize should be odd integer")
frame = frame_crop(frame, cropsize, verbose=False)
listyx = np.linspace(start=-hsize, stop=hsize, num=int(2*hsize/step)+1,
endpoint=True)
if not mask_center:
radint = 0
else:
if not isinstance(mask_center, int):
raise TypeError
radint = mask_center
coords = [(y, x) for y in listyx for x in listyx]
cent, _ = frame_center(frame)
frame = get_annulus_segments(frame, radint, cent-radint, mode="mask")[0]
if debug:
if satspots_cfg is not None:
samples = 10
if satspots_cfg == 'x':
theta = np.hstack((np.linspace(start=40, stop=50, num=samples,
endpoint=False),
np.linspace(start=130, stop=140, num=samples,
endpoint=False),
np.linspace(start=220, stop=230, num=samples,
endpoint=False),
np.linspace(start=310, stop=320, num=samples,
endpoint=False)))
elif satspots_cfg == '+':
theta = np.hstack((np.linspace(start=-5, stop=5, num=samples,
endpoint=False),
np.linspace(start=85, stop=95, num=samples,
endpoint=False),
np.linspace(start=175, stop=185, num=samples,
endpoint=False),
np.linspace(start=265, stop=275, num=samples,
endpoint=False)))
else:
msg = "If not None, satspots_cfg can only be 'x' or '+'."
raise ValueError(msg)
sinogram = radon(frame, theta=theta, circle=True)
plot_frames((frame, sinogram))
print(np.sum(np.abs(sinogram[int(cent), :])))
else:
theta = np.linspace(start=0, stop=360, num=int(cent*2),
endpoint=False)
sinogram = radon(frame, theta=theta, circle=True)
plot_frames((frame, sinogram))
print(np.sum(np.abs(sinogram[int(cent), :])))
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
if nproc == 1:
costf = []
for coord in coords:
res = _radon_costf(frame, cent, radint, coord, satspots_cfg)
costf.append(res)
costf = np.array(costf)
elif nproc > 1:
res = pool_map(nproc, _radon_costf, frame, cent, radint,
iterable(coords), satspots_cfg)
costf = np.array(res)
if verbose:
msg = 'Done {} radon transform calls distributed in {} processes'
print(msg.format(len(coords), nproc))
cost_bound = costf.reshape(listyx.shape[0], listyx.shape[0])
if plot:
        plt.contour(cost_bound, cmap='CMRmap', origin='lower', linewidths=1)
plt.imshow(cost_bound, cmap='CMRmap', origin='lower',
interpolation='nearest')
plt.colorbar()
        plt.grid(False)
plt.show()
# argm = np.argmax(costf) # index of 1st max in 1d cost function 'surface'
# optimy, optimx = coords[argm]
# maxima in the 2d cost function surface
num_max = np.where(cost_bound == cost_bound.max())[0].shape[0]
ind_maximay, ind_maximax = np.where(cost_bound == cost_bound.max())
argmy = ind_maximay[int(np.ceil(num_max/2)) - 1]
argmx = ind_maximax[int(np.ceil(num_max/2)) - 1]
y_grid = np.array(coords)[:, 0].reshape(listyx.shape[0], listyx.shape[0])
x_grid = np.array(coords)[:, 1].reshape(listyx.shape[0], listyx.shape[0])
optimy = y_grid[argmy, 0]+(ori_cent-cent)/2
optimx = x_grid[0, argmx]+(ori_cent-cent)/2
if verbose:
print('Cost function max: {}'.format(costf.max()))
print('Cost function # maxima: {}'.format(num_max))
msg = 'Finished grid search radon optimization. Y={:.5f}, X={:.5f}'
print(msg.format(optimy, optimx))
timing(start_time)
if full_output:
return cost_bound, optimy, optimx
else:
return optimy, optimx
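# Example usage (illustrative sketch; `frame` is a hypothetical co-added 2d image):
#     opt_y, opt_x = frame_center_radon(frame, cropsize=101, hsize=0.4, step=0.01,
#                                       satspots_cfg='x', plot=False, verbose=False)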
def _radon_costf(frame, cent, radint, coords, satspots_cfg=None):
""" Radon cost function used in frame_center_radon().
"""
frame_shifted = frame_shift(frame, coords[0], coords[1])
frame_shifted_ann = get_annulus_segments(frame_shifted, radint,
cent-radint, mode="mask")[0]
if satspots_cfg is None:
theta = np.linspace(start=0, stop=360, num=frame_shifted_ann.shape[0],
endpoint=False)
elif satspots_cfg == 'x':
samples = 10
theta = np.hstack((np.linspace(start=40, stop=50, num=samples,
endpoint=False),
np.linspace(start=130, stop=140, num=samples,
endpoint=False),
np.linspace(start=220, stop=230, num=samples,
endpoint=False),
np.linspace(start=310, stop=320, num=samples,
endpoint=False)))
else:
samples = 10
theta = np.hstack((np.linspace(start=-5, stop=5, num=samples,
endpoint=False),
np.linspace(start=85, stop=95, num=samples,
endpoint=False),
np.linspace(start=175, stop=185, num=samples,
endpoint=False),
np.linspace(start=265, stop=275, num=samples,
endpoint=False)))
sinogram = radon(frame_shifted_ann, theta=theta, circle=True)
costf = np.sum(np.abs(sinogram[int(cent), :]))
return costf
def cube_recenter_radon(array, full_output=False, verbose=True, imlib='vip-fft',
interpolation='lanczos4', **kwargs):
""" Recenters a cube looping through its frames and calling the
``frame_center_radon`` function.
Parameters
----------
array : numpy ndarray
Input 3d array or cube.
full_output : {False, True}, bool optional
If True the recentered cube is returned along with the y and x shifts.
verbose : {True, False}, bool optional
Whether to print timing and intermediate information to stdout.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
cropsize : odd int, optional
Size in pixels of the cropped central area of the input array that will
be used. It should be large enough to contain the satellite spots.
hsize : float, optional
Size of the box for the grid search. The frame is shifted to each
direction from the center in a hsize length with a given step.
step : float, optional
The step of the coordinates change.
mask_center : None or int, optional
If None the central area of the frame is kept. If int a centered zero
mask will be applied to the frame. By default the center isn't masked.
nproc : int, optional
Number of processes for parallel computing. If None the number of
processes will be set to cpu_count()/2.
debug : bool, optional
Whether to print and plot intermediate info from ``frame_center_radon``.
Returns
-------
array_rec : 3d ndarray
Recentered cube.
y, x : 1d arrays of floats
[full_output] Shifts in y and x.
"""
check_array(array, dim=3)
if verbose:
start_time = time_ini()
n_frames = array.shape[0]
x = np.zeros((n_frames))
y = np.zeros((n_frames))
array_rec = array.copy()
for i in Progressbar(range(n_frames), desc="frames", verbose=verbose):
y[i], x[i] = frame_center_radon(array[i], verbose=False, plot=False,
**kwargs)
array_rec[i] = frame_shift(array[i], y[i], x[i], imlib=imlib,
interpolation=interpolation)
if verbose:
timing(start_time)
if full_output:
return array_rec, y, x
else:
return array_rec
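# Example usage (illustrative sketch): extra keyword arguments are forwarded to
# frame_center_radon.
#     cube_reg, y_sh, x_sh = cube_recenter_radon(cube, full_output=True, cropsize=101,
#                                                hsize=0.4, step=0.01)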
def cube_recenter_dft_upsampling(array, center_fr1=None, negative=False,
fwhm=4, subi_size=None, upsample_factor=100,
imlib='vip-fft', interpolation='lanczos4',
mask=None, full_output=False, verbose=True,
nproc=1, save_shifts=False, debug=False,
plot=True):
""" Recenters a cube of frames using the DFT upsampling method as
proposed in Guizar et al. 2008 and implemented in the
``register_translation`` function from scikit-image.
The algorithm (DFT upsampling) obtains an initial estimate of the
cross-correlation peak by an FFT and then refines the shift estimation by
upsampling the DFT only in a small neighborhood of that estimate by means
of a matrix-multiply DFT.
Parameters
----------
array : numpy ndarray
Input cube.
center_fr1 = (cy_1, cx_1) : Tuple, optional
Coordinates of the center of the subimage for fitting a 2d Gaussian and
centroiding the 1st frame.
negative : bool, optional
If True the centroiding of the 1st frames is done with a negative
2d Gaussian fit.
fwhm : float, optional
FWHM size in pixels.
subi_size : int or None, optional
Size of the square subimage sides in pixels, used to centroid to first
frame. If subi_size is None then the first frame is assumed to be
centered already.
nproc : int or None, optional
Number of processes (>1) for parallel computing. If 1 then it runs in
serial. If None the number of processes will be set to (cpu_count()/2).
upsample_factor : int, optional
Upsampling factor (default 100). Images will be registered to within
1/upsample_factor of a pixel.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
mask: 2D np.ndarray, optional
Binary mask indicating where the cross-correlation should be calculated
in the images. If provided, should be the same size as array frames.
        [Note: the mask is only used if the installed scikit-image version is >= 0.18.0]
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
verbose : bool, optional
Whether to print to stdout the timing or not.
save_shifts : bool, optional
Whether to save the shifts to a file in disk.
debug : bool, optional
Whether to print to stdout the shifts or not.
plot : bool, optional
If True, the shifts are plotted.
Returns
-------
array_recentered : numpy ndarray
The recentered cube.
y : numpy ndarray
[full_output=True] 1d array with the shifts in y.
x : numpy ndarray
[full_output=True] 1d array with the shifts in x.
Notes
-----
Using the implementation from scikit-image of the algorithm described in
Guizar-Sicairos et al. "Efficient subpixel image registration algorithms,"
Opt. Lett. 33, 156-158 (2008). This algorithm registers two images (2-D
rigid translation) within a fraction of a pixel specified by the user.
Instead of computing a zero-padded FFT (fast Fourier transform), this code
uses selective upsampling by a matrix-multiply DFT (discrete FT) to
dramatically reduce computation time and memory without sacrificing
accuracy. With this procedure all the image points are used to compute the
upsampled cross-correlation in a very small neighborhood around its peak.
"""
if verbose:
start_time = time_ini()
check_array(array, dim=3)
if mask is not None:
if mask.shape[-1]!=array.shape[-1] or mask.shape[-2]!=array.shape[-2]:
msg = "If provided, mask should have same shape as frames"
raise TypeError(msg)
n_frames, sizey, sizex = array.shape
if subi_size is not None:
if center_fr1 is None:
            print('`cx_1` or `cy_1` not provided')
            print('Using the coordinates of the 1st frame center for '
                  'the Gaussian 2d fit')
cy_1, cx_1 = frame_center(array[0])
else:
cy_1, cx_1 = center_fr1
if not isinstance(subi_size, int):
raise ValueError('subi_size must be an integer or None')
if subi_size < fwhm:
raise ValueError('`subi_size` (value in pixels) is too small')
if sizey % 2 == 0:
if subi_size % 2 != 0:
subi_size += 1
print('`subi_size` is odd (while frame size is even)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
else:
if subi_size % 2 == 0:
subi_size += 1
print('`subi_size` is even (while frame size is odd)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
n_frames = array.shape[0]
x = np.zeros((n_frames))
y = np.zeros((n_frames))
array_rec = array.copy()
cy, cx = frame_center(array[0])
# Centroiding first frame with 2d gaussian and shifting
msg0 = "The rest of the frames will be shifted by cross-correlation wrt the" \
" 1st"
if subi_size is not None:
y1, x1 = _centroid_2dg_frame(array_rec, 0, subi_size, cy_1, cx_1,
negative, debug, fwhm)
x[0] = cx - x1
y[0] = cy - y1
array_rec[0] = frame_shift(array_rec[0], shift_y=y[0], shift_x=x[0],
imlib=imlib, interpolation=interpolation)
if verbose:
msg = "Shift for first frame X,Y=({:.3f}, {:.3f})"
print(msg.format(x[0], y[0]))
print(msg0)
if debug:
titd = "original / shifted 1st frame subimage"
plot_frames((frame_crop(array[0], subi_size, verbose=False),
frame_crop(array_rec[0], subi_size, verbose=False)),
grid=True, title=titd)
else:
if verbose:
print("The first frame is assumed to be well centered wrt the"
"center of the array")
print(msg0)
x[0] = 0
y[0] = 0
    # Finding the shifts with DFT upsampling of each frame wrt the first
if nproc == 1:
for i in Progressbar(range(1, n_frames), desc="frames", verbose=verbose):
y[i], x[i], array_rec[i] = _shift_dft(array_rec, array, i,
upsample_factor, mask,
interpolation, imlib)
elif nproc > 1:
        res = pool_map(nproc, _shift_dft, array_rec, array,
                       iterable(range(1, n_frames)),
                       upsample_factor, mask, interpolation, imlib)
res = np.array(res)
y[1:] = res[:,0]
x[1:] = res[:,1]
array_rec[1:] = [frames for frames in res[:,2]]
if debug:
print("\nShifts in X and Y")
for i in range(n_frames):
print(x[i], y[i])
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(y, 'o-', label='shifts in y', alpha=0.5)
plt.plot(x, 'o-', label='shifts in x', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(y, bins=b, alpha=0.5, label=la + ' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if save_shifts:
np.savetxt('recent_dft_shifts.txt', np.transpose([y, x]), fmt='%f')
if full_output:
return array_rec, y, x
else:
return array_rec
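# Example usage (illustrative sketch; the centroiding coordinates are placeholders):
#     cube_reg, y_sh, x_sh = cube_recenter_dft_upsampling(cube, center_fr1=(100, 100),
#                                                         fwhm=4.2, subi_size=15,
#                                                         full_output=True, plot=False)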
def _shift_dft(array_rec, array, frnum, upsample_factor, mask, interpolation,
imlib):
"""
    Function used in cube_recenter_dft_upsampling.
"""
if version.parse(skimage.__version__) > version.parse('0.17.0'):
shift_yx = cc_center(array_rec[0], array[frnum],
upsample_factor=upsample_factor, reference_mask=mask,
return_error=False)
else:
shift_yx = cc_center(array_rec[0], array[frnum],
upsample_factor=upsample_factor)
y_i, x_i = shift_yx
array_rec_i = frame_shift(array[frnum], shift_y=y_i, shift_x=x_i,
imlib=imlib, interpolation=interpolation)
return y_i, x_i, array_rec_i
def cube_recenter_2dfit(array, xy=None, fwhm=4, subi_size=5, model='gauss',
nproc=1, imlib='vip-fft', interpolation='lanczos4',
offset=None, negative=False, threshold=False,
sigfactor=2, fix_neg=False, params_2g=None,
save_shifts=False, full_output=False, verbose=True,
debug=False, plot=True):
""" Recenters the frames of a cube. The shifts are found by fitting a 2d
Gaussian or Moffat to a subimage centered at ``xy``. This assumes the frames
don't have too large shifts (>5px). The frames are shifted using the
function frame_shift().
Parameters
----------
array : numpy ndarray
Input cube.
xy : tuple of integers or floats
Integer coordinates of the center of the subimage (wrt the original frame).
For the double gaussian fit with fixed negative gaussian, this should
        correspond to the exact location of the center of the negative gaussian
(e.g. the center of the coronagraph mask) - in that case a tuple of
floats is also accepted.
fwhm : float or numpy ndarray
FWHM size in pixels, either one value (float) that will be the same for
the whole cube, or an array of floats with the same dimension as the
0th dim of array, containing the fwhm for each channel (e.g. in the case
of an ifs cube, where the fwhm varies with wavelength)
subi_size : int, optional
Size of the square subimage sides in pixels.
model : str, optional
Sets the type of fit to be used. 'gauss' for a 2d Gaussian fit,
'moff' for a 2d Moffat fit, 'airy' for a 2d Airy disk fit, and
'2gauss' for a 2d double Gaussian (positive+negative) fit.
nproc : int or None, optional
Number of processes (>1) for parallel computing. If 1 then it runs in
serial. If None the number of processes will be set to (cpu_count()/2).
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
offset : tuple of floats, optional
If None the region of the frames used for the 2d Gaussian/Moffat fit is
shifted to the center of the images (2d arrays). If a tuple is given it
serves as the offset of the fitted area wrt the center of the 2d arrays.
negative : bool, optional
If True a negative 2d Gaussian/Moffat fit is performed.
fix_neg: bool, optional
In case of a double gaussian fit, whether to fix the parameters of the
        negative gaussian. If True, they should be provided in params_2g.
params_2g: None or dictionary, optional
In case of a double gaussian fit, dictionary with either fixed or first
guess parameters for the double gaussian. E.g.:
params_2g = {'fwhm_neg': 3.5, 'fwhm_pos': (3.5,4.2), 'theta_neg': 48.,
'theta_pos':145., 'neg_amp': 0.5}
fwhm_neg: float or tuple with fwhm of neg gaussian
fwhm_pos: can be a tuple for x and y axes of pos gaussian (replaces fwhm)
theta_neg: trigonometric angle of the x axis of the neg gaussian (deg)
theta_pos: trigonometric angle of the x axis of the pos gaussian (deg)
neg_amp: amplitude of the neg gaussian wrt the amp of the positive one
Note: it is always recommended to provide theta_pos and theta_neg for a
better fit.
threshold : bool, optional
If True the background pixels (estimated using sigma clipped statistics)
will be replaced by small random Gaussian noise (recommended for 2g).
sigfactor: float, optional
        If thresholding is performed, set the threshold in terms of
gaussian sigma in the subimage (will depend on your cropping size).
save_shifts : bool, optional
Whether to save the shifts to a file in disk.
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
verbose : bool, optional
Whether to print to stdout the timing or not.
debug : bool, optional
If True the details of the fitting are shown. Won't work when the cube
contains >20 frames (as it might produce an extremely long output).
plot : bool, optional
If True, the shifts are plotted.
Returns
-------
array_rec: numpy ndarray
The recentered cube.
y : numpy ndarray
[full_output=True] 1d array with the shifts in y.
x : numpy ndarray
[full_output=True] 1d array with the shifts in x.
"""
if verbose:
start_time = time_ini()
check_array(array, dim=3)
n_frames, sizey, sizex = array.shape
if not isinstance(subi_size, int):
raise ValueError('`subi_size` must be an integer')
if sizey % 2 == 0:
if subi_size % 2 != 0:
subi_size += 1
print('`subi_size` is odd (while frame size is even)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
else:
if subi_size % 2 == 0:
subi_size += 1
print('`subi_size` is even (while frame size is odd)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
if isinstance(fwhm, (float, int, np.float32, np.float64)):
fwhm = np.ones(n_frames) * fwhm
if debug and array.shape[0] > 20:
msg = 'Debug with a big array will produce a very long output. '
msg += 'Try with less than 20 frames in debug mode'
raise RuntimeWarning(msg)
if xy is not None:
pos_x, pos_y = xy
cond = model != '2gauss'
if (not isinstance(pos_x, int) or not isinstance(pos_y, int)) and cond:
raise TypeError('`xy` must be a tuple of integers')
else:
pos_y, pos_x = frame_center(array[0])
cy, cx = frame_center(array[0])
array_rec = np.empty_like(array)
if model == 'gauss':
func = _centroid_2dg_frame
elif model == 'moff':
func = _centroid_2dm_frame
elif model == 'airy':
func = _centroid_2da_frame
elif model == '2gauss':
func = _centroid_2d2g_frame
else:
raise ValueError('model not recognized')
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
if nproc == 1:
res = []
print('2d {}-fitting'.format(model))
for i in Progressbar(range(n_frames), desc="frames", verbose=verbose):
if model == "2gauss":
args = [array, i, subi_size, pos_y, pos_x, debug, fwhm[i],
fix_neg, params_2g, threshold, sigfactor]
else:
args = [array, i, subi_size, pos_y, pos_x, negative, debug,
fwhm[i], threshold, sigfactor]
res.append(func(*args))
res = np.array(res)
elif nproc > 1:
if model == "2gauss":
args = [array, iterable(range(n_frames)), subi_size, pos_y, pos_x,
debug, iterable(fwhm), fix_neg, params_2g, threshold,
sigfactor]
else:
args = [array, iterable(range(n_frames)), subi_size, pos_y, pos_x,
negative, debug, iterable(fwhm), threshold, sigfactor]
res = pool_map(nproc, func, *args)
res = np.array(res)
y = cy - res[:, 0]
x = cx - res[:, 1]
if model == "2gauss" and not fix_neg:
y_neg = res[:, 2]
x_neg = res[:, 3]
fwhm_x = res[:, 4]
fwhm_y = res[:, 5]
fwhm_neg_x = res[:, 6]
fwhm_neg_y = res[:, 7]
theta = res[:, 8]
theta_neg = res[:, 9]
amp_pos = res[:,10]
amp_neg = res[:, 11]
if offset is not None:
offx, offy = offset
y -= offy
x -= offx
for i in Progressbar(range(n_frames), desc="Shifting", verbose=verbose):
if debug:
print("\nShifts in X and Y")
print(x[i], y[i])
array_rec[i] = frame_shift(array[i], y[i], x[i], imlib=imlib,
interpolation=interpolation)
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(y, 'o-', label='shifts in y', alpha=0.5)
plt.plot(x, 'o-', label='shifts in x', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(y, bins=b, alpha=0.5, label=la + ' shifts Y')
if model == "2gauss" and not fix_neg:
_ = plt.hist(cx-x_neg, bins=b, alpha=0.5,
label=la + ' shifts X (neg gaussian)')
_ = plt.hist(cy-y_neg, bins=b, alpha=0.5,
label=la + ' shifts Y (neg gaussian)')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if save_shifts:
np.savetxt('recent_gauss_shifts.txt', np.transpose([y, x]), fmt='%f')
if full_output:
if model == "2gauss" and not fix_neg:
return (array_rec, y, x, y_neg, x_neg, fwhm_x, fwhm_y, fwhm_neg_x,
fwhm_neg_y, theta, theta_neg, amp_pos, amp_neg)
return array_rec, y, x
else:
return array_rec
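# Example usage (illustrative sketch; `cube`, `xy` and `fwhm` values are placeholders):
#     cube_reg, y_sh, x_sh = cube_recenter_2dfit(cube, xy=(100, 100), fwhm=4.2,
#                                                subi_size=5, model='gauss',
#                                                full_output=True, plot=False)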
def _centroid_2dg_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d gaussian fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative gaussian fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dgaussian(sub_image, crop=False, fwhmx=fwhm, fwhmy=fwhm,
threshold=threshold, sigfactor=sigfactor, debug=debug,
full_output=False)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2dm_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d moffat fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dmoffat(sub_image, crop=False, fwhm=fwhm, debug=debug,
threshold=threshold, sigfactor=sigfactor,
full_output=False)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2da_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d Airy disk fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dairydisk(sub_image, crop=False, fwhm=fwhm,
threshold=threshold, sigfactor=sigfactor,
full_output=False, debug=debug)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2d2g_frame(cube, frnum, size, pos_y, pos_x, debug=False, fwhm=4,
fix_neg=True, params_2g=None, threshold=False,
sigfactor=1):
""" Finds the centroid by using a 2d double gaussian (positive+negative)
fitting in one frame from a cube. To be called from within
cube_recenter_doublegauss2d_fit().
"""
size = min(cube[frnum].shape[0],cube[frnum].shape[1],size)
#sub_image, y1, x1 = get_square_robust(cube[frnum], size=size, y=pos_y,
# x=pos_x, position=True)
if isinstance(params_2g,dict):
fwhm_neg = params_2g.get('fwhm_neg', 0.8*fwhm)
fwhm_pos = params_2g.get('fwhm_pos', 2*fwhm)
theta_neg = params_2g.get('theta_neg', 0.)
theta_pos = params_2g.get('theta_pos', 0.)
neg_amp = params_2g.get('neg_amp', 1)
res_DF = fit_2d2gaussian(cube[frnum], crop=True, cent=(pos_x,pos_y),
cropsize=size, fwhm_neg=fwhm_neg, fwhm_pos=fwhm_pos,
neg_amp=neg_amp, fix_neg=fix_neg, theta_neg=theta_neg,
theta_pos=theta_pos, threshold=threshold,
sigfactor=sigfactor, full_output=True, debug=debug)
y_i = res_DF['centroid_y']
x_i = res_DF['centroid_x']
if not fix_neg:
y_neg = res_DF['centroid_y_neg']
x_neg = res_DF['centroid_x_neg']
fwhm_x = res_DF['fwhm_x']
fwhm_y = res_DF['fwhm_y']
fwhm_neg_x = res_DF['fwhm_x_neg']
fwhm_neg_y = res_DF['fwhm_y_neg']
theta = res_DF['theta']
theta_neg = res_DF['theta_neg']
amp_pos = res_DF['amplitude']
amp_neg = res_DF['amplitude_neg']
return (y_i, x_i, y_neg, x_neg, fwhm_x, fwhm_y, fwhm_neg_x, fwhm_neg_y,
theta, theta_neg, amp_pos, amp_neg)
return y_i, x_i
# TODO: make parameter names match the API
def cube_recenter_via_speckles(cube_sci, cube_ref=None, alignment_iter=5,
gammaval=1, min_spat_freq=0.5, max_spat_freq=3,
fwhm=4, debug=False, recenter_median=False,
fit_type='gaus', negative=True, crop=True,
subframesize=21, mask=None, imlib='vip-fft',
interpolation='lanczos4', plot=True,
full_output=False):
""" Registers frames based on the median speckle pattern. Optionally centers
based on the position of the vortex null in the median frame. Images are
filtered to isolate speckle spatial frequencies.
Parameters
----------
cube_sci : numpy ndarray
Science cube.
cube_ref : numpy ndarray
Reference cube (e.g. for NIRC2 data in RDI mode).
alignment_iter : int, optional
Number of alignment iterations (recomputes median after each iteration).
gammaval : int, optional
Applies a gamma correction to emphasize speckles (useful for faint
stars).
min_spat_freq : float, optional
Spatial frequency for low pass filter.
max_spat_freq : float, optional
Spatial frequency for high pass filter.
fwhm : float, optional
Full width at half maximum.
debug : bool, optional
Outputs extra info.
recenter_median : bool, optional
Recenter the frames at each iteration based on a 2d fit.
fit_type : str, optional
If recenter_median is True, this is the model to which the image is
fitted to for recentering. 'gaus' works well for NIRC2_AGPM data.
'ann' works better for NACO+AGPM data.
negative : bool, optional
If True, uses a negative gaussian fit to determine the center of the
median frame.
crop: bool, optional
Whether to calculate the recentering on a cropped version of the cube
that is speckle-dominated (recommended).
subframesize : int, optional
Sub-frame window size used. Should cover the region where speckles are
the dominant noise source.
mask: 2D np.ndarray, optional
Binary mask indicating where the cross-correlation should be calculated
in the images. If provided, should be the same size as array frames.
imlib : str, optional
Image processing library to use.
interpolation : str, optional
Interpolation method to use.
plot : bool, optional
If True, the shifts are plotted.
    full_output: bool, optional
        Whether to return more variables, useful for debugging.
Returns
-------
if full_output is False, returns:
cube_reg_sci: Registered science cube (numpy 3d ndarray)
If cube_ref is not None, also returns:
cube_reg_ref: Ref. cube registered to science frames (np 3d ndarray)
If full_output is True, returns in addition to the above:
cube_sci_lpf: Low+high-pass filtered science cube (np 3d ndarray)
cube_stret: Cube with stretched values used for cross-corr (np 3d ndarray)
cum_x_shifts_sci: Vector of x shifts for science frames (np 1d array)
cum_y_shifts_sci: Vector of y shifts for science frames (np 1d array)
And if cube_ref is not None, also returns:
cum_x_shifts_ref: Vector of x shifts for ref. frames.
cum_y_shifts_ref: Vector of y shifts for ref. frames.
"""
n, y, x = cube_sci.shape
check_array(cube_sci, dim=3)
if recenter_median and fit_type not in {'gaus','ann'}:
raise TypeError("fit type not recognized. Should be 'ann' or 'gaus'")
if crop and not subframesize < y/2.:
raise ValueError('`Subframesize` is too large')
if cube_ref is not None:
ref_star = True
nref = cube_ref.shape[0]
else:
ref_star = False
if crop:
cube_sci_subframe = cube_crop_frames(cube_sci, subframesize,
verbose=False)
if ref_star:
cube_ref_subframe = cube_crop_frames(cube_ref, subframesize,
verbose=False)
else:
subframesize = cube_sci.shape[-1]
cube_sci_subframe = cube_sci.copy()
if ref_star:
cube_ref_subframe = cube_ref.copy()
ceny, cenx = frame_center(cube_sci_subframe[0])
print('Sub frame shape: {}'.format(cube_sci_subframe.shape))
print('Center pixel: ({}, {})'.format(ceny, cenx))
# Filtering cubes. Will be used for alignment purposes
cube_sci_lpf = cube_sci_subframe.copy()
if ref_star:
cube_ref_lpf = cube_ref_subframe.copy()
cube_sci_lpf = cube_sci_lpf + np.abs(np.min(cube_sci_lpf))
if ref_star:
cube_ref_lpf = cube_ref_lpf + np.abs(np.min(cube_ref_lpf))
median_size = int(fwhm * max_spat_freq)
# Remove spatial frequencies <0.5 lam/D and >3lam/D to isolate speckles
cube_sci_hpf = cube_filter_highpass(cube_sci_lpf, 'median-subt',
median_size=median_size, verbose=False)
if min_spat_freq>0:
cube_sci_lpf = cube_filter_lowpass(cube_sci_hpf, 'gauss',
fwhm_size=min_spat_freq * fwhm,
verbose=False)
else:
cube_sci_lpf = cube_sci_hpf
if ref_star:
cube_ref_hpf = cube_filter_highpass(cube_ref_lpf, 'median-subt',
median_size=median_size,
verbose=False)
if min_spat_freq>0:
cube_ref_lpf = cube_filter_lowpass(cube_ref_hpf, 'gauss',
fwhm_size=min_spat_freq * fwhm,
verbose=False)
else:
cube_ref_lpf = cube_ref_hpf
if ref_star:
alignment_cube = np.zeros((1 + n + nref, subframesize, subframesize))
alignment_cube[1:(n + 1), :, :] = cube_sci_lpf
alignment_cube[(n + 1):(n + 2 + nref), :, :] = cube_ref_lpf
else:
alignment_cube = np.zeros((1 + n, subframesize, subframesize))
alignment_cube[1:(n + 1), :, :] = cube_sci_lpf
n_frames = alignment_cube.shape[0] # 1+n or 1+n+nref
cum_y_shifts = 0
cum_x_shifts = 0
for i in range(alignment_iter):
alignment_cube[0] = np.median(alignment_cube[1:(n + 1)], axis=0)
if recenter_median:
# Recenter the median frame using a 2d fit
if fit_type == 'gaus':
crop_sz = int(fwhm)
else:
crop_sz = int(6*fwhm)
if not crop_sz%2:
crop_sz+=1
sub_image, y1, x1 = get_square(alignment_cube[0], size=crop_sz,
y=ceny, x=cenx, position=True)
if fit_type == 'gaus':
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dgaussian(sub_image, crop=False,
threshold=False, sigfactor=1,
debug=debug, full_output=False)
elif fit_type == 'ann':
y_i, x_i, rad = _fit_2dannulus(sub_image, fwhm=fwhm, crop=False,
hole_rad=0.5, sampl_cen=0.1,
sampl_rad=0.2, ann_width=0.5,
unc_in=2.)
yshift = ceny - (y1 + y_i)
xshift = cenx - (x1 + x_i)
alignment_cube[0] = frame_shift(alignment_cube[0, :, :], yshift,
xshift, imlib=imlib,
interpolation=interpolation)
# center the cube with stretched values
cube_stret = np.log10((np.abs(alignment_cube) + 1) ** gammaval)
if mask is not None and crop:
mask_tmp = frame_crop(mask, subframesize)
else:
mask_tmp = mask
res = cube_recenter_dft_upsampling(cube_stret, (ceny, cenx), fwhm=fwhm,
subi_size=None, full_output=True,
verbose=False, plot=False,
mask=mask_tmp, imlib=imlib,
interpolation=interpolation)
_, y_shift, x_shift = res
sqsum_shifts = np.sum(np.sqrt(y_shift ** 2 + x_shift ** 2))
print('Square sum of shift vecs: ' + str(sqsum_shifts))
for j in range(1, n_frames):
alignment_cube[j] = frame_shift(alignment_cube[j], y_shift[j],
x_shift[j], imlib=imlib,
interpolation=interpolation)
cum_y_shifts += y_shift
cum_x_shifts += x_shift
cube_reg_sci = cube_sci.copy()
cum_y_shifts_sci = cum_y_shifts[1:(n + 1)]
cum_x_shifts_sci = cum_x_shifts[1:(n + 1)]
for i in range(n):
cube_reg_sci[i] = frame_shift(cube_sci[i], cum_y_shifts_sci[i],
cum_x_shifts_sci[i], imlib=imlib,
interpolation=interpolation)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(cum_x_shifts_sci, 'o-', label='Shifts in x', alpha=0.5)
plt.plot(cum_y_shifts_sci, 'o-', label='Shifts in y', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n))
la = 'Histogram'
_ = plt.hist(cum_x_shifts_sci, bins=b, alpha=0.5, label=la+' shifts X')
_ = plt.hist(cum_y_shifts_sci, bins=b, alpha=0.5, label=la+' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if ref_star:
cube_reg_ref = cube_ref.copy()
cum_y_shifts_ref = cum_y_shifts[(n + 1):]
cum_x_shifts_ref = cum_x_shifts[(n + 1):]
for i in range(nref):
cube_reg_ref[i] = frame_shift(cube_ref[i], cum_y_shifts_ref[i],
cum_x_shifts_ref[i], imlib=imlib,
interpolation=interpolation)
if ref_star:
if full_output:
return (cube_reg_sci, cube_reg_ref, cube_sci_lpf, cube_stret,
cum_x_shifts_sci, cum_y_shifts_sci, cum_x_shifts_ref,
cum_y_shifts_ref)
else:
return (cube_reg_sci, cube_reg_ref)
else:
if full_output:
return (cube_reg_sci, cube_sci_lpf, cube_stret,
cum_x_shifts_sci, cum_y_shifts_sci)
else:
return cube_reg_sci
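# Example usage (illustrative sketch): register a science cube on its speckle pattern,
# optionally registering a reference cube to the same solution.
#     cube_sci_reg = cube_recenter_via_speckles(cube_sci, fwhm=4.2, plot=False)
#     cube_sci_reg, cube_ref_reg = cube_recenter_via_speckles(cube_sci, cube_ref=cube_ref,
#                                                             fwhm=4.2, plot=False)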
def _fit_2dannulus(array, fwhm=4, crop=False, cent=None, cropsize=15,
hole_rad=0.5, sampl_cen=0.1, sampl_rad=None, ann_width=0.5,
unc_in=2.):
"""Finds the center the center of a donut-shape signal (e.g. a coronagraphic
PSF) by fitting an annulus, using a grid of positions for the center and
radius of the annulus. The best fit is found by maximizing the mean flux
measured in the annular mask. Requires the image to be already roughly
centered (by an uncertainty provided by unc_in).
Parameters
----------
array : array_like
Image with a single donut-like source, already approximately at the
center of the frame.
fwhm : float
Gaussian PSF full width half maximum from fitting (in pixels).
hole_rad: float, opt
First estimate of the hole radius (in terms of fwhm). The grid search
on the radius of the optimal annulus goes from 0.5 to 2 times hole_rad.
Note: for the AGPM PSF of VLT/NACO, the optimal hole_rad ~ 0.5FWHM.
sampl_cen: float, opt
Precision of the grid sampling to find the center of the annulus (in
pixels)
sampl_rad: float, opt or None.
Precision of the grid sampling to find the optimal radius of the
annulus (in pixels). If set to None, there is no grid search for the
optimal radius of the annulus, the value given by hole_rad is used.
ann_width: float, opt
Width of the annulus in FWHM; default is 0.5 FWHM.
unc_in: float, opt
Initial uncertainty on the center location (with respect to center of
input subframe) in pixels; this will set the grid width.
Returns
-------
mean_y : float
Source centroid y position on the full image from fitting.
mean_x : float
Source centroid x position on the full image from fitting.
if sampl_rad is not None, also returns final_hole_rad:
final_hole_rad : float
Best fit radius of the hole, in terms of fwhm.
"""
if cent is None:
ceny, cenx = frame_center(array)
else:
cenx, ceny = cent
if crop:
x_sub_px = cenx%1
y_sub_px = ceny%1
imside = array.shape[0]
psf_subimage, suby, subx = get_square(array, min(cropsize, imside),
int(ceny), int(cenx),
position=True)
ceny, cenx = frame_center(psf_subimage)
ceny+=y_sub_px
cenx+=x_sub_px
else:
psf_subimage = array.copy()
ann_sz = ann_width*fwhm
grid_sh_x = np.arange(-unc_in,unc_in,sampl_cen)
grid_sh_y = np.arange(-unc_in,unc_in,sampl_cen)
if sampl_rad is None:
rads = [hole_rad*fwhm]
else:
rads = np.arange(0.5*hole_rad*fwhm,2*hole_rad*fwhm,sampl_rad)
flux_ann = np.zeros([grid_sh_x.shape[0],grid_sh_y.shape[0]])
best_rad = np.zeros([grid_sh_x.shape[0],grid_sh_y.shape[0]])
for ii, xx in enumerate(grid_sh_x):
for jj, yy in enumerate(grid_sh_y):
tmp_tmp = frame_shift(array,yy,xx)
for rr, rad in enumerate(rads):
# mean flux in the annulus
tmp = frame_basic_stats(tmp_tmp, 'annulus',inner_radius=rad,
size=ann_sz, plot=False)
if tmp > flux_ann[ii,jj]:
flux_ann[ii,jj] = tmp
best_rad[ii,jj] = rad
i_max,j_max = np.unravel_index(np.argmax(flux_ann),flux_ann.shape)
mean_x = cenx - grid_sh_x[i_max]
mean_y = ceny - grid_sh_y[j_max]
if sampl_rad is None:
return mean_y, mean_x
else:
final_hole_rad = best_rad[i_max,j_max]/fwhm
return mean_y, mean_x, final_hole_rad
|
the-stack_0_14421 | #!/bin/env python
"""Log file
Author: Friedrich Schotte, Mar 2, 2016 - Oct 7, 2017
"""
__version__ = "1.1.5" # caching
from logging import debug,warn,info,error
class LogFile(object):
name = "logfile"
from persistent_property3 import persistent_property
filename = persistent_property("filename","")
def __init__(self,name="logfile",columns=["date time","value"],filename=None):
"""filename: where to save
columns: list of strings"""
self.name = name
self.columns = columns
if filename is not None: self.filename = filename
from _thread import allocate_lock
self.lock = allocate_lock()
def log(self,*args,**kwargs):
"""Append to logfile
time: time in seconds since 1970-01-01 00:00:00 UTC
"""
from time import time
from time_string import date_time
from normpath3 import normpath
from os.path import exists,dirname; from os import makedirs
values = args
if "time" in kwargs: timestamp = kwargs["time"]
else: timestamp = time()
with self.lock: # Allow only one thread at a time inside this function.
filename = normpath(self.filename)
if not exists(dirname(filename)): makedirs(dirname(filename))
if not exists(filename): header = "#"+"\t".join(self.columns)+"\n"
else: header = ""
fields = [date_time(timestamp)]+[str(v) for v in values]
line = "\t".join(fields)+"\n"
file(filename,"ab").write(header+line)
def history(self,*args,**kwargs):
"""time_range: tmin,tmax: time in seconds since 1970-01-01 00:00:00 UTC
range: imin,imax: all vaues from imin to imax, including imax
(Negative integers count from the end, -1 = last.)
count: last N
*args: column names"""
from numpy import nan
if "count" in kwargs:
count = kwargs["count"]
lines = self.lines(self.last_lines_range(count))
if "time_range" in kwargs:
time_range = kwargs["time_range"]
lines = self.lines(self.timestamp_range(time_range))
column_names = args
column_indices = [self.columns.index(name) for name in column_names]
values = []
for i in range(0,len(lines)):
if len(lines[i]) == len(column_names):
try:
row_values = [convert(lines[i][j],name) for (j,name)
in zip(column_indices,column_names)]
values += [row_values]
except Exception as msg:
warn("logfile: line %d/%d %r: %s" % (i+1,len(lines),lines[i],msg))
        values = list(zip(*values)) # transpose: one sequence per column
if values == []: values = [[]]*len(column_names)
return values
def lines(self,start_end):
"""Part of the file.
start: byte offset (self.contents[start] is the first character included)
end: byte offset (self.contents[end] will not be included)
Return value: list of lists of strings, each list representing a line
"""
        start = int(start_end[0]) # int() for Python 3 compatibility
        end = int(start_end[1]) # int() for Python 3 compatibility
lines = self.content[start:end].split("\n")
# Get rid of empty lines.
if lines[:1] == ['']: lines = lines[1:]
if lines[-1:] == ['']: lines = lines[:-1]
# Get rid of comment lines.
while len(lines)>0 and lines[0].startswith("#"): lines = lines[1:]
lines = [l.split("\t") for l in lines]
return lines
def last_lines_range(self,count):
"""Where are the last n lines from the end of the file?
Return value: tuple of byte offsets: begin,end
"""
content = self.content
j = len(content)
if content[j-1:j] == "\n": j -= 1
i = j
for n in range(0,count):
i2 = content.rfind("\n",0,i)
if i2<0: break
i = i2
i += 1
return i,j
    def timestamp_range(self,t): # t = (t1,t2); tuple parameter unpacking was removed in Python 3
"""Start and end byte offsets of a time range
t1: seconds since 1970-01-01T00:00:00+00
t2: seconds since 1970-01-01T00:00:00+00
"""
t1 = t[0]
t2 = t[1]
return [self.timestamp_location(t) for t in (t1,t2)]
def timestamp_location(self,timestamp):
"""First line with a time stamp later to the given time stamp.
Return value: byte offset from the beginning of the file.
Length of file if all timestamp in the file are earlier
timestamp: seconds since 1970-01-01T00:00:00+00"""
from numpy import isnan,clip
text = self.content
        offset = len(text)//2  # integer division: str.find() needs an int offset
        step = len(text)//4
while step > 0:
##debug("offset %r, step %r" % (offset,step))
t = self.next_timestamp(text,offset)
if isnan(t): offset = len(text); break
if t <= timestamp: offset += step
else: offset -= step
offset = clip(offset,0,len(text))
            step = (step+1)//2 if step > 1 else 0
return offset
@staticmethod
def next_timestamp(text,offset):
from time_string import timestamp
from numpy import nan
i = text.find("\n",offset)+1
if i < 0: t = nan
else:
j = text.find("\t",i)
if j < 0: t = nan
else: t = timestamp(text[i:j])
return t
    @property
    def content(self):
        # Memory-mapped read of the log file. Note: this definition is
        # superseded by the caching `content` property defined right below
        # (the later definition of the same name wins).
        from normpath3 import normpath
        filename = normpath(self.filename)
        from mmap import mmap,ACCESS_READ
        try:
            f = open(filename)
            content = mmap(f.fileno(),0,access=ACCESS_READ)
        except IOError: content = ""
        return content
@property
def content(self):
from os.path import exists,getsize
from normpath3 import normpath
filename = normpath(self.filename)
if exists(filename):
size_change = getsize(filename) - len(self.cached_content)
if size_change > 0:
##debug("Logfile: Reading %d bytes" % size_change)
                f = open(filename)
f.seek(len(self.cached_content))
self.cached_content += f.read()
elif size_change < 0:
##debug("Logfile: Reading %d bytes" % getsize(filename))
                self.cached_content = open(filename).read()
else: self.cached_content = ""
return self.cached_content
def get_cached_content(self):
if self.filename in self.file_cache:
content = self.file_cache[self.filename]
else: content = ""
return content
def set_cached_content(self,content):
self.file_cache[self.filename] = content
cached_content = property(get_cached_content,set_cached_content)
file_cache = {}
@property
def start_time(self):
from time_string import timestamp
from time import time
lines = self.lines((0,80))
try: t = timestamp(lines[0][0])
except: t = time()
return t
def __len__(self): return self.content[:].count("\n")-1
logfile = LogFile
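# Example usage (illustrative sketch; the file path below is a placeholder):
#     temperature_log = LogFile("temperature", columns=["date time", "value"],
#                               filename="/tmp/temperature.log")
#     temperature_log.log(25.3)
#     times, values = temperature_log.history("date time", "value", count=10)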
def convert(x,name):
"""Try to convert string to a Python object.
if not possible return a string
name: if "date time", force conversion from string to seconds"""
if name == "date time": return timestamp(x)
try: return float(x)
except: pass
try: return timestamp(x)
except: pass
return x
def timestamp(date_time):
"""Convert a date string to number of seconds since 1 Jan 1970 00:00 UTC
date_time: e.g. "2017-10-04 20:17:34.286479-0500"
or "2017-10-04 20:17:34-0500"
"""
from datetime import datetime
if date_time[-5] in "+-": date_time,TZ = date_time[:-5],date_time[-5:]
else: TZ = "+0000"
if "." in date_time: format = "%Y-%m-%d %H:%M:%S.%f"
else: format = "%Y-%m-%d %H:%M:%S"
utc_dt = datetime.strptime(date_time,format)
timestamp = (utc_dt - datetime(1970, 1, 1)).total_seconds()
TZ_offset = int(TZ[0:3])*3600
timestamp -= TZ_offset
return timestamp
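# Example (illustrative): both plain and fractional-second, offset-aware strings are accepted.
#     timestamp("2017-10-04 20:17:34-0500")        # -> 1507166254.0 (seconds since 1970-01-01 UTC)
#     timestamp("2017-10-04 20:17:34.286479-0500")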
##from time_string import timestamp
if __name__ == "__main__":
from pdb import pm # for debugging
import logging; logging.basicConfig(level=logging.DEBUG)
from channel_archiver3 import channel_archiver
from time import time
self = channel_archiver.logfile("NIH:TEMP.RBV")
print('t=time(); x=self.history("date time","value",time_range=(time()-10*60,time())); time()-t')
print('len(self.content)')
print('t=time(); x=self.content; time()-t')
|
the-stack_0_14422 | from WMCore.Configuration import Configuration
import os,sys
config = Configuration()
reqNamedFromArg = [ arg for arg in sys.argv if arg.startswith( 'General.requestName=' ) ][0].split( '=' )[-1]
puFromArg = reqNamedFromArg[ reqNamedFromArg.find('PU')+2:]
generationInfo = {'0p5':[0.5 , 200 , 500] ,
'1' : [1.0 , 200 , 500] ,
'1p5' : [1.5 , 200 , 500 ] ,
'10' : [10 , 200 , 500 ] ,
'50' : [50 , 200 , 200 ] ,
'100' : [100 , 500 , 100],
'140' : [140 , 600 , 80 ] ,
'200' : [200 , 1000 , 50 ] }
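# Illustrative reading of the table above (inferred from how it is used below, so treat
# it as an assumption): each entry is [pileup, number_of_jobs, events_per_job]. For
# example, pu='200' gives Data.unitsPerJob = 50 and Data.totalUnits = 50 * 1000 = 50000.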
config.section_('General')
config.General.requestName = ''
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.section_('JobType')
config.JobType.pluginName = 'PrivateMC'
config.JobType.psetName = 'GEN_SIM_DIGI_cfg.py'
config.JobType.allowUndistributedCMSSW = True
config.JobType.sendPythonFolder = True
config.JobType.numCores = 2
config.JobType.maxMemoryMB = 5000
config.JobType.maxJobRuntimeMin = 5000
config.JobType.pyCfgParams = ["pu={0}".format(generationInfo[puFromArg][0])]
config.section_('Data')
config.Data.outputPrimaryDataset = 'NuGun'
config.Data.splitting = 'EventBased'
config.Data.unitsPerJob = generationInfo[puFromArg][2]
config.Data.totalUnits = generationInfo[puFromArg][2] * generationInfo[puFromArg][1]
config.Data.publication = True
config.Data.outputDatasetTag = 'FBCMNuGunPU{0}'.format(puFromArg)
config.Data.outLFNDirBase = '/store/group/dpg_bril/comm_bril/phase2-sim/FBCM/'
config.section_("Site")
config.Site.storageSite = "T2_CH_CERN"
config.Site.whitelist = ["T2_CH_CERN"]
|
the-stack_0_14423 | import sublime
from .event_handler import EventHandler
from .settings import Settings
package_control_installed = False
LOCAL_PACKAGES_VERSION = "0.1.3"
evaluating = False
already_evaluate = False
retry_times = 3
def plugin_loaded():
Settings.reset()
Settings.startup()
print("[Local Packages] v%s" % (LOCAL_PACKAGES_VERSION))
check_package_control()
def check_package_control():
try:
__import__("Package Control").package_control
global package_control_installed
package_control_installed = True
except:
global retry_times
if retry_times > 0:
retry_times -= 1
sublime.set_timeout(check_package_control, 3000)
else:
sublime.error_message(
"Package Control is not found.\n\n" +
"Local Packages will now disabled"
)
return
EventHandler().register_handler(
evaluate_install,
EventHandler().ON_LOAD
)
evaluate_install()
def evaluate_install(view=None):
global evaluating, already_evaluate
if evaluating:
return
if not already_evaluate:
print("[Local Packages] Evaluating missing packages")
from .package_evaluator import PackageEvaluatorThread
evaluating = True
PackageEvaluatorThread(
window=sublime.active_window(),
callback=on_installed
).start()
def on_installed(failed_packages=[]):
global evaluating, already_evaluate
evaluating = False
if already_evaluate:
return
if len(failed_packages) > 0:
msg = "Local Packages failed to install %s missing packages...\n" % (
len(failed_packages)
)
limit = 10
for package in failed_packages:
limit -= 1
if limit < 0:
break
msg += " - %s\n" % (package)
if limit < 0:
msg += "and more..."
sublime.error_message(msg)
else:
print("[Local Packages] Dependencies already installed")
already_evaluate = True
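# Rough flow sketch (added for clarity; the settings file name and keys are defined in this package's
# Settings helper, so no concrete key names are assumed here):
#
#     plugin_loaded() -> check_package_control() retries up to 3 times while Package Control loads,
#     then evaluate_install() starts a PackageEvaluatorThread which installs whatever packages the
#     user's Local Packages settings list and reports any failures through on_installed().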
|
the-stack_0_14424 | """
Authorization for Admin API
"""
import re
from shared.models.dashboard_entities import AdminDashboardUser
from shared.service.jwt_auth_wrapper import JWTAuthManager
SCHOOL_REGEX: re.Pattern = re.compile('(?P<school>.+)-admin', flags=re.I)
# JWT Authentication Manager
AUTH_MANAGER = JWTAuthManager(oidc_vault_secret="oidc/admin-jwt",
object_creator=lambda claims, assumed_role, user_roles: AdminDashboardUser(
last_name=claims['family_name'],
first_name=claims['given_name'],
email=claims['email'],
roles=user_roles,
school=SCHOOL_REGEX.match(assumed_role).group('school')
))
OIDC_COOKIE = AUTH_MANAGER.auth_cookie('kc-access', allow_role_switching=True)
# KeyCloak Access Token set by OIDC Proxy (Auth0 Lock)
|
the-stack_0_14425 | from urllib.robotparser import RobotFileParser
from urllib.request import urlopen, Request
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
rp = RobotFileParser()
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:67.0) Gecko/20100101 Firefox/67.0'}
req = Request(url='https://www.jianshu.com/robots.txt', headers=headers)
rp.parse(urlopen(req).read().decode('utf-8').split('\n'))
print(rp.can_fetch('*', 'https://www.jianshu.com/p/b67554025d7d'))
print(rp.can_fetch('*', "https://www.jianshu.com/search?q=python&page=1&type=collections")) |
the-stack_0_14427 | # Import Python Libs
from __future__ import absolute_import
import os
import logging
import shutil
# Local imports
from . import constants
from . import util_which
from . import keyring
from . import ops_pool
from . import rados_client
log = logging.getLogger(__name__)
class Error(Exception):
"""
Error
"""
def __str__(self):
doc = self.__doc__.strip()
return ': '.join([doc] + [str(a) for a in self.args])
class rgw_ctrl(rados_client.ctrl_rados_client):
def __init__(self, **kwargs):
super(rgw_ctrl, self).__init__(**kwargs)
self.service_name = "ceph-radosgw"
# Set path to rgw binary
self.path_service_bin = util_which.which_ceph_rgw.path
self.bootstrap_keyring_type = 'rgw'
self.keyring_service_name = 'client.{name}'.format(name=self.ceph_client_id)
self.keyring_service_capabilities = [
'osd', 'allow rwx',
'mon', 'allow rw'
]
def _set_rgw_path_lib(self):
        if self.ceph_client_id is None:
raise Error("rgw name not specified")
self.rgw_path_lib = '{path}/{cluster}-{name}'.format(
path=constants._path_ceph_lib_rgw,
cluster=self.model.cluster_name,
name=self.ceph_client_id
)
def rgw_pools_missing(self):
requiredPools = set([".rgw",
".rgw.control",
".rgw.gc",
".log",
".intent-log",
".usage",
".users",
".users.email",
".users.swift",
".users.uid"
])
pool_ops = ops_pool.ops_pool(self.model)
pool_ops.pool_list()
if self.model.pool_list == None:
log.error("Failed to list available pools")
return False
foundnames = set()
for pool in self.model.pool_list:
foundnames.add(pool)
return list(requiredPools.difference(foundnames))
def rgw_pools_create(self):
rc = True
pool_ops = ops_pool.ops_pool(self.model)
pool_ops.pool_list()
for name in self.rgw_pools_missing():
log.info("Adding missing pool:%s" % (name))
try:
pool_ops.pool_add(name, pg_num=16)
except (ops_pool.Error) as err:
log.error(err)
log.error("Failed to add pool '%s'" % (name))
rc = False
return rc
def prepare(self):
        # Due to the way keyring profiles work and the init scripts for rgw, we need to
        # force users to only create rgw instances with an 'rgw.' prefix. The reason we don't hide
        # this from the user is that both the systemd files and rgw deployments may
        # exist without the prefix if the bootstrap keyring was not used in the key
        # creation for the rgw service.
if not self.ceph_client_id.startswith("rgw."):
raise Error("rgw name must start with 'rgw.'")
self.service_available()
self._set_rgw_path_lib()
path_bootstrap_keyring = keyring._get_path_keyring_rgw(self.model.cluster_name)
if not os.path.isfile(path_bootstrap_keyring):
raise Error("Keyring not found at %s" % (path_bootstrap_keyring))
if not os.path.isdir(self.rgw_path_lib):
log.info("Make missing directory:%s" % (self.rgw_path_lib))
os.makedirs(self.rgw_path_lib)
self.keyring_service_path = os.path.join(self.rgw_path_lib, 'keyring')
self.keyring_service_create()
def remove(self):
self._set_rgw_path_lib()
if not os.path.isdir(self.rgw_path_lib):
return
rgw_path_keyring = os.path.join(self.rgw_path_lib, 'keyring')
if os.path.isfile(rgw_path_keyring):
log.info("Remove from auth list keyring:%s" % (rgw_path_keyring))
try:
self.keyring_auth_remove()
except Error:
log.error("Failed to remote from auth list")
removetree = "%s/" % (self.rgw_path_lib)
log.info("Remove directory content:%s" % (removetree))
shutil.rmtree(removetree)
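# Rough usage sketch (hypothetical: the exact constructor keywords come from
# rados_client.ctrl_rados_client and the surrounding cluster model, so this is illustrative only):
#
#     ctrl = rgw_ctrl(**kwargs)        # kwargs would carry the cluster model; client id e.g. "rgw.gateway1"
#     ctrl.prepare()                   # creates <lib dir>/<cluster>-rgw.gateway1 and its keyring
#     ctrl.rgw_pools_create()          # adds any missing .rgw.* / .users.* pools
#
# prepare() deliberately rejects any client id that does not start with the 'rgw.' prefix.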
|
the-stack_0_14428 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple endpoint that returns an email or an attachment from one"""
""" THIS ONLY DEALS WITH PUBLIC EMAILS FOR NOW - AAA IS BEING WORKED ON"""
import plugins.server
import plugins.session
import plugins.messages
import plugins.database
import aiohttp.web
import plugins.aaa
import base64
import typing
async def process(
server: plugins.server.BaseServer, session: plugins.session.SessionObject, indata: dict,
) -> typing.Union[dict, aiohttp.web.Response]:
# First, assume permalink and look up the email based on that
email = await plugins.messages.get_email(session, permalink=indata.get("id"))
# If not found via permalink, it might be message-id instead, so try that
if email is None:
email = await plugins.messages.get_email(session, messageid=indata.get("id"))
# If email was found, process the request if we are allowed to display it
if email and isinstance(email, dict) and not email.get("deleted"):
if plugins.aaa.can_access_email(session, email):
# Are we fetching an attachment?
if not indata.get("attachment"):
email["gravatar"] = plugins.messages.gravatar(email)
return email
else:
fid = indata.get("file")
for entry in email.get("attachments", []):
if entry.get("hash") == fid:
ct = entry.get("content_type") or "application/binary"
headers = {
"Content-Type": ct,
"Content-Length": str(entry.get("size")),
}
if "image/" not in ct and "text/" not in ct:
headers["Content-Disposition"] = f"attachment; filename=\"{entry.get('filename')}\""
try:
assert session.database, "Database not connected!"
attachment = await session.database.get(
index=session.database.dbs.attachment, id=indata.get("file")
)
if attachment:
blob = base64.decodebytes(attachment["_source"].get("source").encode("utf-8"))
return aiohttp.web.Response(headers=headers, status=200, body=blob)
except plugins.database.DBError:
pass # attachment not found
return aiohttp.web.Response(headers={}, status=404, text="Attachment not found")
return aiohttp.web.Response(headers={}, status=404, text="Email not found")
def register(server: plugins.server.BaseServer):
return plugins.server.Endpoint(process)
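# Request-shape sketch (derived from the handler above, shown for clarity; field names follow how
# indata is used and are not an official API description):
#
#     fetch an email:       {"id": "<permalink or message-id>"}
#     fetch an attachment:  {"id": "<permalink or message-id>", "attachment": true, "file": "<attachment hash>"}
#
# The attachment branch looks the hash up in the email's "attachments" list and streams the
# base64-decoded source back with the stored content type.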
|
the-stack_0_14430 | # -*- coding: utf-8 -*-
# Radproc - A GIS-compatible Python-Package for automated RADOLAN Composite Processing and Analysis.
# Copyright (c) 2018, Jennifer Kreklow.
# DOI: https://doi.org/10.5281/zenodo.1313701
#
# Distributed under the MIT License (see LICENSE.txt for more information), complemented with the following provision:
# For the scientific transparency and verification of results obtained and communicated to the public after
# using a modified version of the work, You (as the recipient of the source code and author of this modified version,
# used to produce the published results in scientific communications) commit to make this modified source code available
# in a repository that is easily and freely accessible for a duration of five years after the communication of the obtained results.
"""
================================
DWD MR90 Gauge Data Processing
================================
Collection of functions for processing DWD rain gauge data in MR90 format.
Convert gauge data to pandas DataFrames with the same format as RADOLAN data and save them as HDF5 datasets.
.. autosummary::
:nosignatures:
:toctree: generated/
stationfile_to_df
summarize_metadata_files
dwd_gauges_to_hdf5
.. module:: radproc.dwd_gauge
:platform: Windows
:synopsis: Python package radproc (Radar data processing), Module arcgis
.. moduleauthor:: Jennifer Kreklow
"""
import numpy as np
import pandas as pd
import os, gc
from datetime import datetime
from multiprocessing import Pool
import warnings, tables
def _read_line(line):
"""
Read in one line (= 1 hour) of gauge data according to MR90 format description.
10-minute-blocks are merged to 60-minute-blocks and time index is shifted to make data hours begin at hh:50 and convert time zone from MEZ to UTC.
:Parameters:
------------
line : string
data line containing station number, date and minute measurement data of weighing (Tropfer) and seesaw (Wippe) method in 10-minute-blocks.
:Returns:
---------
data : dictionary
with data collected from line.
        keys: statnr, startdatum_dt (datetime object), startdatum (string), dateIndex_UTC, wippe, tropfer, N_gefallen and qualitaetsbyte
"""
data = dict(wippe = "", tropfer = "")
#data['kennung'] = line[0:2]
data['statnr'] = line[2:7].strip()
year = int(line[7:11])
month = int(line[11:13])
day = int(line[13:15])
hour = int(line[15:17])
data['startdatum_dt'] = datetime(year, month, day, hour)
data['startdatum'] = data['startdatum_dt'].strftime("%Y-%m-%d %H:%M")
    # Create a one-hour pandas time series with a fixed one-minute frequency
    # shift by -70 minutes, since the hour starts at xx:50 of the previous hour and MEZ - 1 h = UTC
data['dateIndex_UTC'] = pd.date_range(start = data['startdatum'], periods = 60, freq = '1min').shift(-70).tolist()
#leerfeld = line[17:19]
    # Merge the measurements from the 10-minute blocks into one 60-minute string
    # The list positions (each marking the start of a block) follow the DWD format description
for wippe_start in [19, 100, 181, 262, 343, 424]:
        tropfer_start = wippe_start + 30  # seesaw (Wippe) record: 30 characters
        N_gefallen_start = wippe_start + 70  # weighing (Tropfer) record: 40 characters
        #qualitaetsbyte = wippe_start + 80  # indicator record: 10 characters, quality byte = 1 character --> total: 81 characters
data['wippe'] = data['wippe'] + line[wippe_start:tropfer_start]
data['tropfer'] = data['tropfer'] + line[tropfer_start:N_gefallen_start]
#daten['N_gefallen'] = daten['N_gefallen'] + line[N_gefallen_start:qualitaetsbyte]
#daten['qualitaetsbyte'] = daten['qualitaetsbyte'] + line[qualitaetsbyte]
return data
def _interpret_line(data_dict):
"""
Convert and decode data line of one hour from dictionary to pandas DataFrame.
Decode data to precipitation values in mm,
insert np.nan as NoData value where necessary and
convert data to one-column DataFrame with time index
:Parameters:
------------
data_dict : dictionary
with data collected from data line.
necessary keys: statnr, dateIndex_UTC, wippe, tropfer
dictionary can be read in with function _read_line()
:Returns:
---------
df : one-column pandas DataFrame
with precipitation data of one hour in mm
"""
wippe = data_dict['wippe']
tropfer = data_dict['tropfer']
dateIndex = data_dict['dateIndex_UTC']
arr = np.zeros(60, dtype = np.float32)
arr.fill(np.nan)
s = pd.Series(arr, index = dateIndex)
tropferNoData = 60 * "-999"
#wippeNoData = 60 * "-99"
    # Interpreting the data:
    # By default the Tropfer (weighing) measurements are evaluated and inserted into the pandas Series s.
    # Exception: all 60 time steps have the value -999, i.e. NoData. Only then are the Wippe (seesaw) values used.
    # Each Tropfer value consists of four characters. They are read and interpreted one after another.
    # -999 = error flag --> np.nan --> pass, since the Series s was already initialized with NaN
    # -001 = no precipitation --> 0.0
    # xx = xx * 0.01 mm precipitation
    # some time steps are faulty and have the value "0000". These are assigned a precipitation of 0.0.
if tropfer != tropferNoData:
        # Tropfer (weighing) measurement available
k = 0
for i in range(0, len(tropfer), 4):
value = tropfer[i:i+4]
if value == "-999":
pass
elif value == "-001" or value == "0000":
s[dateIndex[k]] = 0.0
else:
try:
s[dateIndex[k]] = float(value)*0.01
except:
s[dateIndex[k]] = np.nan
k += 1
else:
        # Wippe (seesaw) measurement available.
        # Each Wippe value consists of three characters. They are read and interpreted one after another.
        # -99 = error flag --> np.nan
        # -01 = no precipitation --> 0.0
        # xx = xx * 0.1 mm precipitation
        # some time steps are faulty and have the value "000". These are assigned a precipitation of 0.0.
k = 0
for i in range(0, len(wippe), 3):
value = wippe[i:i+3]
if value == "-99":
pass
elif value == "-01" or value == "000":
s[dateIndex[k]] = 0.0
else:
try:
s[dateIndex[k]] = float(value)*0.1
except:
s[dateIndex[k]] = np.nan
k += 1
    # Convert the Series into a one-column DataFrame.
    # Necessary so that the station number can be stored as the column name.
df = pd.DataFrame(s.values, index = s.index, columns = [data_dict['statnr']])
return df
def stationfile_to_df(stationfile):
"""
Import a textfile with DWD rain gauge data in MR90 format into a one-column pandas DataFrame.
    Downsample frequency from 1 to 5-minute intervals to adjust the temporal resolution to the best-resolved RADOLAN data product YW.
Convert time zone to UTC.
:Parameters:
------------
stationfile : string
Path and name of textfile containing rain gauge measurements.
:Returns:
---------
df : one-column pandas DataFrame
with data imported from stationfile downsampled to 5-minute intervals.
"""
#fails = []
    #for stationfile in stationfiles: --> not needed, since map() replaces the loop during parallel processing
f = open(stationfile, "r")
lines = f.readlines()
f.close()
df = pd.DataFrame()
for line in lines:
        dataline = _read_line(line)  # build the dictionary for this hour
try:
df_hour = _interpret_line(dataline)
df_5min = df_hour.resample('5min', how = 'sum', closed = 'left', label = 'left')
df = pd.concat([df,df_5min], axis = 0)
except:
            # Note: printing the errors does not work when parallel processing is used, hence commented out.
#print "Problem bei Stunde beginnend um %s UTC in Station %s." % (str(daten['dateIndex_UTC'][0]), daten['statnr'])
#fails.append((str(daten['dateIndex_UTC'][0]), daten['statnr']))
continue
del lines, df_hour, df_5min
gc.collect()
df = df.tz_localize('UTC')
#print "Datei %s erfolgreich bearbeitet. Dauer: %.2f Minuten" % (stationfile, (time.time() - t0)/60)
return df
def summarize_metadata_files(inFolder):
"""
    Import all metadata files and summarize the metadata in a single textfile.
Metadata include information on station number and name, geographic coordinates and height above sea level.
:Parameters:
------------
inFolder : string
Path of directory containing metadata files for DWD gauges.
:Returns:
---------
summaryFile : string
Path and name of output summary file created.
"""
metaFiles = [os.path.join(inFolder, mf) for mf in os.listdir(inFolder)]
summaryFile = os.path.join(os.path.split(inFolder)[0], "metadata_summary.txt")
outFile = open(summaryFile, "w")
i = 0
for f in metaFiles:
infile = open(f, "r")
while True:
line = infile.readline().strip()
if line.startswith("Station="):
break
#print line
line = line.replace(":", " ")
outFile.write(line[:-1] + "\n")
infile.close()
i += 1
outFile.close()
return summaryFile
def dwd_gauges_to_hdf5(inFolder, HDFFile):
"""
Import all textfiles containing DWD rain gauge data in MR90 format from input folder into a DataFrame and save it as monthly HDF5 datasets.
Frequency is downsampled from 1 to 5-minute intervals to adjust temporal resolution to RADOLAN product YW.
Time zone is converted from MEZ to UTC.
:Parameters:
------------
inFolder : string
Path of directory containing textfiles with DWD rain gauge data in MR90 format.
HDFFile : string
Path and name of the HDF5 file.
If the specified HDF5 file already exists, the new dataset will be appended; if the HDF5 file doesn't exist, it will be created.
:Returns:
---------
None
Save monthly DataFrames to specified HDF5 file.
:Note:
------
    To import gauge data from HDF5, you can use the same functions from radproc.core as for RADOLAN data, since both are stored in the same data format and structure.
"""
stationfiles = [os.path.join(inFolder, f) for f in os.listdir(inFolder)]
#stationframes = []
    # Process the station files with parallel processing to speed things up.
    # The Pool() function from the multiprocessing module spawns several subprocesses on different
    # processor cores, which bypass the global interpreter lock (GIL) that Python normally imposes.
    # map() takes a function and a list as input arguments and returns a list as output.
    # The function is executed on different processor cores for each list element.
    # Optionally, Pool(x) can be given the number x of cores to use.
    # The result stationframes is a list of one-column DataFrames, one per rain gauge station.
p = Pool()
stationframes = p.map(stationfile_to_df, stationfiles)
    # Merge the DataFrames into one DataFrame with one column per station
gaugeDF = pd.concat(stationframes, axis = 1, join = 'outer', copy=False)
#ombroDF = ombroDF.asfreq('5min')
gaugeDF.columns.name = 'DWD gauges'
gaugeDF.index.name = 'Date (UTC)'
#summaryFile = summarize_metadata_files(inFolder_metadata)
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
hdf = pd.HDFStore(HDFFile, mode = "a")
for year in np.unique(gaugeDF.index.year):
for month in range(1, 13):
try:
ind = "%i-%02i" %(year, month)
HDFDataset = "%i/%i" %(year, month)
hdf.put(HDFDataset, gaugeDF.loc[ind], data_columns = True, index = True)
except:
# in case of unavailable months
continue
hdf.close()
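# Example usage (hypothetical paths, shown for illustration only). Because stationfile_to_df is mapped
# with multiprocessing.Pool, the call should be protected by the __main__ guard, especially on Windows:
#
#     if __name__ == '__main__':
#         dwd_gauges_to_hdf5(inFolder=r'D:\MR90\stationfiles', HDFFile=r'D:\MR90\dwd_gauges.h5')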
|
the-stack_0_14431 | import random
# DICTIONARIES
# A dictionary is a data structure that holds an unordered collection of items.
# While list elements are ordered by index, dictionary objects are stored as key-value pairs.
# Dictionaries resemble lists, but with one fundamental difference: they consist of keys and values.
# The key is the element through which we get the value
dictionaty_list = {'name': 'Ariel'}  # first the key 'name', then the value 'Ariel', in curly braces
# A key must be an immutable object, whereas a value can be any object.
# A dictionary cannot contain duplicate keys, whereas values may repeat (coincide).
# A value can be an object of any type, including lists and other dictionaries.
# In a dictionary, values are accessed by key, whereas in lists they are accessed by index.
# The car brand is the key, the car price is the value
car_prices = {'opel': 5000, 'toyota': 7000, 'bmw': 10000}
# Just like a printed dictionary - we look up a word (key) and get its definition (value).
print('Car prices:', car_prices)  # Output: {'opel': 5000, 'toyota': 7000, 'bmw': 10000}
# This data structure is called a dictionary
# All dictionary operations are very fast. It is an efficiently implemented data structure.
# ---------------------- Ways to initialize a dictionary ----------------------
# Way 1
# Dictionaries are declared (initialized) with curly braces {}
# dict_temp = {}
# Fill the dictionary with key-value pairs
dict_temp = {'dict1': 1, 'dict2': 2.1, 'dict3': 'name', 'dict4': [1, 2, 3]}
print(type(dict_temp), dict_temp)  # print the dictionary type and the dictionary itself
# Way 2
# using the fromkeys() method
dict_temp = dict.fromkeys(['a', 'b'])  # dict refers to the built-in class
# This creates a dictionary with the keys a and b and empty values.
print(type(dict_temp), dict_temp)  # output: <class 'dict'> {'a': None, 'b': None}
# to set values, pass one more argument
dict_temp = dict.fromkeys(['a', 'b'], [12, '2020'])  # dict refers to the built-in class
print('Using the fromkeys() method', type(dict_temp),
      dict_temp)  # output: <class 'dict'> {'a': [12, '2020'], 'b': [12, '2020']}
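# Caution (added note): fromkeys() assigns the very same value object to every key, so with a mutable
# value such as a list all keys share one list - changing it through one key shows up under the others:
shared = dict.fromkeys(['a', 'b'], [])
shared['a'].append(1)
print(shared)  # {'a': [1], 'b': [1]} - both keys point to the same list object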
# Way 3
# Initializing a dictionary with a comprehension
dict_temp = {a: a ** 2 for a in range(10)}  # the construct is the same as for lists.
# a is the key, a**2 is the value, followed by the loop construct
print('Initialized with a comprehension', type(dict_temp), dict_temp)
# output: <class 'dict'> {0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64, 9: 81}
# Way 4 (rarely used, since every key-value pair has to be written by hand)
dict_temp = dict(brend='volvo', price=5000)
print(type(dict_temp), dict_temp)  # output: <class 'dict'> {'brend': 'volvo', 'price': 5000}
# Way 5: a dictionary can also be created from sequences
# There is the dictionary class dict. It is a built-in class, which is why it is written in lowercase.
# Our own classes should be written with a Capital letter.
# The dict class has its own class-level method fromkeys()
# With this method we can create a dictionary from two sequences.
# For example, we can pass it two sequences
my_dict = dict.fromkeys((1, 2, 3), ('apple', 'orange', 'banana'))
print(my_dict)
# We get a dictionary where every key is assigned the second argument
# {1: ('apple', 'orange', 'banana'), 2: ('apple', 'orange', 'banana'), 3: ('apple', 'orange', 'banana')}
# In other words, dictionary creation is automated here so that we do not have to write it out by hand every time.
# ----------------------- Accessing dictionary contents --------------------------
# Contents are accessed by key, which is given in square brackets.
print("Car brand: ", dict_temp['brend'])  # print the value by specifying its key
print('Toyota price: ', car_prices['toyota'])
# ------------------ Dictionary functions ------------------------------
# We often need to know all the keys and all the values of a dictionary
# Get all the keys of the dictionary
print(dict_temp.keys())  # returns the special type dict_keys
# the output is dict_keys(['brend', 'price'])
# As a rule, though, this type is not used directly. It is converted to a list:
print(list(dict_temp.keys()))  # the output is ['brend', 'price']
# From there, all further operations on the keys are done with the list type
# Values are obtained with the values() method
print(list(dict_temp.values()))  # the output is ['volvo', 5000]
# In this output format, all list methods are available when working with keys and values.
# You can also work with key-value pairs. For that, use the items() method
# items() returns a list of tuples - key-value pairs (a tuple is like a list, but immutable)
print(list(dict_temp.items()))  # the output is [('brend', 'volvo'), ('price', 5000)]
# ------------------------ Working with elements ---------------------------------
# We need to read values, change them (because a dictionary is a mutable type) and add new ones
# Reinitialize the variable for the next examples
dict_temp = {a: a for a in range(10)}
print('Reinitialized', dict_temp)  # Output: {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
# CHANGING a value.
# Assign a different value to one of the keys
dict_temp[0] = 100
print(dict_temp)  # the output is {0: 100, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
# The key 0 now maps to the value 100
# ADDING key-value pairs
dict_temp['name'] = 'Alex'  # Create a new key and assign it a value.
print(dict_temp)  # Printed: {0: 100, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 'name': 'Alex'}
car_prices['mazda'] = 4000
print('Adding the new key \'mazda\' to the car list: ', car_prices)
# Output: {'opel': 5000, 'toyota': 7000, 'bmw': 10000, 'mazda': 4000}
# In a dictionary you can change a Value, but you cannot change a Key
car_prices['opel'] = 2000  # Here we try to add an element with the key 'opel', which already exists in the dictionary
print('Trying to add a second \'opel\' key to the dictionary ', car_prices)
# Output: {'opel': 2000, 'toyota': 7000, 'bmw': 10000, 'mazda': 4000}
# No new element is added. Instead, the old value is replaced with the new one
# This is how you change a value in a dictionary
# Values are removed from a dictionary with the del statement
# ----------------------- DICTIONARY METHODS ---------------------------------
# Dictionaries have a large number of methods. We have already seen some of them: keys(), values(), items()
# Methods let you clear, copy and delete a dictionary
# -------------------- del -----------------------
# Deletes a value from the dictionary by key
del car_prices['toyota']
print('Removed Toyota from the car list ', car_prices)
# Output: {'opel': 2000, 'bmw': 10000, 'mazda': 4000}
# Be careful with del, though. If you forget to specify the key whose value you want to delete,
# this statement DELETES THE WHOLE DICTIONARY TOGETHER WITH THE VARIABLE!
# The value is not returned. If you need the removed value, use the pop() method instead
# --------------------------- pop() ------------------------
# Removes a value by key
temp = dict_temp.pop('name')  # remove the key 'name'; its value 'Alex' is removed along with it
# pop() returns the removed value, which we store in a new variable
print(dict_temp)  # Printed: {0: 100, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
print(temp)  # Printed: Alex
# If the key is missing and a default is passed as the second argument, pop() returns it without raising
# an exception (without a default, a KeyError is raised)
# ------------------ clear() ----------------------
# The clear() method empties the dictionary while keeping the variable
car_prices.clear()
print('Clearing the car_prices dictionary ', car_prices)  # Output: {}
# ------------------------- Nested dictionaries -----------------------------
# Dictionaries can hold not just plain key-value pairs but also more complex structures,
# such as another dictionary.
# Dictionaries are often used to describe objects.
# For readability, they can be written over several lines
person = {
    'first name': 'Jack',
    'second name': "Brown",
    'age': 23,
    'hobbies': ['footbal', 'singing', 'photo'],  # a list
    'children': {'son': 'Alex', 'daugter': 'Marry'}  # another dictionary
}
# How do we access the contents of this dictionary?
print('Getting the person age from the dictionary ', person['age'])  # Output: 23
# That is, we specify a key and get its value by that key.
# How do we get the list of values?
print('Getting a list of values from the dictionary ', person['hobbies'])  # Output: ['footbal', 'singing', 'photo']
# This returns the whole list. But what if we want a single value from that list?
# For example the last one - 'photo'? This can be done in two steps.
# Step 1 - assign the list to a new variable
hobbies = person['hobbies']  # store the whole list in a variable
# Step 2 - get the value from the variable by index
print('Getting a value from the variable by index', hobbies[2])  # Output: photo
# The same thing can be written more concisely
print('Getting a value from the nested list the short way ',
      person['hobbies'][2])
# How do we access the nested dictionary?
# Again, in two steps:
# Step 1 - declare a new variable and assign it the nested dictionary
childrens = person['children']
# Step 2 - print a value by specifying the field we need from the nested dictionary
print('Accessing the nested dictionary ', childrens['son'])  # use the variable declared above
# A shorter way to write it:
print('Getting a value from the nested dictionary the short way ',
      person['children']['son'])
# Adding data to a nested dictionary
person['car'] = 'Mazda'  # add a new key-value pair
print('Printing the whole nested dictionary', person)
# Output: {'first name': 'Jack', 'second name': 'Brown', 'age': 23, 'hobbies': ['footbal', 'singing', 'photo'],
# 'children': {'son': 'Alex', 'daugter': 'Marry'}, 'car': 'Mazda'}
# the car information is appended at the end.
# Adding data in a loop
lst_name = ['Marry', 'Alex', 'Kate', 'Jack', 'Anna', 'Kate', 'Ronald', 'Maria', 'Tatyana', 'Evgeniy',
'Alex', 'Maria', 'Svetlana', 'Artem', 'Igor', 'Ilya']
names_dict = {}
for i in range(len(lst_name)):
names_dict[lst_name[i]] = lst_name.count(lst_name[i])
# "Marry" 1
# "Alex" 2
# "Kate" 2
# "Jack" 1
# "Anna" 1
# "Ronald" 1
# "Maria" 2
# "Tatyana" 1
# "Evgeniy" 1
# "Svetlana" 1
# "Artem" 1
# "Igor" 1
# "Ilya" 1
# Changing data in a nested dictionary
# Suppose we want to change a value in the nested list - 'footbal' to 'basketbal'
person['hobbies'][0] = 'basketbal'  # access the nested list and change its value by index
print('Changing a value in the nested list', person)
# Output: {'first name': 'Jack', 'second name': 'Brown', 'age': 23, 'hobbies': ['basketbal', 'singing', 'photo'],
# 'children': {'son': 'Alex', 'daugter': 'Marry'}, 'car': 'Mazda'}
# Another way to inspect a nested dictionary
print('Getting the keys of the nested dictionary', person.keys())  # using the keys() method
# Output: dict_keys(['first name', 'second name', 'age', 'hobbies', 'children', 'car'])
print('Getting the values of the nested dictionary', person.values())  # using the values() method
# Output:
# dict_values(['Jack', 'Brown', 23, ['basketbal', 'singing', 'photo'], {'son': 'Alex', 'daugter': 'Marry'}, 'Mazda'])
print('Getting the items of the nested dictionary', person.items())  # using the items() method
# Output: dict_items([('first name', 'Jack'), ('second name', 'Brown'), ('age', 23),
# ('hobbies', ['basketbal', 'singing', 'photo']), ('children', {'son': 'Alex', 'daugter': 'Marry'}), ('car', 'Mazda')])
# A structure in parentheses like ('first name', 'Jack') is called a tuple.
# --------------------- Iterating over a dictionary ------------------------------------
# Iterating over a dictionary works the same way as iterating over a list
for pair in dict_temp.items():
print(pair)
# The output is the list of key-value pairs:
# (0, 100)
# (1, 1)
# (2, 2)
# (3, 3)
# (4, 4)
# (5, 5)
# (6, 6)
# (7, 7)
# (8, 8)
# (9, 9)
# You can also iterate by unpacking each pair into separate variables
for key, value in dict_temp.items():  # key is the first element of the pair, value is the second
print(key, value)
# Output:
# 0 100
# 1 1
# 2 2
# 3 3
# 4 4
# 5 5
# 6 6
# 7 7
# 8 8
# 9 9
# The same way you can iterate over the individual parts of the structure
# Over the keys:
for key in dict_temp.keys():
print(key)
# Over the values:
for value in dict_temp.values():
print(value)
# Operations on the values
for value in dict_temp.values():
print(value + 10, end=' ') # 110 11 12 13 14 15 16 17 18 19
# Over several dictionaries at once
jack = {
'name': 'jack',
'car': 'bmw'
}
john = {
'name': 'john',
'car': 'audi'
}
# Say we need to collect the car information.
# create a list of dictionaries
drivers = [jack, john]
cars = []
for persons in drivers:
cars.append(persons['car'])
print(cars)
# A shorter version using a list comprehension
new_cars = [persons['car'] for persons in drivers]
print(new_cars)
# Writing persons['car'] like this is essentially an anti-pattern: we access the 'car' key assuming it exists.
# But what if it is not there? What if the list contains a person without a car? Then accessing the missing
# key raises an exception and the script stops. That is why it is considered good practice to use the
# dictionary get() method, whose first argument is the key we want to look up.
new_cars = [persons.get('car', '') for persons in drivers]
# That is, we know or assume that the dictionary has a 'car' key and we want to get it.
# We pass that key name to get(), and as the second argument we pass '' as the default value
# in case the key is missing. Instead of raising an exception, it then returns an empty string.
# ------------------------- Sorting a dictionary -------------------------------------
# Strictly speaking, you cannot sort the contents of a dictionary, because a dictionary in Python is treated
# as an unordered data structure. Even if you add elements in an ordered way, for example alphabetically,
# they may be displayed in a different order.
#
# However, you can make the extraction of elements from a dictionary follow a certain order.
# For that, an additional ordered structure that can be sorted is used - for example, a list.
#
# Sorting by keys
# The simplest option is to sort a dictionary by its keys. The algorithm for printing the contents is:
#
# Create a list of the dictionary keys.
# Sort it.
# Iterate over the list in a for loop, using each list element as a dictionary key.
d = {'a': 10, 'b': 15, 'c': 4}
list_keys = list(d.keys())
list_keys.sort()
for i in list_keys:
print(i, ':', d[i])
# a : 10
# b : 15
# c : 4
# Sorting by values
# Sorting a dictionary by its values is harder, since dictionary elements can only be accessed by key.
# However, you can build a list of ("key", "value") tuples and sort it by the second element of each pair.
# The rest of the program then works with this ordered structure rather than the original dictionary.
d = {'a': 10, 'b': 15, 'c': 4}
list_d = list(d.items())
print(list_d)
# [('a', 10), ('b', 15), ('c', 4)]
list_d.sort(key=lambda i: i[1])
print(list_d)
# [('c', 4), ('a', 10), ('b', 15)]
for i in list_d:
print(i[0], ':', i[1])
# c : 4
# a : 10
# b : 15
# If we had called sort() without the key parameter, the sorting would have been done by the first elements
# of the tuples. The key parameter takes a function. Here a lambda function is used, which keeps the code short.
# The tuples are passed to the function, and it returns their second elements, which are used for sorting.
#
# The OrderedDict class from the collections module
# The collections module provides the OrderedDict class, a subclass of dict, i.e. of the ordinary built-in
# Python dictionary. OrderedDict creates dictionary objects that remember the order of their elements. The class also has several methods that can change the order of elements in the dictionary.
from collections import OrderedDict
a = OrderedDict({1: 10, 0: 5})
print(a)
# OrderedDict([(1, 10), (0, 5)])
a[2] = 20
print(a)
# OrderedDict([(1, 10), (0, 5), (2, 20)])
for i in a:
print(i, ':', a[i])
# 1 : 10
# 0 : 5
# 2 : 20
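# Note (added for clarity): since Python 3.7 plain dicts are guaranteed to keep insertion order,
# so the "unordered" wording above mainly matters for older interpreters. A minimal sketch that
# builds a new, value-sorted dict (assumes Python 3.7+):
sorted_d = dict(sorted(d.items(), key=lambda kv: kv[1]))
print(sorted_d)  # {'c': 4, 'a': 10, 'b': 15}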
# ---------------------------- Solving problems with dictionaries ------------------------------
# How to count the number of repetitions of identical elements in a list
text = "Ну и деревня! С роду таких деревень не видел и не знал, что такие такие деревни бывают."
count_dict = {}
# In the first loop we initialize the dictionary
for i in range(len(text)):
    count_dict[text[i]] = 0  # each key (a character) gets the value 0
# In the second loop we fill in the counts (first variant)
# for i in range(len(text)):
#     count_dict[text[i]] += 1
#
# print(count_dict)
# {'Н': 1, 'у': 2, ' ': 16, 'и': 7, 'д': 5, 'е': 12, 'р': 4, 'в': 5, 'н': 6, 'я': 1, '!': 1, 'С': 1, 'о': 2, 'т': 5,
# 'а': 5, 'к': 3, 'х': 1, 'ь': 1, 'л': 2, 'з': 1, ',': 1, 'ч': 1, 'б': 1, 'ы': 1, 'ю': 1, '.': 1}
# Second variant
for i in text:  # here we iterate over the text itself, so i holds one character at a time
    count_dict[i] += 1
print(count_dict)
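# For comparison (added note): the standard library offers collections.Counter, which produces the
# same character counts as the manual loops above:
from collections import Counter
print(Counter(text))  # e.g. Counter({' ': 16, 'е': 12, 'и': 7, ...})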
# Print the count for a specific character
sym = input('Enter a character: ')
if sym in count_dict:
    print('The character', sym, 'occurs in the text', count_dict[sym], 'times.')
else:
    print('There is no such character in the text.')
# How to look at the list of keys
keys_dict = count_dict.keys()
print('List of keys ', keys_dict)
# List of keys dict_keys(['Н', 'у', ' ', 'и', 'д', 'е', 'р', 'в', 'н', 'я', '!', 'С', 'о', 'т', 'а', 'к', 'х', 'ь',
# 'л', 'з', ',', 'ч', 'б', 'ы', 'ю', '.'])
# List of values
val_dict = count_dict.values()
print('List of values', val_dict)
# List of values dict_values([1, 2, 16, 7, 5, 12, 4, 5, 6, 1, 1, 1, 2, 5, 5, 3, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1])
# Task 1. Write a function (F): it takes a list of names and an integer N;
# it returns a list of length N of random names from the first list (repeats are allowed;
# suggested values: 20 names, N = 100;
# the random module is recommended);
lst_name = ['Nancy', 'Alice', 'Mary', 'Hanna', 'Dolores', 'Brian', 'Stanley', 'Andrew', 'Michael', 'Nickolas',
'Johnathan', 'Angeline']
N = 100
def f(lst, n):
    rand_list = []  # empty list for the result
    for i in range(n):  # run the loop for n iterations
        # rand_name = random.choice(lst)  # pick a random name from the list and store it in a variable
        # rand_list.append(rand_name)  # append the random name to the result list
        rand_list.append(random.choice(lst))  # the two lines above merged into one
    return rand_list  # return the list of n random names
fin_list = f(lst_name, N)  # call the function with its arguments
print(fin_list)  # print the result
# We now have the result list fin_list, which we will keep working with
# Task 2. Write a function that prints the most frequent name in the list returned by F (the list fin_list);
# Solution using a loop
names_dict = {}
for i in range(len(fin_list)):
    names_dict[fin_list[i]] = fin_list.count(fin_list[i])  # fill the dictionary inside the loop
top_names_list = list(names_dict.items())  # move the data from the dictionary into a list
top_names_list.sort(key=lambda i: i[1], reverse=True)  # sort the list by values,
# from the largest to the smallest
print(f'The name {top_names_list[0][0]} occurs most often, namely {top_names_list[0][1]} times.')
# Solution using a function
def top(fin_list):
    # Get the unique values of the list via set
    # Then walk the list and count the number of repetitions of each word, building a dictionary
    pop_name = dict((fin_list.count(i), i) for i in set(fin_list))
    return pop_name[max(pop_name.keys())]
print(f'The name {top(fin_list)} occurs most often, namely {fin_list.count(top(fin_list))} times.')
# Task 3. Write a function that prints the rarest first letter among the names in the list returned by F.
# Solution using a loop
letters_dict = {}
for i in range(len(fin_list)):
    letters_dict[fin_list[i][0]] = letters_dict.get(fin_list[i][0], 0) + 1  # count how many names start with this letter
letters_list = sorted(letters_dict.items(), key=lambda i: i[1])
# print(letters_list)
print('The first letter', letters_list[0][0], 'occurs least often among the names, namely', letters_list[0][1], 'times.')
# solution using a function
letters_list = [fin_list[i][0] for i in range(len(fin_list))]
# print(letters_list)
def rare(letters_list):
    letters_dict = dict((letters_list[i], letters_list.count(letters_list[i])) for i in range(len(letters_list)))
    letters_dict_sort = sorted(letters_dict.items(), key=lambda i: i[1])
    return letters_dict_sort[0]
# print(rare(letters_list))
print(f'The first letter {(rare(letters_list))[0]} occurs least often among the names, namely {(rare(letters_list))[1]} times.')
|
the-stack_0_14433 | import pytest
import os
from g_code_parsing.g_code_engine import GCodeEngine
from g_code_parsing.g_code_program.supported_text_modes import (
SupportedTextModes,
)
from opentrons.hardware_control.emulation.settings import (
Settings,
SmoothieSettings,
PipetteSettings,
)
from g_code_parsing.utils import get_configuration_dir
CONFIG = Settings(
host="0.0.0.0",
smoothie=SmoothieSettings(
left=PipetteSettings(model="p20_single_v2.0", id="P20SV202020070101"),
right=PipetteSettings(model="p20_single_v2.0", id="P20SV202020070101"),
),
)
PROTOCOL_PATH = os.path.join(
get_configuration_dir(), "protocol", "protocols", "smoothie_protocol.py"
)
@pytest.fixture
def protocol_g_code_engine() -> GCodeEngine:
return GCodeEngine(CONFIG)
async def test_watcher_command_list_is_cleared(protocol_g_code_engine: GCodeEngine):
"""
If everything is cleaning up correctly then 2 runs of the same protocol
should return the same exact G-Code
"""
with protocol_g_code_engine.run_protocol(PROTOCOL_PATH) as run_1:
run_1_desc = run_1.get_text_explanation(SupportedTextModes.G_CODE)
with protocol_g_code_engine.run_protocol(PROTOCOL_PATH) as run_2:
run_2_desc = run_2.get_text_explanation(SupportedTextModes.G_CODE)
assert run_1_desc == run_2_desc
|
the-stack_0_14436 | # -*- coding: utf-8 -*-
"""Plugin to create a Quantum Espresso neb.x input file."""
import copy
import os
from aiida import orm
from aiida.common import InputValidationError, CalcInfo, CodeInfo
from aiida.common.lang import classproperty
from aiida_quantumespresso.calculations.pw import PwCalculation
from aiida_quantumespresso.calculations import _lowercase_dict, _uppercase_dict, _pop_parser_options
from aiida_quantumespresso.utils.convert import convert_input_to_namelist_entry
from .base import CalcJob
class NebCalculation(CalcJob):
"""Nudged Elastic Band code (neb.x) of Quantum ESPRESSO distribution."""
_PREFIX = 'aiida'
# in restarts, will not copy but use symlinks
_default_symlink_usage = False
# Default input and output file names
_DEFAULT_INPUT_FILE = 'neb.dat'
_DEFAULT_OUTPUT_FILE = 'aiida.out'
_PSEUDO_SUBFOLDER = PwCalculation._PSEUDO_SUBFOLDER # pylint: disable=protected-access
_OUTPUT_SUBFOLDER = PwCalculation._OUTPUT_SUBFOLDER # pylint: disable=protected-access
# Keywords that cannot be set (for the PW input)
_blocked_keywords = []
_use_kpoints = True
@classproperty
def _internal_retrieve_list(cls):
# pylint: disable=no-self-argument
# I retrieve them all, even if I don't parse all of them
_neb_ext_list = ['path', 'dat', 'int']
return ['{}.{}'.format(cls._PREFIX, ext) for ext in _neb_ext_list]
@classproperty
def xml_filepaths(cls):
"""Return a list of relative filepaths of XML files."""
# pylint: disable=no-self-argument,not-an-iterable
filepaths = []
for filename in PwCalculation.xml_filenames:
filepath = os.path.join(cls._OUTPUT_SUBFOLDER, cls._PREFIX + '_*[0-9]', cls._PREFIX + '.save', filename)
filepaths.append(filepath)
return filepaths
@classmethod
def define(cls, spec):
"""Define the process specification."""
# yapf: disable
super().define(spec)
spec.input('metadata.options.input_filename', valid_type=str, default=cls._DEFAULT_INPUT_FILE)
spec.input('metadata.options.output_filename', valid_type=str, default=cls._DEFAULT_OUTPUT_FILE)
spec.input('metadata.options.parser_name', valid_type=str, default='quantumespresso.neb')
spec.input('first_structure', valid_type=orm.StructureData, help='Initial structure')
spec.input('last_structure', valid_type=orm.StructureData, help='Final structure')
spec.input('parameters', valid_type=orm.Dict, help='NEB-specific input parameters')
spec.input('settings', valid_type=orm.Dict, required=False,
help='Optional parameters to affect the way the calculation job and the parsing are performed.')
spec.input('parent_folder', valid_type=orm.RemoteData, required=False,
help='An optional working directory of a previously completed calculation to restart from.')
# We reuse some inputs from PwCalculation to construct the PW-specific parts of the input files
spec.expose_inputs(PwCalculation, namespace='pw', include=('parameters', 'pseudos', 'kpoints', 'vdw_table'))
spec.output('output_parameters', valid_type=orm.Dict,
help='The output parameters dictionary of the NEB calculation')
spec.output('output_trajectory', valid_type=orm.TrajectoryData)
spec.output('iteration_array', valid_type=orm.ArrayData, required=False)
spec.output('output_mep', valid_type=orm.ArrayData,
help='The original and interpolated energy profiles along the minimum-energy path (mep)')
spec.default_output_node = 'output_parameters'
spec.exit_code(300, 'ERROR_NO_RETRIEVED_FOLDER',
message='The retrieved folder data node could not be accessed.')
spec.exit_code(303, 'ERROR_MISSING_XML_FILE',
message='The required XML file is not present in the retrieved folder.')
spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ',
message='The stdout output file could not be read.')
spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_PARSE',
message='The output file contains invalid output.')
spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE',
message='The stdout output file was incomplete probably because the calculation got interrupted.')
spec.exit_code(320, 'ERROR_OUTPUT_XML_READ',
message='The XML output file could not be read.')
spec.exit_code(321, 'ERROR_OUTPUT_XML_PARSE',
message='The XML output file could not be parsed.')
spec.exit_code(322, 'ERROR_OUTPUT_XML_FORMAT',
message='The XML output file has an unsupported format.')
spec.exit_code(350, 'ERROR_UNEXPECTED_PARSER_EXCEPTION',
message='The parser raised an unexpected exception.')
@classmethod
def _generate_input_files(cls, neb_parameters, settings_dict):
"""Generate the input data for the NEB part of the calculation."""
# I put the first-level keys as uppercase (i.e., namelist and card names)
# and the second-level keys as lowercase
# (deeper levels are unchanged)
input_params = _uppercase_dict(neb_parameters.get_dict(), dict_name='parameters')
input_params = {k: _lowercase_dict(v, dict_name=k) for k, v in input_params.items()}
# Force default values for blocked keywords. NOTE: this is different from PW/CP
for blocked in cls._blocked_keywords:
namelist = blocked[0].upper()
key = blocked[1].lower()
value = blocked[2]
if namelist in input_params:
if key in input_params[namelist]:
raise InputValidationError(
"You cannot specify explicitly the '{}' key in the '{}' namelist.".format(key, namelist)
)
else:
input_params[namelist] = {}
input_params[namelist][key] = value
# Create an empty dictionary for the compulsory namelist 'PATH' if not present
if 'PATH' not in input_params:
input_params['PATH'] = {}
# In case of climbing image, we need the corresponding card
ci_scheme = input_params['PATH'].get('ci_scheme', 'no-ci').lower()
climbing_image_list = settings_dict.pop('CLIMBING_IMAGES', None)
if ci_scheme == 'manual':
manual_climbing_image = True
if climbing_image_list is None:
raise InputValidationError("'ci_scheme' is {}, but no climbing images were specified for this "
'calculation.'.format(ci_scheme))
if not isinstance(climbing_image_list, list):
raise InputValidationError('Climbing images should be provided as a list.')
num_of_images = input_params['PATH'].get('num_of_images', 2)
if any([(i < 2 or i >= num_of_images) for i in climbing_image_list]):
raise InputValidationError('The climbing images should be in the range between the first '
'and the last image (excluded).')
climbing_image_card = 'CLIMBING_IMAGES\n'
climbing_image_card += ', '.join([str(_) for _ in climbing_image_list]) + '\n'
else:
manual_climbing_image = False
if climbing_image_list is not None:
raise InputValidationError("Climbing images are not accepted when 'ci_scheme' is {}.".format(ci_scheme))
input_data = '&PATH\n'
# namelist content; set to {} if not present, so that we leave an empty namelist
namelist = input_params.pop('PATH', {})
for key, value in sorted(namelist.items()):
input_data += convert_input_to_namelist_entry(key, value)
input_data += '/\n'
# Write CI cards now
if manual_climbing_image:
input_data += climbing_image_card
if input_params:
raise InputValidationError(
'The following namelists are specified in input_params, but are '
'not valid namelists for the current type of calculation: '
'{}'.format(','.join(list(input_params.keys()))))
return input_data
def prepare_for_submission(self, folder):
"""Prepare the calculation job for submission by transforming input nodes into input files.
In addition to the input files being written to the sandbox folder, a `CalcInfo` instance will be returned that
contains lists of files that need to be copied to the remote machine before job submission, as well as file
lists that are to be retrieved after job completion.
:param folder: a sandbox folder to temporarily write files on disk.
:return: :py:`~aiida.common.datastructures.CalcInfo` instance.
"""
# pylint: disable=too-many-branches,too-many-statements
import numpy as np
local_copy_list = []
remote_copy_list = []
remote_symlink_list = []
# Convert settings dictionary to have uppercase keys, or create an empty one if none was given.
if 'settings' in self.inputs:
settings_dict = _uppercase_dict(self.inputs.settings.get_dict(), dict_name='settings')
else:
settings_dict = {}
first_structure = self.inputs.first_structure
last_structure = self.inputs.last_structure
# Check that the first and last image have the same cell
if abs(np.array(first_structure.cell)-
np.array(last_structure.cell)).max() > 1.e-4:
            raise InputValidationError('Different cell in the first and last image')
# Check that the first and last image have the same number of sites
if len(first_structure.sites) != len(last_structure.sites):
            raise InputValidationError('Different number of sites in the first and last image')
# Check that sites in the initial and final structure have the same kinds
if first_structure.get_site_kindnames() != last_structure.get_site_kindnames():
raise InputValidationError('Mismatch between the kind names and/or order between '
'the first and final image')
# Check that a pseudo potential was specified for each kind present in the `StructureData`
# self.inputs.pw.pseudos is a plumpy.utils.AttributesFrozendict
kindnames = [kind.name for kind in first_structure.kinds]
if set(kindnames) != set(self.inputs.pw.pseudos.keys()):
raise InputValidationError(
'Mismatch between the defined pseudos and the list of kinds of the structure.\nPseudos: {};\n'
'Kinds: {}'.format(', '.join(list(self.inputs.pw.pseudos.keys())), ', '.join(list(kindnames))))
##############################
# END OF INITIAL INPUT CHECK #
##############################
# Create the subfolder that will contain the pseudopotentials
folder.get_subfolder(self._PSEUDO_SUBFOLDER, create=True)
# Create the subfolder for the output data (sometimes Quantum ESPRESSO codes crash if the folder does not exist)
folder.get_subfolder(self._OUTPUT_SUBFOLDER, create=True)
# We first prepare the NEB-specific input file.
neb_input_filecontent = self._generate_input_files(self.inputs.parameters, settings_dict)
with folder.open(self.inputs.metadata.options.input_filename, 'w') as handle:
handle.write(neb_input_filecontent)
# We now generate the PW input files for each input structure
local_copy_pseudo_list = []
for i, structure in enumerate([first_structure, last_structure]):
# We need to a pass a copy of the settings_dict for each structure
this_settings_dict = copy.deepcopy(settings_dict)
pw_input_filecontent, this_local_copy_pseudo_list = PwCalculation._generate_PWCPinputdata( # pylint: disable=protected-access
self.inputs.pw.parameters, this_settings_dict, self.inputs.pw.pseudos, structure, self.inputs.pw.kpoints
)
local_copy_pseudo_list += this_local_copy_pseudo_list
with folder.open('pw_{}.in'.format(i + 1), 'w') as handle:
handle.write(pw_input_filecontent)
# We need to pop the settings that were used in the PW calculations
for key in list(settings_dict.keys()):
if key not in list(this_settings_dict.keys()):
settings_dict.pop(key)
# We avoid to copy twice the same pseudopotential to the same filename
local_copy_pseudo_list = set(local_copy_pseudo_list)
# We check that two different pseudopotentials are not copied
# with the same name (otherwise the first is overwritten)
if len({filename for (uuid, filename, local_path) in local_copy_pseudo_list}) < len(local_copy_pseudo_list):
raise InputValidationError('Same filename for two different pseudopotentials')
local_copy_list += local_copy_pseudo_list
# If present, add also the Van der Waals table to the pseudo dir. Note that the name of the table is not checked
# but should be the one expected by Quantum ESPRESSO.
vdw_table = self.inputs.get('pw.vdw_table', None)
if vdw_table:
local_copy_list.append((
vdw_table.uuid,
vdw_table.filename,
os.path.join(self._PSEUDO_SUBFOLDER, vdw_table.filename)
))
# operations for restart
parent_calc_folder = self.inputs.get('parent_folder', None)
symlink = settings_dict.pop('PARENT_FOLDER_SYMLINK', self._default_symlink_usage) # a boolean
if symlink:
if parent_calc_folder is not None:
# I put the symlink to the old parent ./out folder
remote_symlink_list.append((
parent_calc_folder.computer.uuid,
os.path.join(parent_calc_folder.get_remote_path(),
self._OUTPUT_SUBFOLDER, '*'), # asterisk: make individual symlinks for each file
self._OUTPUT_SUBFOLDER
))
# and to the old parent prefix.path
remote_symlink_list.append((
parent_calc_folder.computer.uuid,
os.path.join(parent_calc_folder.get_remote_path(),
'{}.path'.format(self._PREFIX)),
'{}.path'.format(self._PREFIX)
))
else:
# copy remote output dir and .path file, if specified
if parent_calc_folder is not None:
remote_copy_list.append((
parent_calc_folder.computer.uuid,
os.path.join(parent_calc_folder.get_remote_path(),
self._OUTPUT_SUBFOLDER, '*'),
self._OUTPUT_SUBFOLDER
))
# and copy the old parent prefix.path
remote_copy_list.append((
parent_calc_folder.computer.uuid,
os.path.join(parent_calc_folder.get_remote_path(),
'{}.path'.format(self._PREFIX)),
'{}.path'.format(self._PREFIX)
))
# here we may create an aiida.EXIT file
create_exit_file = settings_dict.pop('ONLY_INITIALIZATION', False)
if create_exit_file:
exit_filename = '{}.EXIT'.format(self._PREFIX)
with folder.open(exit_filename, 'w') as handle:
handle.write('\n')
calcinfo = CalcInfo()
codeinfo = CodeInfo()
calcinfo.uuid = self.uuid
cmdline_params = settings_dict.pop('CMDLINE', [])
calcinfo.local_copy_list = local_copy_list
calcinfo.remote_copy_list = remote_copy_list
calcinfo.remote_symlink_list = remote_symlink_list
# In neb calculations there is no input read from standard input!!
codeinfo.cmdline_params = (['-input_images', '2'] + list(cmdline_params))
codeinfo.stdout_name = self.inputs.metadata.options.output_filename
codeinfo.code_uuid = self.inputs.code.uuid
calcinfo.codes_info = [codeinfo]
# Retrieve the output files and the xml files
calcinfo.retrieve_list = []
calcinfo.retrieve_list.append(self.inputs.metadata.options.output_filename)
calcinfo.retrieve_list.append((
os.path.join(self._OUTPUT_SUBFOLDER, self._PREFIX + '_*[0-9]', 'PW.out'), # source relative path (globbing)
'.', # destination relative path
2 # depth to preserve
))
for xml_filepath in self.xml_filepaths: # pylint: disable=not-an-iterable
calcinfo.retrieve_list.append([xml_filepath, '.', 3])
calcinfo.retrieve_list += settings_dict.pop('ADDITIONAL_RETRIEVE_LIST', [])
calcinfo.retrieve_list += self._internal_retrieve_list
# We might still have parser options in the settings dictionary: pop them.
_pop_parser_options(self, settings_dict)
if settings_dict:
unknown_keys = ', '.join(list(settings_dict.keys()))
raise InputValidationError('`settings` contained unexpected keys: {}'.format(unknown_keys))
return calcinfo
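# Illustrative sketch of the neb.dat content produced by _generate_input_files (values below are
# hypothetical; the real file depends on the 'parameters' Dict node and the optional CLIMBING_IMAGES
# entry in 'settings'):
#
#     &PATH
#       ci_scheme = 'manual'
#       num_of_images = 7
#     /
#     CLIMBING_IMAGES
#     4
#
# One "key = value" line is written per entry of the PATH namelist via convert_input_to_namelist_entry,
# and the CLIMBING_IMAGES card is only appended when ci_scheme == 'manual'.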
|
the-stack_0_14440 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Defines an explainable lightgbm model."""
import inspect
import json
import logging
from packaging import version
from scipy.sparse import issparse
from ...common.constants import (ExplainableModelType, Extension,
LightGBMSerializationConstants,
ShapValuesOutput)
from ...common.warnings_suppressor import shap_warnings_suppressor
from .explainable_model import (BaseExplainableModel, _clean_doc,
_get_initializer_args)
from .tree_model_utils import (_expected_values_tree_surrogate,
_explain_local_tree_surrogate)
with shap_warnings_suppressor():
import shap
try:
import lightgbm
from lightgbm import Booster, LGBMClassifier, LGBMRegressor
if (version.parse(lightgbm.__version__) <= version.parse('2.2.1')):
print("Using older than supported version of lightgbm, please upgrade to version greater than 2.2.1")
except ImportError:
print("Could not import lightgbm, required if using LGBMExplainableModel")
DEFAULT_RANDOM_STATE = 123
_N_FEATURES = '_n_features'
_N_CLASSES = '_n_classes'
NUM_ITERATIONS = 'num_iterations'
_FITTED = 'fitted_'
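# Rough usage sketch (hypothetical; in interpret-community this class is normally passed to a mimic
# explainer as the surrogate model rather than used directly, and the variable names below are
# placeholders):
#
#     surrogate = LGBMExplainableModel(multiclass=False, random_state=DEFAULT_RANDOM_STATE)
#     surrogate.fit(X_train, teacher_predictions)      # X_train / teacher_predictions are placeholder names
#     local_importances = surrogate.explain_local(X_test)
#
# The fit/explain_local signatures follow the BaseExplainableModel interface assumed by this module.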
class _LGBMFunctionWrapper(object):
"""Decorate the predict method, temporary workaround for sparse case until TreeExplainer support is added.
:param function: The prediction function to wrap.
:type function: function
"""
def __init__(self, function):
"""Wraps a function to reshape the input data.
:param function: The prediction function to wrap.
:type function: function
"""
self._function = function
def predict_wrapper(self, X, *args, **kwargs):
"""Wraps a prediction function from lightgbm learner.
If version is ==3.0.0, densifies the input dataset.
:param X: The model evaluation examples.
:type X: numpy.ndarray
:return: Prediction result.
:rtype: numpy.ndarray
"""
if issparse(X):
X = X.toarray()
return self._function(X, *args, **kwargs)
class _SparseTreeExplainer(object):
"""Wraps the lightgbm model to enable sparse feature contributions.
If version is >=3.1.0, runs on sparse input data by calling predict function directly.
:param lgbm: The lightgbm model to wrap.
:type lgbm: LGBMModel
:param tree_explainer: The tree_explainer used for dense data.
:type tree_explainer: shap.TreeExplainer
"""
def __init__(self, lgbm, tree_explainer):
"""Wraps the lightgbm model to enable sparse feature contributions.
:param lgbm: The lightgbm model to wrap.
:type lgbm: LGBMModel
:param tree_explainer: The tree_explainer used for dense data.
:type tree_explainer: shap.TreeExplainer
"""
self._lgbm = lgbm
self._tree_explainer = tree_explainer
self._num_iters = -1
# Get the number of iterations trained for from the booster
if hasattr(self._lgbm._Booster, 'params'):
if NUM_ITERATIONS in self._lgbm._Booster.params:
self._num_iters = self._lgbm._Booster.params[NUM_ITERATIONS]
# If best iteration specified, use that
if self._lgbm._best_iteration is not None:
self._num_iters = self._lgbm._best_iteration
self.expected_value = None
def shap_values(self, X):
"""Calls lightgbm predict directly for sparse case.
If lightgbm version is >=3.1.0, runs on sparse input data
by calling predict function directly with pred_contrib=True.
Uses tree explainer for dense input data.
:param X: The model evaluation examples.
:type X: numpy.ndarray or scipy.sparse.csr_matrix
:return: The feature importance values.
:rtype: numpy.ndarray, scipy.sparse or list of scipy.sparse
"""
if issparse(X):
shap_values = self._lgbm.predict(X,
num_iteration=self._num_iters,
pred_contrib=True)
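            # Editor's note (a sketch of lightgbm's pred_contrib layout, not from the
            # original source): predict(..., pred_contrib=True) appends one extra
            # column per class holding the expected (base) value, which is why the
            # code below peels off the last column as expected_value and keeps only
            # the per-feature contributions.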
if isinstance(shap_values, list):
shape = shap_values[0].shape
self.expected_value = shap_values[0][0, shape[1] - 1]
for idx, class_values in enumerate(shap_values):
shap_values[idx] = class_values[:, :shape[1] - 1]
else:
shape = shap_values.shape
self.expected_value = shap_values[0, shape[1] - 1]
shap_values = shap_values[:, :shape[1] - 1]
else:
shap_values = self._tree_explainer.shap_values(X)
self.expected_value = self._tree_explainer.expected_value
return shap_values
class LGBMExplainableModel(BaseExplainableModel):
available_explanations = [Extension.GLOBAL, Extension.LOCAL]
explainer_type = Extension.GLASSBOX
"""LightGBM (fast, high performance framework based on decision tree) explainable model.
Please see documentation for more details: https://github.com/Microsoft/LightGBM
Additional arguments to LightGBMClassifier and LightGBMRegressor can be passed through kwargs.
:param multiclass: Set to true to generate a multiclass model.
:type multiclass: bool
:param random_state: Int to seed the model.
:type random_state: int
:param shap_values_output: The type of the output from explain_local when using TreeExplainer.
Currently only types 'default', 'probability' and 'teacher_probability' are supported. If
'probability' is specified, then we approximately scale the raw log-odds values from the
TreeExplainer to probabilities.
:type shap_values_output: interpret_community.common.constants.ShapValuesOutput
:param classification: Indicates if this is a classification or regression explanation.
:type classification: bool
"""
def __init__(self, multiclass=False, random_state=DEFAULT_RANDOM_STATE,
shap_values_output=ShapValuesOutput.DEFAULT, classification=True, **kwargs):
"""Initialize the LightGBM Model.
Additional arguments to LightGBMClassifier and LightGBMRegressor can be passed through kwargs.
:param multiclass: Set to true to generate a multiclass model.
:type multiclass: bool
:param random_state: Int to seed the model.
:type random_state: int
:param shap_values_output: The type of the output from explain_local when using TreeExplainer.
Currently only types 'default', 'probability' and 'teacher_probability' are supported. If
'probability' is specified, then we approximately scale the raw log-odds values from the
TreeExplainer to probabilities.
:type shap_values_output: interpret_community.common.constants.ShapValuesOutput
:param classification: Indicates if this is a classification or regression explanation.
:type classification: bool
"""
self.multiclass = multiclass
initializer_args = _get_initializer_args(kwargs)
if self.multiclass:
initializer = LGBMClassifier
else:
initializer = LGBMRegressor
self._lgbm = initializer(random_state=random_state, **initializer_args)
super(LGBMExplainableModel, self).__init__(**kwargs)
self._logger.debug('Initializing LGBMExplainableModel')
self._method = 'lightgbm'
self._tree_explainer = None
self._shap_values_output = shap_values_output
self._classification = classification
try:
__init__.__doc__ = (__init__.__doc__ +
'\nIf multiclass=True, uses the parameters for LGBMClassifier:\n' +
_clean_doc(LGBMClassifier.__init__.__doc__) +
'\nOtherwise, if multiclass=False, uses the parameters for LGBMRegressor:\n' +
_clean_doc(LGBMRegressor.__init__.__doc__))
except Exception:
pass
def fit(self, dataset, labels, **kwargs):
"""Call lightgbm fit to fit the explainable model.
:param dataset: The dataset to train the model on.
:type dataset: numpy.ndarray or pandas.DataFrame or scipy.sparse.csr_matrix
:param labels: The labels to train the model on.
:type labels: numpy.ndarray
"""
self._lgbm.fit(dataset, labels, **kwargs)
try:
fit.__doc__ = (fit.__doc__ +
'\nIf multiclass=True, uses the parameters for LGBMClassifier:\n' +
_clean_doc(LGBMClassifier.fit.__doc__) +
'\nOtherwise, if multiclass=False, uses the parameters for LGBMRegressor:\n' +
_clean_doc(LGBMRegressor.fit.__doc__))
except Exception:
pass
def predict(self, dataset, **kwargs):
"""Call lightgbm predict to predict labels using the explainable model.
:param dataset: The dataset to predict on.
:type dataset: numpy.ndarray or pandas.DataFrame or scipy.sparse.csr_matrix
:return: The predictions of the model.
:rtype: list
"""
return self._lgbm.predict(dataset, **kwargs)
try:
predict.__doc__ = (predict.__doc__ +
'\nIf multiclass=True, uses the parameters for LGBMClassifier:\n' +
_clean_doc(LGBMClassifier.predict.__doc__) +
'\nOtherwise, if multiclass=False, uses the parameters for LGBMRegressor:\n' +
_clean_doc(LGBMRegressor.predict.__doc__))
except Exception:
pass
def predict_proba(self, dataset, **kwargs):
"""Call lightgbm predict_proba to predict probabilities using the explainable model.
:param dataset: The dataset to predict probabilities on.
:type dataset: numpy.ndarray or pandas.DataFrame or scipy.sparse.csr_matrix
:return: The predictions of the model.
:rtype: list
"""
if self.multiclass:
return self._lgbm.predict_proba(dataset, **kwargs)
else:
raise Exception("predict_proba not supported for regression or binary classification dataset")
try:
predict_proba.__doc__ = (predict_proba.__doc__ +
'\nIf multiclass=True, uses the parameters for LGBMClassifier:\n' +
_clean_doc(LGBMClassifier.predict_proba.__doc__) +
'\nOtherwise predict_proba is not supported for ' +
'regression or binary classification.\n')
except Exception:
pass
def explain_global(self, **kwargs):
"""Call lightgbm feature importances to get the global feature importances from the explainable model.
:return: The global explanation of feature importances.
:rtype: numpy.ndarray
"""
return self._lgbm.feature_importances_
def _init_tree_explainer(self):
"""Creates the TreeExplainer.
Includes a temporary fix for lightgbm 3.0 by wrapping predict method
for sparse case to output dense data.
Includes another temporary fix for lightgbm >= 3.1 to call predict
function directly for sparse input data until shap TreeExplainer
support is added.
"""
if self._tree_explainer is None:
self._tree_explainer = shap.TreeExplainer(self._lgbm)
if version.parse('3.1.0') <= version.parse(lightgbm.__version__):
self._tree_explainer = _SparseTreeExplainer(self._lgbm, self._tree_explainer)
elif version.parse('3.0.0') == version.parse(lightgbm.__version__):
wrapper = _LGBMFunctionWrapper(self._tree_explainer.model.original_model.predict)
self._tree_explainer.model.original_model.predict = wrapper.predict_wrapper
def explain_local(self, evaluation_examples, probabilities=None, **kwargs):
"""Use TreeExplainer to get the local feature importances from the trained explainable model.
:param evaluation_examples: The evaluation examples to compute local feature importances for.
:type evaluation_examples: numpy.ndarray or pandas.DataFrame or scipy.sparse.csr_matrix
:param probabilities: If output_type is probability, can specify the teacher model's
probability for scaling the shap values.
:type probabilities: numpy.ndarray
:return: The local explanation of feature importances.
:rtype: Union[list, numpy.ndarray]
"""
self._init_tree_explainer()
return _explain_local_tree_surrogate(self._lgbm, evaluation_examples, self._tree_explainer,
self._shap_values_output, self._classification,
probabilities, self.multiclass)
@property
def expected_values(self):
"""Use TreeExplainer to get the expected values.
:return: The expected values of the LightGBM tree model.
:rtype: list
"""
self._init_tree_explainer()
return _expected_values_tree_surrogate(self._lgbm, self._tree_explainer, self._shap_values_output,
self._classification, self.multiclass)
@property
def model(self):
"""Retrieve the underlying model.
:return: The lightgbm model, either classifier or regressor.
:rtype: Union[LGBMClassifier, LGBMRegressor]
"""
return self._lgbm
@staticmethod
def explainable_model_type():
"""Retrieve the model type.
:return: Tree explainable model type.
:rtype: ExplainableModelType
"""
return ExplainableModelType.TREE_EXPLAINABLE_MODEL_TYPE
def _save(self):
"""Return a string dictionary representation of the LGBMExplainableModel.
:return: A serialized dictionary representation of the LGBMExplainableModel.
:rtype: dict
"""
properties = {}
# Save all of the properties
for key, value in self.__dict__.items():
if key in LightGBMSerializationConstants.nonify_properties:
properties[key] = None
elif key in LightGBMSerializationConstants.save_properties:
# Save booster model to string representation
# This is not recommended but can be necessary to get around pickle being not secure
# See here for more info:
# https://github.com/Microsoft/LightGBM/issues/1942
# https://github.com/Microsoft/LightGBM/issues/1217
properties[key] = value.booster_.model_to_string()
else:
properties[key] = json.dumps(value)
# Need to add _n_features
properties[_N_FEATURES] = self._lgbm._n_features
# And if classification case need to add _n_classes
if self.multiclass:
properties[_N_CLASSES] = self._lgbm._n_classes
if hasattr(self._lgbm, _FITTED):
properties[_FITTED] = json.dumps(getattr(self._lgbm, _FITTED))
return properties
@staticmethod
def _load(properties):
"""Load a LGBMExplainableModel from the given properties.
:param properties: A serialized dictionary representation of the LGBMExplainableModel.
:type properties: dict
:return: The deserialized LGBMExplainableModel.
:rtype: interpret_community.mimic.models.LGBMExplainableModel
"""
# create the LGBMExplainableModel without any properties using the __new__ function, similar to pickle
lgbm_model = LGBMExplainableModel.__new__(LGBMExplainableModel)
# Get _n_features
_n_features = properties.pop(_N_FEATURES)
# If classification case get _n_classes
if json.loads(properties[LightGBMSerializationConstants.MULTICLASS]):
_n_classes = properties.pop(_N_CLASSES)
fitted_ = None
if _FITTED in properties:
fitted_ = json.loads(properties[_FITTED])
elif version.parse('3.3.1') <= version.parse(lightgbm.__version__):
# If deserializing older model in newer version set this to true to prevent errors on calls
fitted_ = True
# load all of the properties
for key, value in properties.items():
# Regenerate the properties on the fly
if key in LightGBMSerializationConstants.nonify_properties:
if key == LightGBMSerializationConstants.LOGGER:
parent = logging.getLogger(__name__)
lightgbm_identity = json.loads(properties[LightGBMSerializationConstants.IDENTITY])
lgbm_model.__dict__[key] = parent.getChild(lightgbm_identity)
elif key == LightGBMSerializationConstants.TREE_EXPLAINER:
lgbm_model.__dict__[key] = None
else:
raise Exception("Unknown nonify key on deserialize in LightGBMExplainableModel: {}".format(key))
elif key in LightGBMSerializationConstants.save_properties:
# Load the booster from file and re-create the LGBMClassifier or LGBMRegressor
# This is not recommended but can be necessary to get around pickle being not secure
# See here for more info:
# https://github.com/Microsoft/LightGBM/issues/1942
# https://github.com/Microsoft/LightGBM/issues/1217
booster_args = {LightGBMSerializationConstants.MODEL_STR: value}
is_multiclass = json.loads(properties[LightGBMSerializationConstants.MULTICLASS])
if is_multiclass:
objective = LightGBMSerializationConstants.MULTICLASS
else:
objective = LightGBMSerializationConstants.REGRESSION
if LightGBMSerializationConstants.MODEL_STR in inspect.getargspec(Booster).args:
extras = {LightGBMSerializationConstants.OBJECTIVE: objective}
lgbm_booster = Booster(**booster_args, params=extras)
else:
# For backwards compatibility with older versions of lightgbm
booster_args[LightGBMSerializationConstants.OBJECTIVE] = objective
lgbm_booster = Booster(params=booster_args)
if is_multiclass:
new_lgbm = LGBMClassifier()
new_lgbm._Booster = lgbm_booster
new_lgbm._n_classes = _n_classes
else:
new_lgbm = LGBMRegressor()
new_lgbm._Booster = lgbm_booster
# Specify fitted_ for newer versions of lightgbm on deserialize
if fitted_ is not None:
new_lgbm.fitted_ = fitted_
new_lgbm._n_features = _n_features
lgbm_model.__dict__[key] = new_lgbm
elif key in LightGBMSerializationConstants.enum_properties:
# NOTE: If more enums added in future, will need to handle this differently
lgbm_model.__dict__[key] = ShapValuesOutput(json.loads(value))
else:
lgbm_model.__dict__[key] = json.loads(value)
return lgbm_model
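# Hedged usage sketch (editor's addition, not part of the original module): the
# surrogate is normally driven by the mimic explainer, but standalone use would
# look roughly like this, assuming X_train, y_train and X_test already exist:
#
#     surrogate = LGBMExplainableModel(multiclass=False, random_state=DEFAULT_RANDOM_STATE)
#     surrogate.fit(X_train, y_train)
#     global_importances = surrogate.explain_global()
#     local_importances = surrogate.explain_local(X_test)
#     base_values = surrogate.expected_values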
|
the-stack_0_14442 | import datetime
def printAppBanner():
print("--------------------------------------")
print(" BIRTHDAY APP ")
print("--------------------------------------")
def getBirthday():
print("What is your birth date?")
year = int(input("Year [YYY]? "))
month = int(input("Month [MM]? "))
day = int(input("Day [DD]? "))
return datetime.date(year, month, day)
def currDate():
return datetime.date.today()
def get_days_difference(current_date, birthday):
#print(current_date)
#print(birthday)
this_year_birthday = datetime.date(current_date.year, birthday.month, birthday.day)
next_year_birthday = datetime.date(current_date.year+1, birthday.month, birthday.day)
#print(this_year_birthday)
diffT = this_year_birthday - current_date
diffN = next_year_birthday - current_date
if (abs(diffT.days) > diffN.days):
return diffN.days
return diffT.days
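# Worked example (editor's note): if current_date is 2021-03-10 and the birthday is
# March 14, diffT.days == 4 and diffN.days == 369, so 4 is returned ("in 4 days").
# If the birthday were March 1, diffT.days == -9 and diffN.days == 356; abs(-9) is
# not greater than 356, so -9 is returned and reported as "9 days ago".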
def printMessage(days_difference):
if days_difference == 0:
print("Today is your birthday, Happy Birthday!!!")
elif days_difference > 0:
print("Looks like your birthday is in {} days".format(days_difference))
else:
print("Looks like your birthday was {} days before".format(abs(days_difference)))
def main():
printAppBanner()
birthday = getBirthday()
current_date = currDate()
days_difference = get_days_difference(current_date, birthday)
printMessage(days_difference)
main()
|
the-stack_0_14444 | '''
Trains a convolutional neural network, using a pre-trained ImageNet model,
to infer the name of a flower given its image.
'''
# -------------------- IMPORT PACKAGES --------------------
import argparse
import os
from copy import deepcopy
from time import time
import torch
from torch import nn, optim
from torchvision import datasets, transforms, models
# -------------------- SETUP MAJOR INPUT VALUES --------------------
# Setup major inputs required from command line
parser = argparse.ArgumentParser(description='Trains a neural network')
parser.add_argument('data_directory', type=str,
help='Filepath for input data of format "data_dir/". \
Expected to be the parent directory with \
folders "train", "validation", and "test" inside, with each structured \
according to torchivision.datasets.ImageFolder requirements')
# Setup optional parameters that can be entered from the command line
parser.add_argument('-s', '--save_dir', type=str, metavar='',
default = 'model_checkpoints/',
help = 'Filepath indicating where trained model checkpoint files \
should be stored')
parser.add_argument('-a', '--arch', type=str, metavar='',
default = 'inception',
                    help = 'Pre-trained model from torchvision.models to use \
for the feature detector layers of your model')
parser.add_argument('-l', '--learning_rate', type=float,
default = 0.0005, metavar='',
help = 'Learning rate to use for the Adam optimizer')
parser.add_argument('-u', '--hidden_units', nargs='+', type=int,
default = [512, 256], metavar='',
help = 'Number of nodes to use in each hidden layer, ordered from \
earliest to latest layer. Not inclusive of the input layer \
(node count dictated by model architecture chosen) and \
output layer (always 102 = number of flower labels). \
Note that usage is --hidden_units count1 count2 count3...')
parser.add_argument('-d', '--dropout', type=bool,
default = True, metavar='',
help = 'Determines if dropout with p=0.2 will be used for \
each hidden layer')
parser.add_argument('-e', '--epochs', type=int,
default = 30, metavar='',
help = 'Number of epochs to use for training and validation')
parser.add_argument('-g', '--gpu', type=bool,
default = True, metavar='',
help = 'If GPU is available, indicates that it should be used')
parser.add_argument('-t', '--quick_test', type=bool,
default = False, metavar='',
help = 'If you just want to test the base code functionality quickly, \
set this to True. Will only load first batch for all training \
and testing.')
args = parser.parse_args()
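# Example invocation (editor's sketch; the script name and data path are
# placeholders, not taken from the original source):
#   python train.py flowers/ --arch inception --learning_rate 0.0005 \
#       --hidden_units 512 256 --epochs 30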
# -------------------- ARCHITECTURE-SPECIFIC SETUP --------------------
# Sets parameters for various items that are not model-architecture-agnostic
# torchvision.models.inception_v3()
if args.arch == 'inception':
crop_size = 299
model = models.inception_v3(pretrained=True)
#classifier = model.fc
input_nodes = 2048
elif args.arch == 'densenet':
crop_size = 224
model = models.densenet161(pretrained=True)
#classifier = model.classifier
input_nodes = 2208
else:
print("An unsupported model architecture was supplied. \
Program terminating...")
exit()
# Freeze parameters so we don't backprop through the pre-trained
# feature detector
for param in model.parameters():
param.requires_grad = False
# -------------------- DATA LOADING AND TRANSFORMATIONS --------------------
# Initial transformations for cropping should be dictated by
# model architecture chosen (resize should always be the same 512)
# Means and stdevs common for pre-trained networks
means = [0.485, 0.456, 0.406]
stdevs = [0.229, 0.224, 0.225]
data_dir = args.data_directory
# Code here adapted from https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
image_transforms = {'train': \
transforms.Compose([transforms.RandomResizedCrop(crop_size),
transforms.ColorJitter(brightness=0.15,
contrast=0.15,
saturation=0.15,
hue=0),
transforms.RandomAffine(30),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize(means, stdevs)]),
'valid': transforms.Compose([transforms.Resize(512),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
transforms.Normalize(means, stdevs)]),
'test': transforms.Compose([transforms.Resize(512),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
transforms.Normalize(means, stdevs)])
}
phases = ['train', 'valid', 'test']
data = {phase: datasets.ImageFolder(os.path.join(data_dir, phase),
image_transforms[phase]) for phase in phases}
dataloaders = {phase: torch.utils.data.DataLoader(data[phase],
batch_size=64) for phase in phases}
# Set training dataloader to have shuffle = True
dataloaders['train'] = torch.utils.data.DataLoader(data['train'],
batch_size=64, shuffle = True)
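# Expected on-disk layout (editor's note, inferred from the ImageFolder usage above;
# folder and file names are illustrative):
#   data_dir/train/<class_name>/<image>.jpg
#   data_dir/valid/<class_name>/<image>.jpg
#   data_dir/test/<class_name>/<image>.jpg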
# -------------------- CLASSIFIER BUILDING --------------------
# Build classifier portion of convolutional neural net to replace
# original ImageNet classifier
classifier = nn.Sequential()
nodes = args.hidden_units
classifier.add_module('hidden1', nn.Linear(input_nodes, nodes[0]))
for i, _ in enumerate(nodes):
if i+1 >= len(nodes): break
classifier.add_module('activation' + str(i+1), nn.ReLU())
if args.dropout: classifier.add_module('dropout' + str(i+1),
nn.Dropout(0.2))
classifier.add_module('hidden' + str(i+2), nn.Linear(nodes[i], nodes[i+1]))
classifier.add_module('activation' + str(i+1), nn.ReLU())
classifier.add_module('output', nn.Linear(nodes[-1], 102))
classifier.add_module('activation_output', nn.LogSoftmax(dim=1))
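# For the defaults (--arch inception, --hidden_units 512 256, dropout enabled) the
# loop above should produce roughly this stack (editor's sketch):
#   Linear(2048 -> 512) -> ReLU -> Dropout(0.2) -> Linear(512 -> 256) -> ReLU
#   -> Linear(256 -> 102) -> LogSoftmax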
if args.arch == 'inception':
model.fc = classifier
model_params = model.fc.parameters()
print(f"Classifier architecture:")
print(model.fc)
elif args.arch == 'densenet':
model.classifier = classifier
model_params = model.classifier.parameters()
print(f"Classifier architecture:")
print(model.classifier)
# -------------------- START EPOCHS --------------------
# If GPU is enabled, set device = 'cuda'. Otherwise use CPU
if torch.cuda.is_available() and args.gpu:
device = torch.device("cuda:0")
elif args.gpu and not torch.cuda.is_available():
print("\nGPU unavailable, using CPU\n")
device = torch.device("cpu")
else:
device = torch.device("cpu")
model.to(device)
# Good loss function to use for LogSoftMax activation layer
criterion = nn.NLLLoss()
# Only train the classifier parameters, feature parameters are frozen
optimizer = optim.Adam(model_params, lr=args.learning_rate)
t0 = time()
# Prep for saving the best epoch's model weights
# Code for this adapted from https://medium.com/datadriveninvestor/creating-a-pytorch-image-classifier-da9db139ba80
from copy import deepcopy
best = {'acc': 0.0, 'epoch': 0, 'weights': deepcopy(model.state_dict())}
epochs = args.epochs
# Used to keep the Udacity online workspace from
# disconnecting/going to sleep
from workspace_utils import keep_awake
# Keep GPU session awake in Udacity workspace until done training
epoch_iter = keep_awake(range(epochs))
#epoch_iter = range(epochs)
for e in epoch_iter:
# -------------------- TRAINING --------------------
# Make sure model is in training mode
model.train()
training_loss = 0
training_batch_counter = 0
for images, labels in dataloaders['train']:
# Move input and label tensors to the GPU or CPU
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(images)
if args.arch == 'inception':
loss = criterion(outputs.logits, labels)
elif args.arch == 'densenet':
loss = criterion(outputs, labels)
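        # Editor's note: in training mode torchvision's inception_v3 returns an
        # InceptionOutputs namedtuple (logits, aux_logits), hence the .logits access
        # above; densenet161 returns the logits tensor directly.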
loss.backward()
optimizer.step()
training_loss += loss.item()
# Monitor every 10 batches and final batch
if training_batch_counter % 10 == 0 or \
training_batch_counter == (len(dataloaders['train']) - 1):
print(f"Training batch {training_batch_counter}\nLoss = \
{training_loss/(training_batch_counter + 1)}\n")
training_batch_counter += 1
if args.quick_test: break
# -------------------- VALIDATION --------------------
# turn off gradients for speedup in validation
with torch.no_grad():
# set model to evaluation mode and remove un-needed things
# like Dropout layers
model.eval()
accuracy = 0
valid_loss = 0
val_batch_counter = 0
for images, labels in dataloaders['valid']:
# Move input and label tensors to the GPU or CPU
images, labels = images.to(device), labels.to(device)
outputs = model(images)
loss = criterion(outputs, labels)
probs = torch.exp(outputs)
_, top_class = probs.topk(1, dim = 1)
equals = top_class == labels.view(*top_class.shape)
valid_loss += loss.item()
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
# Monitor every 3 batches and final batch
if val_batch_counter % 3 == 0 or \
val_batch_counter == (len(dataloaders['valid']) - 1):
print(f"Validation batch {val_batch_counter}\nLoss = \
{valid_loss/(val_batch_counter + 1)}\n and \
accuracy = {accuracy/(val_batch_counter + 1)}\n")
val_batch_counter += 1
if args.quick_test: break
# -------------------- EPOCH REPORTING --------------------
# Note that normalizing to train/validloader length is due to
# need to divide by batch size to effectively average the
# quantity in question
training_loss /= len(dataloaders['train'])
valid_loss /= len(dataloaders['valid'])
accuracy /= len(dataloaders['valid'])
print(f"For epoch {e+1}/{epochs}...")
print(f"{round((time()-t0)/60, 3)} minutes since training started")
print(f"Training loss = {training_loss}")
print(f"Validation loss = {valid_loss}")
print(f"Accuracy = {accuracy}\n\n")
# Update best accuracy and weights if new superior model is found
if accuracy > best['acc']:
best['acc'] = accuracy
best['epoch'] = e+1
best['weights'] = deepcopy(model.state_dict())
print("Best accuracy updated this epoch \
to {}\n\n\n".format(best['acc']))
if args.quick_test: break
# -------------------- END EPOCHS --------------------
print("Best accuracy found was {} in epoch {}".format(best['acc'],
best['epoch']))
# Set model weights to the optimal ones found across all epochs
# NOTE: you may get an error
# IncompatibleKeys(missing_keys=[], unexpected_keys=[])
# This error can be ignored. Model weights were still set properly.
model.load_state_dict(best['weights'])
# -------------------- TESTING --------------------
# turn off gradients for speedup in testing
with torch.no_grad():
# set model to evaluation mode and remove
# un-needed things like Dropout layers
model.eval()
test_accuracy = 0
test_loss = 0
for images, labels in dataloaders['test']:
# Move input and label tensors to the GPU or CPU
images, labels = images.to(device), labels.to(device)
outputs = model(images)
loss = criterion(outputs, labels)
probs = torch.exp(outputs)
_, top_class = probs.topk(1, dim = 1)
equals = top_class == labels.view(*top_class.shape)
test_loss += loss.item()
test_accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
if args.quick_test: break
# Note that normalizing to train/validloader length is due to need to
# divide by batch size to effectively average the quantity in question
test_loss /= len(dataloaders['test'])
test_accuracy /= len(dataloaders['test'])
print(f"Testing loss = {test_loss}")
print(f"Testing accuracy = {test_accuracy}\n\n")
# -------------------- SAVING THE MODEL --------------------
# Note that class_to_idx provides the mapping of my folder names to the
# index used in the model
if args.arch == 'inception':
model_arch = models.inception_v3(pretrained=True)
elif args.arch == 'densenet':
model_arch = models.densenet161(pretrained=True)
checkpoint = {
'arch': model_arch,
'classifier': classifier,
'model_state': model.state_dict(),
'epoch_count': best['epoch'],
'training_loss': training_loss,
'validation_accuracy': best['acc'],
'test_loss': test_loss,
'test_accuracy': test_accuracy,
'opt_state': optimizer.state_dict(),
'class_to_idx': data['train'].class_to_idx,
'idx_to_class': {v: k for k,v \
in data['train'].class_to_idx.items()}}
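# Hedged reload sketch (editor's addition): a saved checkpoint could later be
# restored along these lines, assuming the same torchvision version:
#   checkpoint = torch.load(save_path)
#   model = checkpoint['arch']
#   model.fc = checkpoint['classifier']        # or model.classifier for densenet
#   model.load_state_dict(checkpoint['model_state'])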
# Determine the highest number X among the existing checkpoints
# which are assumed to have filenames of the format checkpointX.pth
# Code adapted from
# https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory
from os import listdir
from os.path import isfile, join
existing_chkpts = [f for f in listdir(args.save_dir) \
if isfile(join(args.save_dir, f))]
# Code adapted from
# https://stackoverflow.com/questions/4666973/how-to-extract-the-substring-between-two-markers
# Take list of existing checkpoint filenames and
# generate string "checkpointn+1" where n is the highest
# value used for checkpoint filenames. Guarantees we won't
# overwrite an existing checkpoint
import re
file_indices = []
for e in existing_chkpts:
m = re.search('checkpoint(.+).pth', e)
if m:
file_indices.append(int(m.group(1)))
# Check that there are any files of proper name scheme in there at all
if file_indices:
file_n = max(file_indices) + 1
else:
file_n = 0
save_path = args.save_dir + 'checkpoint' + str(file_n) + '.pth'
torch.save(checkpoint, save_path) |
the-stack_0_14446 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import dataclasses
import json
import logging
import re
from collections import defaultdict, OrderedDict
from dataclasses import dataclass, field # pylint: disable=wrong-import-order
from datetime import datetime, timedelta
from typing import (
Any,
cast,
Dict,
Hashable,
List,
NamedTuple,
Optional,
Tuple,
Type,
Union,
)
import pandas as pd
import sqlalchemy as sa
import sqlparse
from flask import escape, Markup
from flask_appbuilder import Model
from flask_babel import lazy_gettext as _
from jinja2.exceptions import TemplateError
from sqlalchemy import (
and_,
asc,
Boolean,
Column,
DateTime,
desc,
Enum,
ForeignKey,
Integer,
or_,
select,
String,
Table,
Text,
update,
)
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm import backref, Query, relationship, RelationshipProperty, Session
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.sql import column, ColumnElement, literal_column, table, text
from sqlalchemy.sql.elements import ColumnClause
from sqlalchemy.sql.expression import Label, Select, TextAsFrom, TextClause
from sqlalchemy.sql.selectable import Alias, TableClause
from superset import app, db, is_feature_enabled, security_manager
from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
from superset.connectors.sqla.utils import (
get_physical_table_metadata,
get_virtual_table_metadata,
)
from superset.db_engine_specs.base import BaseEngineSpec, TimestampExpression
from superset.exceptions import QueryObjectValidationError
from superset.jinja_context import (
BaseTemplateProcessor,
ExtraCache,
get_template_processor,
)
from superset.models.annotations import Annotation
from superset.models.core import Database
from superset.models.helpers import AuditMixinNullable, QueryResult
from superset.sql_parse import ParsedQuery
from superset.typing import AdhocMetric, Metric, OrderBy, QueryObjectDict
from superset.utils import core as utils
from superset.utils.core import GenericDataType, remove_duplicates
config = app.config
metadata = Model.metadata # pylint: disable=no-member
logger = logging.getLogger(__name__)
VIRTUAL_TABLE_ALIAS = "virtual_table"
class SqlaQuery(NamedTuple):
extra_cache_keys: List[Any]
labels_expected: List[str]
prequeries: List[str]
sqla_query: Select
class QueryStringExtended(NamedTuple):
labels_expected: List[str]
prequeries: List[str]
sql: str
@dataclass
class MetadataResult:
added: List[str] = field(default_factory=list)
removed: List[str] = field(default_factory=list)
modified: List[str] = field(default_factory=list)
class AnnotationDatasource(BaseDatasource):
"""Dummy object so we can query annotations using 'Viz' objects just like
regular datasources.
"""
cache_timeout = 0
changed_on = None
type = "annotation"
column_names = [
"created_on",
"changed_on",
"id",
"start_dttm",
"end_dttm",
"layer_id",
"short_descr",
"long_descr",
"json_metadata",
"created_by_fk",
"changed_by_fk",
]
def query(self, query_obj: QueryObjectDict) -> QueryResult:
error_message = None
qry = db.session.query(Annotation)
qry = qry.filter(Annotation.layer_id == query_obj["filter"][0]["val"])
if query_obj["from_dttm"]:
qry = qry.filter(Annotation.start_dttm >= query_obj["from_dttm"])
if query_obj["to_dttm"]:
qry = qry.filter(Annotation.end_dttm <= query_obj["to_dttm"])
status = utils.QueryStatus.SUCCESS
try:
df = pd.read_sql_query(qry.statement, db.engine)
except Exception as ex: # pylint: disable=broad-except
df = pd.DataFrame()
status = utils.QueryStatus.FAILED
logger.exception(ex)
error_message = utils.error_msg_from_exception(ex)
return QueryResult(
status=status,
df=df,
duration=timedelta(0),
query="",
error_message=error_message,
)
def get_query_str(self, query_obj: QueryObjectDict) -> str:
raise NotImplementedError()
def values_for_column(self, column_name: str, limit: int = 10000) -> List[Any]:
raise NotImplementedError()
class TableColumn(Model, BaseColumn):
"""ORM object for table columns, each table can have multiple columns"""
__tablename__ = "table_columns"
__table_args__ = (UniqueConstraint("table_id", "column_name"),)
table_id = Column(Integer, ForeignKey("tables.id"))
table = relationship(
"SqlaTable",
backref=backref("columns", cascade="all, delete-orphan"),
foreign_keys=[table_id],
)
is_dttm = Column(Boolean, default=False)
expression = Column(Text)
python_date_format = Column(String(255))
export_fields = [
"table_id",
"column_name",
"verbose_name",
"is_dttm",
"is_active",
"type",
"groupby",
"filterable",
"expression",
"description",
"python_date_format",
]
update_from_object_fields = [s for s in export_fields if s not in ("table_id",)]
export_parent = "table"
@property
def is_boolean(self) -> bool:
"""
Check if the column has a boolean datatype.
"""
return self.type_generic == GenericDataType.BOOLEAN
@property
def is_numeric(self) -> bool:
"""
Check if the column has a numeric datatype.
"""
return self.type_generic == GenericDataType.NUMERIC
@property
def is_string(self) -> bool:
"""
Check if the column has a string datatype.
"""
return self.type_generic == GenericDataType.STRING
@property
def is_temporal(self) -> bool:
"""
Check if the column has a temporal datatype. If column has been set as
temporal/non-temporal (`is_dttm` is True or False respectively), return that
value. This usually happens during initial metadata fetching or when a column
is manually set as temporal (for this `python_date_format` needs to be set).
"""
if self.is_dttm is not None:
return self.is_dttm
return self.type_generic == GenericDataType.TEMPORAL
@property
def db_engine_spec(self) -> Type[BaseEngineSpec]:
return self.table.db_engine_spec
@property
def type_generic(self) -> Optional[utils.GenericDataType]:
if self.is_dttm:
return GenericDataType.TEMPORAL
column_spec = self.db_engine_spec.get_column_spec(self.type)
return column_spec.generic_type if column_spec else None
def get_sqla_col(self, label: Optional[str] = None) -> Column:
label = label or self.column_name
db_engine_spec = self.db_engine_spec
column_spec = db_engine_spec.get_column_spec(self.type)
type_ = column_spec.sqla_type if column_spec else None
if self.expression:
tp = self.table.get_template_processor()
expression = tp.process_template(self.expression)
col = literal_column(expression, type_=type_)
else:
col = column(self.column_name, type_=type_)
col = self.table.make_sqla_column_compatible(col, label)
return col
@property
def datasource(self) -> RelationshipProperty:
return self.table
def get_time_filter(
self,
start_dttm: DateTime,
end_dttm: DateTime,
time_range_endpoints: Optional[
Tuple[utils.TimeRangeEndpoint, utils.TimeRangeEndpoint]
],
) -> ColumnElement:
col = self.get_sqla_col(label="__time")
l = []
if start_dttm:
l.append(
col >= text(self.dttm_sql_literal(start_dttm, time_range_endpoints))
)
if end_dttm:
if (
time_range_endpoints
and time_range_endpoints[1] == utils.TimeRangeEndpoint.EXCLUSIVE
):
l.append(
col < text(self.dttm_sql_literal(end_dttm, time_range_endpoints))
)
else:
l.append(col <= text(self.dttm_sql_literal(end_dttm, None)))
return and_(*l)
def get_timestamp_expression(
self, time_grain: Optional[str], label: Optional[str] = None
) -> Union[TimestampExpression, Label]:
"""
Return a SQLAlchemy Core element representation of self to be used in a query.
:param time_grain: Optional time grain, e.g. P1Y
:param label: alias/label that column is expected to have
:return: A TimeExpression object wrapped in a Label if supported by db
"""
label = label or utils.DTTM_ALIAS
pdf = self.python_date_format
is_epoch = pdf in ("epoch_s", "epoch_ms")
if not self.expression and not time_grain and not is_epoch:
sqla_col = column(self.column_name, type_=DateTime)
return self.table.make_sqla_column_compatible(sqla_col, label)
if self.expression:
col = literal_column(self.expression)
else:
col = column(self.column_name)
time_expr = self.db_engine_spec.get_timestamp_expr(
col, pdf, time_grain, self.type
)
return self.table.make_sqla_column_compatible(time_expr, label)
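    # Editor's example (illustrative only; the generated SQL depends on the database
    # engine spec): for a plain DATETIME column "ds" with time grain "P1D", a
    # PostgreSQL-backed dataset would yield something like
    # DATE_TRUNC('day', ds) AS __timestamp.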
def dttm_sql_literal(
self,
dttm: DateTime,
time_range_endpoints: Optional[
Tuple[utils.TimeRangeEndpoint, utils.TimeRangeEndpoint]
],
) -> str:
"""Convert datetime object to a SQL expression string"""
dttm_type = self.type or ("DATETIME" if self.is_dttm else None)
sql = self.db_engine_spec.convert_dttm(dttm_type, dttm) if dttm_type else None
if sql:
return sql
tf = self.python_date_format
# Fallback to the default format (if defined) only if the SIP-15 time range
# endpoints, i.e., [start, end) are enabled.
if not tf and time_range_endpoints == (
utils.TimeRangeEndpoint.INCLUSIVE,
utils.TimeRangeEndpoint.EXCLUSIVE,
):
tf = (
self.table.database.get_extra()
.get("python_date_format_by_column_name", {})
.get(self.column_name)
)
if tf:
if tf in ["epoch_ms", "epoch_s"]:
seconds_since_epoch = int(dttm.timestamp())
if tf == "epoch_s":
return str(seconds_since_epoch)
return str(seconds_since_epoch * 1000)
return f"'{dttm.strftime(tf)}'"
# TODO(john-bodley): SIP-15 will explicitly require a type conversion.
return f"""'{dttm.strftime("%Y-%m-%d %H:%M:%S.%f")}'"""
@property
def data(self) -> Dict[str, Any]:
attrs = (
"id",
"column_name",
"verbose_name",
"description",
"expression",
"filterable",
"groupby",
"is_dttm",
"type",
"type_generic",
"python_date_format",
)
return {s: getattr(self, s) for s in attrs if hasattr(self, s)}
class SqlMetric(Model, BaseMetric):
"""ORM object for metrics, each table can have multiple metrics"""
__tablename__ = "sql_metrics"
__table_args__ = (UniqueConstraint("table_id", "metric_name"),)
table_id = Column(Integer, ForeignKey("tables.id"))
table = relationship(
"SqlaTable",
backref=backref("metrics", cascade="all, delete-orphan"),
foreign_keys=[table_id],
)
expression = Column(Text, nullable=False)
extra = Column(Text)
export_fields = [
"metric_name",
"verbose_name",
"metric_type",
"table_id",
"expression",
"description",
"d3format",
"extra",
"warning_text",
]
update_from_object_fields = list(
[s for s in export_fields if s not in ("table_id",)]
)
export_parent = "table"
def get_sqla_col(self, label: Optional[str] = None) -> Column:
label = label or self.metric_name
tp = self.table.get_template_processor()
sqla_col: ColumnClause = literal_column(tp.process_template(self.expression))
return self.table.make_sqla_column_compatible(sqla_col, label)
@property
def perm(self) -> Optional[str]:
return (
("{parent_name}.[{obj.metric_name}](id:{obj.id})").format(
obj=self, parent_name=self.table.full_name
)
if self.table
else None
)
def get_perm(self) -> Optional[str]:
return self.perm
def get_extra_dict(self) -> Dict[str, Any]:
try:
return json.loads(self.extra)
except (TypeError, json.JSONDecodeError):
return {}
@property
def is_certified(self) -> bool:
return bool(self.get_extra_dict().get("certification"))
@property
def certified_by(self) -> Optional[str]:
return self.get_extra_dict().get("certification", {}).get("certified_by")
@property
def certification_details(self) -> Optional[str]:
return self.get_extra_dict().get("certification", {}).get("details")
@property
def warning_markdown(self) -> Optional[str]:
return self.get_extra_dict().get("warning_markdown")
@property
def data(self) -> Dict[str, Any]:
attrs = (
"is_certified",
"certified_by",
"certification_details",
"warning_markdown",
)
attr_dict = {s: getattr(self, s) for s in attrs}
attr_dict.update(super().data)
return attr_dict
sqlatable_user = Table(
"sqlatable_user",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id")),
Column("table_id", Integer, ForeignKey("tables.id")),
)
class SqlaTable( # pylint: disable=too-many-public-methods,too-many-instance-attributes
Model, BaseDatasource
):
"""An ORM object for SqlAlchemy table references"""
type = "table"
query_language = "sql"
is_rls_supported = True
columns: List[TableColumn] = []
metrics: List[SqlMetric] = []
metric_class = SqlMetric
column_class = TableColumn
owner_class = security_manager.user_model
__tablename__ = "tables"
# Note this uniqueness constraint is not part of the physical schema, i.e., it does
# not exist in the migrations, but is required by `import_from_dict` to ensure the
# correct filters are applied in order to identify uniqueness.
#
# The reason it does not physically exist is MySQL, PostgreSQL, etc. have a
# different interpretation of uniqueness when it comes to NULL which is problematic
# given the schema is optional.
__table_args__ = (UniqueConstraint("database_id", "schema", "table_name"),)
table_name = Column(String(250), nullable=False)
main_dttm_col = Column(String(250))
database_id = Column(Integer, ForeignKey("dbs.id"), nullable=False)
fetch_values_predicate = Column(String(1000))
owners = relationship(owner_class, secondary=sqlatable_user, backref="tables")
database: Database = relationship(
"Database",
backref=backref("tables", cascade="all, delete-orphan"),
foreign_keys=[database_id],
)
schema = Column(String(255))
sql = Column(Text)
is_sqllab_view = Column(Boolean, default=False)
template_params = Column(Text)
extra = Column(Text)
baselink = "tablemodelview"
export_fields = [
"table_name",
"main_dttm_col",
"description",
"default_endpoint",
"database_id",
"offset",
"cache_timeout",
"schema",
"sql",
"params",
"template_params",
"filter_select_enabled",
"fetch_values_predicate",
"extra",
]
update_from_object_fields = [f for f in export_fields if f != "database_id"]
export_parent = "database"
export_children = ["metrics", "columns"]
sqla_aggregations = {
"COUNT_DISTINCT": lambda column_name: sa.func.COUNT(sa.distinct(column_name)),
"COUNT": sa.func.COUNT,
"SUM": sa.func.SUM,
"AVG": sa.func.AVG,
"MIN": sa.func.MIN,
"MAX": sa.func.MAX,
}
def __repr__(self) -> str:
return self.name
@property
def db_engine_spec(self) -> Type[BaseEngineSpec]:
return self.database.db_engine_spec
@property
def changed_by_name(self) -> str:
if not self.changed_by:
return ""
return str(self.changed_by)
@property
def changed_by_url(self) -> str:
if not self.changed_by:
return ""
return f"/superset/profile/{self.changed_by.username}"
@property
def connection(self) -> str:
return str(self.database)
@property
def description_markeddown(self) -> str:
return utils.markdown(self.description)
@property
def datasource_name(self) -> str:
return self.table_name
@property
def datasource_type(self) -> str:
return self.type
@property
def database_name(self) -> str:
return self.database.name
@classmethod
def get_datasource_by_name(
cls,
session: Session,
datasource_name: str,
schema: Optional[str],
database_name: str,
) -> Optional["SqlaTable"]:
schema = schema or None
query = (
session.query(cls)
.join(Database)
.filter(cls.table_name == datasource_name)
.filter(Database.database_name == database_name)
)
# Handling schema being '' or None, which is easier to handle
# in python than in the SQLA query in a multi-dialect way
for tbl in query.all():
if schema == (tbl.schema or None):
return tbl
return None
@property
def link(self) -> Markup:
name = escape(self.name)
anchor = f'<a target="_blank" href="{self.explore_url}">{name}</a>'
return Markup(anchor)
def get_schema_perm(self) -> Optional[str]:
"""Returns schema permission if present, database one otherwise."""
return security_manager.get_schema_perm(self.database, self.schema)
def get_perm(self) -> str:
return f"[{self.database}].[{self.table_name}](id:{self.id})"
@property
def name(self) -> str:
if not self.schema:
return self.table_name
return "{}.{}".format(self.schema, self.table_name)
@property
def full_name(self) -> str:
return utils.get_datasource_full_name(
self.database, self.table_name, schema=self.schema
)
@property
def dttm_cols(self) -> List[str]:
l = [c.column_name for c in self.columns if c.is_dttm]
if self.main_dttm_col and self.main_dttm_col not in l:
l.append(self.main_dttm_col)
return l
@property
def num_cols(self) -> List[str]:
return [c.column_name for c in self.columns if c.is_numeric]
@property
def any_dttm_col(self) -> Optional[str]:
cols = self.dttm_cols
return cols[0] if cols else None
@property
def html(self) -> str:
df = pd.DataFrame((c.column_name, c.type) for c in self.columns)
df.columns = ["field", "type"]
return df.to_html(
index=False,
classes=("dataframe table table-striped table-bordered " "table-condensed"),
)
@property
def sql_url(self) -> str:
return self.database.sql_url + "?table_name=" + str(self.table_name)
def external_metadata(self) -> List[Dict[str, str]]:
if self.sql:
return get_virtual_table_metadata(dataset=self)
return get_physical_table_metadata(
database=self.database, table_name=self.table_name, schema_name=self.schema,
)
@property
def time_column_grains(self) -> Dict[str, Any]:
return {
"time_columns": self.dttm_cols,
"time_grains": [grain.name for grain in self.database.grains()],
}
@property
def select_star(self) -> Optional[str]:
# show_cols and latest_partition set to false to avoid
# the expensive cost of inspecting the DB
return self.database.select_star(
self.table_name, schema=self.schema, show_cols=False, latest_partition=False
)
@property
def health_check_message(self) -> Optional[str]:
check = config["DATASET_HEALTH_CHECK"]
return check(self) if check else None
@property
def data(self) -> Dict[str, Any]:
data_ = super().data
if self.type == "table":
data_["granularity_sqla"] = utils.choicify(self.dttm_cols)
data_["time_grain_sqla"] = [
(g.duration, g.name) for g in self.database.grains() or []
]
data_["main_dttm_col"] = self.main_dttm_col
data_["fetch_values_predicate"] = self.fetch_values_predicate
data_["template_params"] = self.template_params
data_["is_sqllab_view"] = self.is_sqllab_view
data_["health_check_message"] = self.health_check_message
data_["extra"] = self.extra
return data_
@property
def extra_dict(self) -> Dict[str, Any]:
try:
return json.loads(self.extra)
except (TypeError, json.JSONDecodeError):
return {}
def get_fetch_values_predicate(self) -> TextClause:
tp = self.get_template_processor()
try:
return text(tp.process_template(self.fetch_values_predicate))
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error in jinja expression in fetch values predicate: %(msg)s",
msg=ex.message,
)
)
def values_for_column(self, column_name: str, limit: int = 10000) -> List[Any]:
"""Runs query against sqla to retrieve some
sample values for the given column.
"""
cols = {col.column_name: col for col in self.columns}
target_col = cols[column_name]
tp = self.get_template_processor()
qry = (
select([target_col.get_sqla_col()])
.select_from(self.get_from_clause(tp))
.distinct()
)
if limit:
qry = qry.limit(limit)
if self.fetch_values_predicate:
qry = qry.where(self.get_fetch_values_predicate())
engine = self.database.get_sqla_engine()
sql = "{}".format(qry.compile(engine, compile_kwargs={"literal_binds": True}))
sql = self.mutate_query_from_config(sql)
df = pd.read_sql_query(sql=sql, con=engine)
return df[column_name].to_list()
def mutate_query_from_config(self, sql: str) -> str:
"""Apply config's SQL_QUERY_MUTATOR
Typically adds comments to the query with context"""
sql_query_mutator = config["SQL_QUERY_MUTATOR"]
if sql_query_mutator:
username = utils.get_username()
sql = sql_query_mutator(sql, username, security_manager, self.database)
return sql
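    # Editor's sketch of a mutator, matching the call signature used above
    # (hypothetical config value, not from the original source):
    #   def SQL_QUERY_MUTATOR(sql, username, security_manager, database):
    #       return f"-- run by {username}\n{sql}"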
def get_template_processor(self, **kwargs: Any) -> BaseTemplateProcessor:
return get_template_processor(table=self, database=self.database, **kwargs)
def get_query_str_extended(self, query_obj: QueryObjectDict) -> QueryStringExtended:
sqlaq = self.get_sqla_query(**query_obj)
sql = self.database.compile_sqla_query(sqlaq.sqla_query)
sql = sqlparse.format(sql, reindent=True)
sql = self.mutate_query_from_config(sql)
return QueryStringExtended(
labels_expected=sqlaq.labels_expected, sql=sql, prequeries=sqlaq.prequeries
)
def get_query_str(self, query_obj: QueryObjectDict) -> str:
query_str_ext = self.get_query_str_extended(query_obj)
all_queries = query_str_ext.prequeries + [query_str_ext.sql]
return ";\n\n".join(all_queries) + ";"
def get_sqla_table(self) -> TableClause:
tbl = table(self.table_name)
if self.schema:
tbl.schema = self.schema
return tbl
def get_from_clause(
self, template_processor: Optional[BaseTemplateProcessor] = None
) -> Union[TableClause, Alias]:
"""
Return where to select the columns and metrics from. Either a physical table
        or a virtual table with its own subquery.
"""
if not self.is_virtual:
return self.get_sqla_table()
from_sql = self.get_rendered_sql(template_processor)
parsed_query = ParsedQuery(from_sql)
if not (
parsed_query.is_unknown()
or self.db_engine_spec.is_readonly_query(parsed_query)
):
raise QueryObjectValidationError(
_("Virtual dataset query must be read-only")
)
return TextAsFrom(sa.text(from_sql), []).alias(VIRTUAL_TABLE_ALIAS)
def get_rendered_sql(
self, template_processor: Optional[BaseTemplateProcessor] = None
) -> str:
"""
Render sql with template engine (Jinja).
"""
sql = self.sql
if template_processor:
try:
sql = template_processor.process_template(sql)
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error while rendering virtual dataset query: %(msg)s",
msg=ex.message,
)
)
sql = sqlparse.format(sql.strip("\t\r\n; "), strip_comments=True)
if not sql:
raise QueryObjectValidationError(_("Virtual dataset query cannot be empty"))
if len(sqlparse.split(sql)) > 1:
raise QueryObjectValidationError(
_("Virtual dataset query cannot consist of multiple statements")
)
return sql
def adhoc_metric_to_sqla(
self, metric: AdhocMetric, columns_by_name: Dict[str, TableColumn]
) -> ColumnElement:
"""
Turn an adhoc metric into a sqlalchemy column.
:param dict metric: Adhoc metric definition
:param dict columns_by_name: Columns for the current table
:returns: The metric defined as a sqlalchemy column
:rtype: sqlalchemy.sql.column
"""
expression_type = metric.get("expressionType")
label = utils.get_metric_name(metric)
if expression_type == utils.AdhocMetricExpressionType.SIMPLE:
column_name = cast(str, metric["column"].get("column_name"))
table_column: Optional[TableColumn] = columns_by_name.get(column_name)
if table_column:
sqla_column = table_column.get_sqla_col()
else:
sqla_column = column(column_name)
sqla_metric = self.sqla_aggregations[metric["aggregate"]](sqla_column)
elif expression_type == utils.AdhocMetricExpressionType.SQL:
tp = self.get_template_processor()
expression = tp.process_template(cast(str, metric["sqlExpression"]))
sqla_metric = literal_column(expression)
else:
raise QueryObjectValidationError("Adhoc metric expressionType is invalid")
return self.make_sqla_column_compatible(sqla_metric, label)
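    # Editor's example of the two adhoc metric shapes handled above (field names
    # follow the keys read in this method; values are illustrative):
    #   {"expressionType": "SIMPLE", "column": {"column_name": "num"},
    #    "aggregate": "SUM", "label": "sum__num"}
    #   {"expressionType": "SQL", "sqlExpression": "SUM(num) / COUNT(*)",
    #    "label": "avg_num"}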
def make_sqla_column_compatible(
self, sqla_col: ColumnElement, label: Optional[str] = None
) -> ColumnElement:
"""Takes a sqlalchemy column object and adds label info if supported by engine.
:param sqla_col: sqlalchemy column instance
:param label: alias/label that column is expected to have
:return: either a sql alchemy column or label instance if supported by engine
"""
label_expected = label or sqla_col.name
db_engine_spec = self.db_engine_spec
# add quotes to tables
if db_engine_spec.allows_alias_in_select:
label = db_engine_spec.make_label_compatible(label_expected)
sqla_col = sqla_col.label(label)
sqla_col.key = label_expected
return sqla_col
def make_orderby_compatible(
self, select_exprs: List[ColumnElement], orderby_exprs: List[ColumnElement]
) -> None:
"""
If needed, make sure aliases for selected columns are not used in
`ORDER BY`.
In some databases (e.g. Presto), `ORDER BY` clause is not able to
automatically pick the source column if a `SELECT` clause alias is named
the same as a source column. In this case, we update the SELECT alias to
another name to avoid the conflict.
"""
if self.db_engine_spec.allows_alias_to_source_column:
return
def is_alias_used_in_orderby(col: ColumnElement) -> bool:
if not isinstance(col, Label):
return False
regexp = re.compile(f"\\(.*\\b{re.escape(col.name)}\\b.*\\)", re.IGNORECASE)
return any(regexp.search(str(x)) for x in orderby_exprs)
# Iterate through selected columns, if column alias appears in orderby
# use another `alias`. The final output columns will still use the
# original names, because they are updated by `labels_expected` after
# querying.
for col in select_exprs:
if is_alias_used_in_orderby(col):
col.name = f"{col.name}__"
def _get_sqla_row_level_filters(
self, template_processor: BaseTemplateProcessor
) -> List[str]:
"""
Return the appropriate row level security filters for
this table and the current user.
:param BaseTemplateProcessor template_processor: The template
processor to apply to the filters.
:returns: A list of SQL clauses to be ANDed together.
:rtype: List[str]
"""
filters_grouped: Dict[Union[int, str], List[str]] = defaultdict(list)
try:
for filter_ in security_manager.get_rls_filters(self):
clause = text(
f"({template_processor.process_template(filter_.clause)})"
)
filters_grouped[filter_.group_key or filter_.id].append(clause)
return [or_(*clauses) for clauses in filters_grouped.values()]
except TemplateError as ex:
raise QueryObjectValidationError(
_("Error in jinja expression in RLS filters: %(msg)s", msg=ex.message,)
)
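    # Editor's note on the grouping semantics above: filters sharing a group_key are
    # OR'ed together, and each resulting group is later AND'ed into the WHERE clause,
    # e.g. (region = 'EU' OR region = 'US') AND (team = 'sales').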
def get_sqla_query( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
self,
metrics: Optional[List[Metric]] = None,
granularity: Optional[str] = None,
from_dttm: Optional[datetime] = None,
to_dttm: Optional[datetime] = None,
columns: Optional[List[str]] = None,
groupby: Optional[List[str]] = None,
filter: Optional[ # pylint: disable=redefined-builtin
List[Dict[str, Any]]
] = None,
is_timeseries: bool = True,
timeseries_limit: int = 15,
timeseries_limit_metric: Optional[Metric] = None,
row_limit: Optional[int] = None,
row_offset: Optional[int] = None,
inner_from_dttm: Optional[datetime] = None,
inner_to_dttm: Optional[datetime] = None,
orderby: Optional[List[OrderBy]] = None,
extras: Optional[Dict[str, Any]] = None,
order_desc: bool = True,
is_rowcount: bool = False,
apply_fetch_values_predicate: bool = False,
) -> SqlaQuery:
"""Querying any sqla table from this common interface"""
template_kwargs = {
"from_dttm": from_dttm.isoformat() if from_dttm else None,
"groupby": groupby,
"metrics": metrics,
"row_limit": row_limit,
"row_offset": row_offset,
"to_dttm": to_dttm.isoformat() if to_dttm else None,
"filter": filter,
"columns": [col.column_name for col in self.columns],
}
template_kwargs.update(self.template_params_dict)
extra_cache_keys: List[Any] = []
template_kwargs["extra_cache_keys"] = extra_cache_keys
removed_filters: List[str] = []
template_kwargs["removed_filters"] = removed_filters
template_processor = self.get_template_processor(**template_kwargs)
db_engine_spec = self.db_engine_spec
prequeries: List[str] = []
orderby = orderby or []
extras = extras or {}
need_groupby = bool(metrics is not None or groupby)
metrics = metrics or []
# For backward compatibility
if granularity not in self.dttm_cols and granularity is not None:
granularity = self.main_dttm_col
# Database spec supports join-free timeslot grouping
time_groupby_inline = db_engine_spec.time_groupby_inline
columns_by_name: Dict[str, TableColumn] = {
col.column_name: col for col in self.columns
}
metrics_by_name: Dict[str, SqlMetric] = {m.metric_name: m for m in self.metrics}
if not granularity and is_timeseries:
raise QueryObjectValidationError(
_(
"Datetime column not provided as part table configuration "
"and is required by this type of chart"
)
)
if not metrics and not columns and not groupby:
raise QueryObjectValidationError(_("Empty query?"))
metrics_exprs: List[ColumnElement] = []
for metric in metrics:
if utils.is_adhoc_metric(metric):
assert isinstance(metric, dict)
metrics_exprs.append(self.adhoc_metric_to_sqla(metric, columns_by_name))
elif isinstance(metric, str) and metric in metrics_by_name:
metrics_exprs.append(metrics_by_name[metric].get_sqla_col())
else:
raise QueryObjectValidationError(
_("Metric '%(metric)s' does not exist", metric=metric)
)
if metrics_exprs:
main_metric_expr = metrics_exprs[0]
else:
main_metric_expr, label = literal_column("COUNT(*)"), "ccount"
main_metric_expr = self.make_sqla_column_compatible(main_metric_expr, label)
# To ensure correct handling of the ORDER BY labeling we need to reference the
# metric instance if defined in the SELECT clause.
# use the key of the ColumnClause for the expected label
metrics_exprs_by_label = {m.key: m for m in metrics_exprs}
metrics_exprs_by_expr = {str(m): m for m in metrics_exprs}
# Since orderby may use adhoc metrics, too; we need to process them first
orderby_exprs: List[ColumnElement] = []
for orig_col, ascending in orderby:
col: Union[AdhocMetric, ColumnElement] = orig_col
if isinstance(col, dict):
col = cast(AdhocMetric, col)
if utils.is_adhoc_metric(col):
# add adhoc sort by column to columns_by_name if not exists
col = self.adhoc_metric_to_sqla(col, columns_by_name)
# if the adhoc metric has been defined before
# use the existing instance.
col = metrics_exprs_by_expr.get(str(col), col)
need_groupby = True
elif col in columns_by_name:
col = columns_by_name[col].get_sqla_col()
elif col in metrics_exprs_by_label:
col = metrics_exprs_by_label[col]
need_groupby = True
elif col in metrics_by_name:
col = metrics_by_name[col].get_sqla_col()
need_groupby = True
if isinstance(col, ColumnElement):
orderby_exprs.append(col)
else:
# Could not convert a column reference to valid ColumnElement
raise QueryObjectValidationError(
_("Unknown column used in orderby: %(col)s", col=orig_col)
)
select_exprs: List[Union[Column, Label]] = []
groupby_exprs_sans_timestamp = OrderedDict()
# filter out the pseudo column __timestamp from columns
columns = columns or []
columns = [col for col in columns if col != utils.DTTM_ALIAS]
if need_groupby:
# dedup columns while preserving order
columns = groupby or columns
for selected in columns:
# if groupby field/expr equals granularity field/expr
if selected == granularity:
time_grain = extras.get("time_grain_sqla")
sqla_col = columns_by_name[selected]
outer = sqla_col.get_timestamp_expression(time_grain, selected)
# if groupby field equals a selected column
elif selected in columns_by_name:
outer = columns_by_name[selected].get_sqla_col()
else:
outer = literal_column(f"({selected})")
outer = self.make_sqla_column_compatible(outer, selected)
groupby_exprs_sans_timestamp[outer.name] = outer
select_exprs.append(outer)
elif columns:
for selected in columns:
select_exprs.append(
columns_by_name[selected].get_sqla_col()
if selected in columns_by_name
else self.make_sqla_column_compatible(literal_column(selected))
)
metrics_exprs = []
time_range_endpoints = extras.get("time_range_endpoints")
groupby_exprs_with_timestamp = OrderedDict(groupby_exprs_sans_timestamp.items())
if granularity:
if granularity not in columns_by_name:
raise QueryObjectValidationError(
_(
'Time column "%(col)s" does not exist in dataset',
col=granularity,
)
)
dttm_col = columns_by_name[granularity]
time_grain = extras.get("time_grain_sqla")
time_filters = []
if is_timeseries:
timestamp = dttm_col.get_timestamp_expression(time_grain)
# always put timestamp as the first column
select_exprs.insert(0, timestamp)
groupby_exprs_with_timestamp[timestamp.name] = timestamp
# Use main dttm column to support index with secondary dttm columns.
if (
db_engine_spec.time_secondary_columns
and self.main_dttm_col in self.dttm_cols
and self.main_dttm_col != dttm_col.column_name
):
time_filters.append(
columns_by_name[self.main_dttm_col].get_time_filter(
from_dttm, to_dttm, time_range_endpoints
)
)
time_filters.append(
dttm_col.get_time_filter(from_dttm, to_dttm, time_range_endpoints)
)
# Always remove duplicates by column name, as sometimes `metrics_exprs`
# can have the same name as a groupby column (e.g. when users use
# raw columns as custom SQL adhoc metric).
select_exprs = remove_duplicates(
select_exprs + metrics_exprs, key=lambda x: x.name
)
# Expected output columns
labels_expected = [c.key for c in select_exprs]
        # Order by columns are "hidden" columns; some databases require them to
        # always be present in SELECT if an aggregation function is used.
if not db_engine_spec.allows_hidden_ordeby_agg:
select_exprs = remove_duplicates(select_exprs + orderby_exprs)
qry = sa.select(select_exprs)
tbl = self.get_from_clause(template_processor)
if groupby_exprs_with_timestamp:
qry = qry.group_by(*groupby_exprs_with_timestamp.values())
where_clause_and = []
having_clause_and = []
for flt in filter: # type: ignore
if not all([flt.get(s) for s in ["col", "op"]]):
continue
col = flt["col"]
val = flt.get("val")
op = flt["op"].upper()
col_obj = columns_by_name.get(col)
if is_feature_enabled("ENABLE_TEMPLATE_REMOVE_FILTERS"):
if col in removed_filters:
# Skip generating SQLA filter when the jinja template handles it.
continue
if col_obj:
col_spec = db_engine_spec.get_column_spec(col_obj.type)
is_list_target = op in (
utils.FilterOperator.IN.value,
utils.FilterOperator.NOT_IN.value,
)
if col_spec:
target_type = col_spec.generic_type
else:
target_type = GenericDataType.STRING
eq = self.filter_values_handler(
values=val,
target_column_type=target_type,
is_list_target=is_list_target,
)
if is_list_target:
assert isinstance(eq, (tuple, list))
if len(eq) == 0:
raise QueryObjectValidationError(
_("Filter value list cannot be empty")
)
if None in eq:
eq = [x for x in eq if x is not None]
is_null_cond = col_obj.get_sqla_col().is_(None)
if eq:
cond = or_(is_null_cond, col_obj.get_sqla_col().in_(eq))
else:
cond = is_null_cond
else:
cond = col_obj.get_sqla_col().in_(eq)
if op == utils.FilterOperator.NOT_IN.value:
cond = ~cond
where_clause_and.append(cond)
elif op == utils.FilterOperator.IS_NULL.value:
where_clause_and.append(col_obj.get_sqla_col().is_(None))
elif op == utils.FilterOperator.IS_NOT_NULL.value:
where_clause_and.append(col_obj.get_sqla_col().isnot(None))
elif op == utils.FilterOperator.IS_TRUE.value:
where_clause_and.append(col_obj.get_sqla_col().is_(True))
elif op == utils.FilterOperator.IS_FALSE.value:
where_clause_and.append(col_obj.get_sqla_col().is_(False))
else:
if eq is None:
raise QueryObjectValidationError(
_(
"Must specify a value for filters "
"with comparison operators"
)
)
if op == utils.FilterOperator.EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() == eq)
elif op == utils.FilterOperator.NOT_EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() != eq)
elif op == utils.FilterOperator.GREATER_THAN.value:
where_clause_and.append(col_obj.get_sqla_col() > eq)
elif op == utils.FilterOperator.LESS_THAN.value:
where_clause_and.append(col_obj.get_sqla_col() < eq)
elif op == utils.FilterOperator.GREATER_THAN_OR_EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() >= eq)
elif op == utils.FilterOperator.LESS_THAN_OR_EQUALS.value:
where_clause_and.append(col_obj.get_sqla_col() <= eq)
elif op == utils.FilterOperator.LIKE.value:
where_clause_and.append(col_obj.get_sqla_col().like(eq))
elif op == utils.FilterOperator.ILIKE.value:
where_clause_and.append(col_obj.get_sqla_col().ilike(eq))
else:
raise QueryObjectValidationError(
_("Invalid filter operation type: %(op)s", op=op)
)
if is_feature_enabled("ROW_LEVEL_SECURITY"):
where_clause_and += self._get_sqla_row_level_filters(template_processor)
if extras:
where = extras.get("where")
if where:
try:
where = template_processor.process_template(where)
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error in jinja expression in WHERE clause: %(msg)s",
msg=ex.message,
)
)
where_clause_and += [sa.text("({})".format(where))]
having = extras.get("having")
if having:
try:
having = template_processor.process_template(having)
except TemplateError as ex:
raise QueryObjectValidationError(
_(
"Error in jinja expression in HAVING clause: %(msg)s",
msg=ex.message,
)
)
having_clause_and += [sa.text("({})".format(having))]
if apply_fetch_values_predicate and self.fetch_values_predicate:
qry = qry.where(self.get_fetch_values_predicate())
if granularity:
qry = qry.where(and_(*(time_filters + where_clause_and)))
else:
qry = qry.where(and_(*where_clause_and))
qry = qry.having(and_(*having_clause_and))
self.make_orderby_compatible(select_exprs, orderby_exprs)
for col, (orig_col, ascending) in zip(orderby_exprs, orderby):
if not db_engine_spec.allows_alias_in_orderby and isinstance(col, Label):
# if engine does not allow using SELECT alias in ORDER BY
# revert to the underlying column
col = col.element
direction = asc if ascending else desc
qry = qry.order_by(direction(col))
if row_limit:
qry = qry.limit(row_limit)
if row_offset:
qry = qry.offset(row_offset)
if (
is_timeseries # pylint: disable=too-many-boolean-expressions
and timeseries_limit
and not time_groupby_inline
and groupby_exprs_sans_timestamp
):
if db_engine_spec.allows_joins:
# some sql dialects require for order by expressions
# to also be in the select clause -- others, e.g. vertica,
# require a unique inner alias
inner_main_metric_expr = self.make_sqla_column_compatible(
main_metric_expr, "mme_inner__"
)
inner_groupby_exprs = []
inner_select_exprs = []
for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
inner = self.make_sqla_column_compatible(gby_obj, gby_name + "__")
inner_groupby_exprs.append(inner)
inner_select_exprs.append(inner)
inner_select_exprs += [inner_main_metric_expr]
subq = select(inner_select_exprs).select_from(tbl)
inner_time_filter = dttm_col.get_time_filter(
inner_from_dttm or from_dttm,
inner_to_dttm or to_dttm,
time_range_endpoints,
)
subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))
subq = subq.group_by(*inner_groupby_exprs)
ob = inner_main_metric_expr
if timeseries_limit_metric:
ob = self._get_timeseries_orderby(
timeseries_limit_metric, metrics_by_name, columns_by_name
)
direction = desc if order_desc else asc
subq = subq.order_by(direction(ob))
subq = subq.limit(timeseries_limit)
on_clause = []
for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
# in this case the column name, not the alias, needs to be
# conditionally mutated, as it refers to the column alias in
# the inner query
col_name = db_engine_spec.make_label_compatible(gby_name + "__")
on_clause.append(gby_obj == column(col_name))
tbl = tbl.join(subq.alias(), and_(*on_clause))
else:
if timeseries_limit_metric:
orderby = [
(
self._get_timeseries_orderby(
timeseries_limit_metric,
metrics_by_name,
columns_by_name,
),
False,
)
]
# run prequery to get top groups
prequery_obj = {
"is_timeseries": False,
"row_limit": timeseries_limit,
"metrics": metrics,
"granularity": granularity,
"groupby": groupby,
"from_dttm": inner_from_dttm or from_dttm,
"to_dttm": inner_to_dttm or to_dttm,
"filter": filter,
"orderby": orderby,
"extras": extras,
"columns": columns,
"order_desc": True,
}
result = self.query(prequery_obj)
prequeries.append(result.query)
dimensions = [
c
for c in result.df.columns
if c not in metrics and c in groupby_exprs_sans_timestamp
]
top_groups = self._get_top_groups(
result.df, dimensions, groupby_exprs_sans_timestamp
)
qry = qry.where(top_groups)
qry = qry.select_from(tbl)
if is_rowcount:
if not db_engine_spec.allows_subqueries:
raise QueryObjectValidationError(
_("Database does not support subqueries")
)
label = "rowcount"
col = self.make_sqla_column_compatible(literal_column("COUNT(*)"), label)
qry = select([col]).select_from(qry.alias("rowcount_qry"))
labels_expected = [label]
return SqlaQuery(
extra_cache_keys=extra_cache_keys,
labels_expected=labels_expected,
sqla_query=qry,
prequeries=prequeries,
)
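    # A minimal, hypothetical sketch of driving get_sqla_query(); the metric,
    # column and variable names below are assumptions for illustration only:
    #
    #     sqla_query = datasource.get_sqla_query(
    #         metrics=["count"],
    #         granularity="ds",
    #         from_dttm=datetime(2021, 1, 1),
    #         to_dttm=datetime(2021, 2, 1),
    #         groupby=["country"],
    #         filter=[{"col": "country", "op": "IN", "val": ["US", "CA"]}],
    #         row_limit=1000,
    #     )
    #
    # The returned SqlaQuery bundles the SQLAlchemy statement
    # (sqla_query.sqla_query) together with labels_expected, prequeries and
    # extra_cache_keys, as assembled in the return statement above.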
def _get_timeseries_orderby(
self,
timeseries_limit_metric: Metric,
metrics_by_name: Dict[str, SqlMetric],
columns_by_name: Dict[str, TableColumn],
) -> Column:
if utils.is_adhoc_metric(timeseries_limit_metric):
assert isinstance(timeseries_limit_metric, dict)
ob = self.adhoc_metric_to_sqla(timeseries_limit_metric, columns_by_name)
elif (
isinstance(timeseries_limit_metric, str)
and timeseries_limit_metric in metrics_by_name
):
ob = metrics_by_name[timeseries_limit_metric].get_sqla_col()
else:
raise QueryObjectValidationError(
_("Metric '%(metric)s' does not exist", metric=timeseries_limit_metric)
)
return ob
def _get_top_groups( # pylint: disable=no-self-use
self,
df: pd.DataFrame,
dimensions: List[str],
groupby_exprs: "OrderedDict[str, Any]",
) -> ColumnElement:
groups = []
for _unused, row in df.iterrows():
group = []
for dimension in dimensions:
group.append(groupby_exprs[dimension] == row[dimension])
groups.append(and_(*group))
return or_(*groups)
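    # Rough illustration of the predicate built above (values invented): for a
    # prequery result with rows {"country": "US"} and {"country": "CA"} and
    # dimensions == ["country"], the returned clause is roughly
    #
    #     or_(and_(country == 'US'), and_(country == 'CA'))
    #
    # which the caller attaches to the outer query via qry.where(top_groups).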
def query(self, query_obj: QueryObjectDict) -> QueryResult:
qry_start_dttm = datetime.now()
query_str_ext = self.get_query_str_extended(query_obj)
sql = query_str_ext.sql
status = utils.QueryStatus.SUCCESS
errors = None
error_message = None
def assign_column_label(df: pd.DataFrame) -> Optional[pd.DataFrame]:
"""
Some engines change the case or generate bespoke column names, either by
default or due to lack of support for aliasing. This function ensures that
the column names in the DataFrame correspond to what is expected by
the viz components.
Sometimes a query may also contain only order by columns that are not used
        as metrics or groupby columns, but need to be present in the SQL `select`;
        filtering by `labels_expected` makes sure we only return the columns users want.
:param df: Original DataFrame returned by the engine
:return: Mutated DataFrame
"""
labels_expected = query_str_ext.labels_expected
if df is not None and not df.empty:
if len(df.columns) < len(labels_expected):
raise QueryObjectValidationError(
_("Db engine did not return all queried columns")
)
if len(df.columns) > len(labels_expected):
df = df.iloc[:, 0 : len(labels_expected)]
df.columns = labels_expected
return df
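        # Illustrative only: if labels_expected == ["sum__num"] but the engine
        # returns a DataFrame with columns ["SUM__NUM", "extra_orderby_col"],
        # the mutator above drops the trailing column and renames the rest so
        # downstream viz code sees exactly ["sum__num"]. These column names are
        # made up.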
try:
df = self.database.get_df(sql, self.schema, mutator=assign_column_label)
except Exception as ex: # pylint: disable=broad-except
df = pd.DataFrame()
status = utils.QueryStatus.FAILED
logger.warning(
"Query %s on schema %s failed", sql, self.schema, exc_info=True
)
db_engine_spec = self.db_engine_spec
errors = [
dataclasses.asdict(error) for error in db_engine_spec.extract_errors(ex)
]
error_message = utils.error_msg_from_exception(ex)
return QueryResult(
status=status,
df=df,
duration=datetime.now() - qry_start_dttm,
query=sql,
errors=errors,
error_message=error_message,
)
def get_sqla_table_object(self) -> Table:
return self.database.get_table(self.table_name, schema=self.schema)
def fetch_metadata(self, commit: bool = True) -> MetadataResult:
"""
Fetches the metadata for the table and merges it in
:param commit: should the changes be committed or not.
        :return: A ``MetadataResult`` with lists of added, removed and modified column names.
"""
new_columns = self.external_metadata()
metrics = []
any_date_col = None
db_engine_spec = self.db_engine_spec
old_columns = db.session.query(TableColumn).filter(TableColumn.table == self)
old_columns_by_name: Dict[str, TableColumn] = {
col.column_name: col for col in old_columns
}
results = MetadataResult(
removed=[
col
for col in old_columns_by_name
if col not in {col["name"] for col in new_columns}
]
)
# clear old columns before adding modified columns back
self.columns = []
for col in new_columns:
old_column = old_columns_by_name.pop(col["name"], None)
if not old_column:
results.added.append(col["name"])
new_column = TableColumn(
column_name=col["name"], type=col["type"], table=self
)
new_column.is_dttm = new_column.is_temporal
db_engine_spec.alter_new_orm_column(new_column)
else:
new_column = old_column
if new_column.type != col["type"]:
results.modified.append(col["name"])
new_column.type = col["type"]
new_column.expression = ""
new_column.groupby = True
new_column.filterable = True
self.columns.append(new_column)
if not any_date_col and new_column.is_temporal:
any_date_col = col["name"]
self.columns.extend(
[col for col in old_columns_by_name.values() if col.expression]
)
metrics.append(
SqlMetric(
metric_name="count",
verbose_name="COUNT(*)",
metric_type="count",
expression="COUNT(*)",
)
)
if not self.main_dttm_col:
self.main_dttm_col = any_date_col
self.add_missing_metrics(metrics)
# Apply config supplied mutations.
config["SQLA_TABLE_MUTATOR"](self)
db.session.merge(self)
if commit:
db.session.commit()
return results
@classmethod
def query_datasources_by_name(
cls,
session: Session,
database: Database,
datasource_name: str,
schema: Optional[str] = None,
) -> List["SqlaTable"]:
query = (
session.query(cls)
.filter_by(database_id=database.id)
.filter_by(table_name=datasource_name)
)
if schema:
query = query.filter_by(schema=schema)
return query.all()
@staticmethod
def default_query(qry: Query) -> Query:
return qry.filter_by(is_sqllab_view=False)
def has_extra_cache_key_calls(self, query_obj: QueryObjectDict) -> bool:
"""
Detects the presence of calls to `ExtraCache` methods in items in query_obj that
can be templated. If any are present, the query must be evaluated to extract
additional keys for the cache key. This method is needed to avoid executing the
template code unnecessarily, as it may contain expensive calls, e.g. to extract
the latest partition of a database.
:param query_obj: query object to analyze
:return: True if there are call(s) to an `ExtraCache` method, False otherwise
"""
templatable_statements: List[str] = []
if self.sql:
templatable_statements.append(self.sql)
if self.fetch_values_predicate:
templatable_statements.append(self.fetch_values_predicate)
extras = query_obj.get("extras", {})
if "where" in extras:
templatable_statements.append(extras["where"])
if "having" in extras:
templatable_statements.append(extras["having"])
if is_feature_enabled("ROW_LEVEL_SECURITY") and self.is_rls_supported:
templatable_statements += [
f.clause for f in security_manager.get_rls_filters(self)
]
for statement in templatable_statements:
if ExtraCache.regex.search(statement):
return True
return False
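    # Hedged example of the kind of statement this check is meant to catch: a
    # templated WHERE clause such as
    #
    #     country = '{{ current_username() }}'
    #
    # would match ExtraCache.regex, so get_extra_cache_keys() below renders the
    # query and folds the user-specific value into the cache key. The clause and
    # macro shown here are illustrative, not taken from a real dataset.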
def get_extra_cache_keys(self, query_obj: QueryObjectDict) -> List[Hashable]:
"""
The cache key of a SqlaTable needs to consider any keys added by the parent
class and any keys added via `ExtraCache`.
:param query_obj: query object to analyze
:return: The extra cache keys
"""
extra_cache_keys = super().get_extra_cache_keys(query_obj)
if self.has_extra_cache_key_calls(query_obj):
sqla_query = self.get_sqla_query(**query_obj)
extra_cache_keys += sqla_query.extra_cache_keys
return extra_cache_keys
@staticmethod
def before_update(
mapper: Mapper, # pylint: disable=unused-argument
connection: Connection, # pylint: disable=unused-argument
target: "SqlaTable",
) -> None:
"""
        Check, before the update, whether the target table already exists.
Note this listener is called when any fields are being updated and thus it is
necessary to first check whether the reference table is being updated.
Note this logic is temporary, given uniqueness is handled via the dataset DAO,
but is necessary until both the legacy datasource editor and datasource/save
endpoints are deprecated.
:param mapper: The table mapper
:param connection: The DB-API connection
:param target: The mapped instance being persisted
:raises Exception: If the target table is not unique
"""
from superset.datasets.commands.exceptions import get_dataset_exist_error_msg
from superset.datasets.dao import DatasetDAO
# Check whether the relevant attributes have changed.
state = db.inspect(target) # pylint: disable=no-member
for attr in ["database_id", "schema", "table_name"]:
history = state.get_history(attr, True)
if history.has_changes():
break
else:
return None
if not DatasetDAO.validate_uniqueness(
target.database_id, target.schema, target.table_name
):
raise Exception(get_dataset_exist_error_msg(target.full_name))
@staticmethod
def update_table(
_mapper: Mapper, _connection: Connection, obj: Union[SqlMetric, TableColumn]
) -> None:
"""
Forces an update to the table's changed_on value when a metric or column on the
table is updated. This busts the cache key for all charts that use the table.
:param _mapper: Unused.
:param _connection: Unused.
:param obj: The metric or column that was updated.
"""
db.session.execute(update(SqlaTable).where(SqlaTable.id == obj.table.id))
sa.event.listen(SqlaTable, "after_insert", security_manager.set_perm)
sa.event.listen(SqlaTable, "after_update", security_manager.set_perm)
sa.event.listen(SqlaTable, "before_update", SqlaTable.before_update)
sa.event.listen(SqlMetric, "after_update", SqlaTable.update_table)
sa.event.listen(TableColumn, "after_update", SqlaTable.update_table)
RLSFilterRoles = Table(
"rls_filter_roles",
metadata,
Column("id", Integer, primary_key=True),
Column("role_id", Integer, ForeignKey("ab_role.id"), nullable=False),
Column("rls_filter_id", Integer, ForeignKey("row_level_security_filters.id")),
)
RLSFilterTables = Table(
"rls_filter_tables",
metadata,
Column("id", Integer, primary_key=True),
Column("table_id", Integer, ForeignKey("tables.id")),
Column("rls_filter_id", Integer, ForeignKey("row_level_security_filters.id")),
)
class RowLevelSecurityFilter(Model, AuditMixinNullable):
"""
Custom where clauses attached to Tables and Roles.
"""
__tablename__ = "row_level_security_filters"
id = Column(Integer, primary_key=True)
filter_type = Column(
Enum(*[filter_type.value for filter_type in utils.RowLevelSecurityFilterType])
)
group_key = Column(String(255), nullable=True)
roles = relationship(
security_manager.role_model,
secondary=RLSFilterRoles,
backref="row_level_security_filters",
)
tables = relationship(
SqlaTable, secondary=RLSFilterTables, backref="row_level_security_filters"
)
clause = Column(Text, nullable=False)
|
the-stack_0_14448 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def _add_classification(df):
i = pd.read_csv("inputs.csv")
frags = set(i['fragment'])
leads = set(i['lead'])
classification = []
for a in list(df['other_id']):
if a in frags:
classification.append("fragment")
elif a in leads:
classification.append("lead")
df["classification"] = classification
return df
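# Hedged sketch of the inputs this helper assumes: "inputs.csv" should provide
# "fragment" and "lead" columns whose entries match values in df["other_id"],
# for example (the identifiers below are placeholders, not real data):
#
#     fragment,lead
#     1ABC_lig,2XYZ_lig
#     3DEF_lig,4GHI_lig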
def barplot1(df):
ligs = set(df["apo"])
order_dic = {np.mean([float(a) for a in list(df.loc[df["apo"] == l]["volume_overlap"])]): l for l in ligs}
ks = sorted(order_dic.keys(), reverse=True)
ord = [order_dic[k] for k in ks]
g = sns.FacetGrid(df, col="buriedness_method", height=7, aspect=1, legend_out=True)
g = (g.map(sns.barplot, 'apo', 'volume_overlap', 'lig_class', order=ord, hue_order=["fragment", "lead"],
ci=None, palette="Reds").add_legend())
g.set_xticklabels(rotation=45)
g.set(xlabel='', ylabel="", title="")
plt.savefig("barplot1.png")
plt.close()
def barplot2(df, clas = "fragment"):
def over_threshold(l, threshold):
            return [item for item in l if item >= threshold]
df = df.loc[df["atom_type"] != 'apolar']
df = df.loc[df["classification"] == clas]
buriedness_method = ["ligsite", "ghecom", "ghecom_internal"]
data = {}
for b in buriedness_method:
x = df.loc[df["buriedness_method"] == b]
x = x.loc[x["atom_type"] != "apolar"]
data.update({b: list(x["atomic_overlap"])})
    total = max(len(v) for v in data.values())
bm = []
perc = []
thres = []
thresholds = [1, 5, 10, 50, 100]
for t in thresholds:
for b, d in data.items():
bm.append(b)
thres.append(t)
perc.append((len(over_threshold(d, t)) / total) * 100)
ndf = pd.DataFrame({"thresholds": thres, "passed": perc, "buriedness_method": bm})
ax = sns.barplot(x=ndf["thresholds"], y=ndf["passed"], hue=ndf["buriedness_method"],
hue_order= ['ligsite', 'ghecom', 'ghecom_internal'], palette='Reds')
ax.set(xlabel="Overlap threshold (%)", ylabel="Atomic overlap greater than threshold (%)", title="", ylim=[0, 100])
plt.savefig("barplot2_{}.png".format(clas))
plt.close()
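# A small, self-contained sketch of the per-threshold computation in barplot2
# above: the share of a method's atomic overlaps at or above a threshold,
# expressed against a common denominator. The function is illustrative and is
# not called by the plotting code.
def percent_over_threshold(values, threshold, total):
    """Percentage of `values` at or above `threshold`, relative to `total`."""
    if not total:
        return 0.0
    return 100.0 * sum(v >= threshold for v in values) / total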
def boxplot1(df):
ax = sns.boxplot(x='buriedness_method', y='volume_overlap', hue="classification", data=df, order=["ligsite", "ghecom", "ghecom_internal"],
palette="Reds")
ax.set(xlabel='Buriedness Method', ylabel="Percentage Volume Overlap", title="", ylim=[-5,100])
plt.savefig("boxplot1.png")
plt.close()
if __name__ == "__main__":
sns.set(style="ticks", color_codes=True)
df = pd.read_csv("analysis.csv")
df = df.loc[df["ligand_cavity"] == True]
    # barplot1(df)
barplot2(df, clas="fragment")
barplot2(df, clas="lead")
|
the-stack_0_14450 | from newton.db.seed import Trades
from .base import DaoBase
class TradesDao(DaoBase):
def _get_model(self):
return Trades
def history(self, from_datetime, to_datetime, filters=None):
with self._session() as s:
q = s.query(self._Model)
if from_datetime is not None:
q = q.filter(self._Model.entry_datetime >= from_datetime)
if to_datetime is not None:
q = q.filter(self._Model.exit_datetime <= to_datetime)
if filters is None:
return q.all()
if not isinstance(filters, dict):
raise TypeError("params should be 'dict'")
q = self._custom_filters(q, filters)
return q.all()
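    # Hedged usage sketch; the datetime bounds and the filter keys are
    # assumptions, and which filter keys are accepted depends on the
    # DaoBase._custom_filters implementation:
    #
    #     dao = TradesDao()
    #     trades = dao.history(
    #         from_datetime=datetime(2021, 1, 1),
    #         to_datetime=datetime(2021, 2, 1),
    #         filters={"symbol": "BTCUSD"},
    #     )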
|
the-stack_0_14451 | from __future__ import division, print_function
from astropy.io import fits as pyfits
from astropy.utils.data import get_pkg_data_filename
from astropy import units as u
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_equal,
assert_allclose)
import pytest
import tempfile
import warnings
from ..lightcurve import LightCurve, KeplerLightCurve, TessLightCurve
from ..lightcurvefile import LightCurveFile, KeplerLightCurveFile, TessLightCurveFile
from ..targetpixelfile import KeplerTargetPixelFile, TessTargetPixelFile
from ..utils import LightkurveWarning
from .test_targetpixelfile import TABBY_TPF
# 8th Quarter of Tabby's star
TABBY_Q8 = ("https://archive.stsci.edu/missions/kepler/lightcurves"
"/0084/008462852/kplr008462852-2011073133259_llc.fits")
K2_C08 = ("https://archive.stsci.edu/missions/k2/lightcurves/c8/"
"220100000/39000/ktwo220139473-c08_llc.fits")
KEPLER10 = ("https://archive.stsci.edu/missions/kepler/lightcurves/"
"0119/011904151/kplr011904151-2010009091648_llc.fits")
TESS_SIM = ("https://archive.stsci.edu/missions/tess/ete-6/tid/00/000/"
"004/104/tess2019128220341-0000000410458113-0016-s_lc.fits")
filename_tess = get_pkg_data_filename("data/tess25155310-s01-first-cadences.fits.gz")
filename_tess_custom = get_pkg_data_filename("data/test_TESS_interact_generated_custom-lc.fits")
filename_K2_custom = get_pkg_data_filename("data/test_K2_interact_generated_custom-lc.fits")
def test_invalid_lightcurve():
"""Invalid LightCurves should not be allowed."""
err_string = ("Input arrays have different lengths."
" len(time)=5, len(flux)=4")
time = np.array([1, 2, 3, 4, 5])
flux = np.array([1, 2, 3, 4])
with pytest.raises(ValueError) as err:
LightCurve(time=time, flux=flux)
assert err_string == err.value.args[0]
def test_empty_lightcurve():
"""LightCurves with no data should not be allowed."""
err_string = ("either time or flux must be given")
with pytest.raises(ValueError) as err:
LightCurve()
assert err_string == err.value.args[0]
def test_lc_nan_time():
time = np.array([1, 2, 3, np.nan])
flux = np.array([1, 2, 3, 4])
with pytest.warns(LightkurveWarning, match='contains NaN times'):
LightCurve(time=time, flux=flux)
def test_math_operators():
lc = LightCurve(time=np.arange(1, 5), flux=np.arange(1, 5), flux_err=np.arange(1, 5))
lc_add = lc + 1
lc_sub = lc - 1
lc_mul = lc * 2
lc_div = lc / 2
assert_array_equal(lc_add.flux, lc.flux + 1)
assert_array_equal(lc_sub.flux, lc.flux - 1)
assert_array_equal(lc_mul.flux, lc.flux * 2)
assert_array_equal(lc_div.flux, lc.flux / 2)
def test_rmath_operators():
lc = LightCurve(time=np.arange(1, 5), flux=np.arange(1, 5), flux_err=np.arange(1, 5))
lc_add = 1 + lc
lc_sub = 1 - lc
lc_mul = 2 * lc
lc_div = 2 / lc
assert_array_equal(lc_add.flux, lc.flux + 1)
assert_array_equal(lc_sub.flux, 1 - lc.flux)
assert_array_equal(lc_mul.flux, lc.flux * 2)
assert_array_equal(lc_div.flux, 2 / lc.flux)
@pytest.mark.remote_data
@pytest.mark.parametrize("path, mission", [(TABBY_Q8, "Kepler"), (K2_C08, "K2")])
def test_KeplerLightCurveFile(path, mission):
lcf = KeplerLightCurveFile(path, quality_bitmask=None)
assert lcf.obsmode == 'long cadence'
assert len(lcf.pos_corr1) == len(lcf.pos_corr2)
# The liberal bitmask will cause the lightcurve to contain NaN times
with pytest.warns(LightkurveWarning, match='NaN times'):
lc = lcf.get_lightcurve('SAP_FLUX')
assert lc.channel == lcf.channel
assert lc.mission.lower() == mission.lower()
if lc.mission.lower() == 'kepler':
assert lc.campaign is None
assert lc.quarter == 8
elif lc.mission.lower() == 'k2':
assert lc.campaign == 8
assert lc.quarter is None
assert lc.time_format == 'bkjd'
assert lc.time_scale == 'tdb'
assert lc.astropy_time.scale == 'tdb'
# Does the data match what one would obtain using pyfits.open?
hdu = pyfits.open(path)
assert lc.label == hdu[0].header['OBJECT']
assert_array_equal(lc.time, hdu[1].data['TIME'])
assert_array_equal(lc.flux, hdu[1].data['SAP_FLUX'] / ((hdu[1].header['CROWDSAP'] * hdu[1].header['FLFRCSAP'])))
with pytest.raises(KeyError):
lcf.get_lightcurve('BLABLA')
@pytest.mark.remote_data
@pytest.mark.parametrize("quality_bitmask",
['hardest', 'hard', 'default', None,
1, 100, 2096639])
def test_TessLightCurveFile(quality_bitmask):
tess_file = TessLightCurveFile(TESS_SIM, quality_bitmask=quality_bitmask)
hdu = pyfits.open(TESS_SIM)
lc = tess_file.SAP_FLUX
assert lc.mission == 'TESS'
assert lc.label == hdu[0].header['OBJECT']
assert lc.time_format == 'btjd'
assert lc.time_scale == 'tdb'
assert_array_equal(lc.time[0:10], hdu[1].data['TIME'][0:10])
assert_array_equal(lc.flux[0:10], hdu[1].data['SAP_FLUX'][0:10])
# Regression test for https://github.com/KeplerGO/lightkurve/pull/236
assert np.isnan(lc.time).sum() == 0
with pytest.raises(KeyError):
tess_file.get_lightcurve('DOESNOTEXIST')
@pytest.mark.remote_data
@pytest.mark.parametrize("quality_bitmask, answer", [('hardest', 2661),
('hard', 2706), ('default', 3113), (None, 3279),
(1, 3279), (100, 3252), (2096639, 2661)])
def test_bitmasking(quality_bitmask, answer):
"""Test whether the bitmasking behaves like it should"""
lcf = KeplerLightCurveFile(TABBY_Q8, quality_bitmask=quality_bitmask)
with warnings.catch_warnings():
# Ignore "LightCurve contains NaN times" warnings triggered by liberal masks
warnings.simplefilter("ignore", LightkurveWarning)
flux = lcf.get_lightcurve('SAP_FLUX').flux
assert len(flux) == answer
def test_lightcurve_fold():
"""Test the ``LightCurve.fold()`` method."""
lc = LightCurve(time=np.linspace(0, 10, 100), flux=np.zeros(100)+1,
targetid=999, label='mystar', meta={'ccd': 2}, time_format='bkjd')
fold = lc.fold(period=1)
assert_almost_equal(fold.phase[0], -0.5, 2)
assert_almost_equal(np.min(fold.phase), -0.5, 2)
assert_almost_equal(np.max(fold.phase), 0.5, 2)
assert fold.targetid == lc.targetid
assert fold.label == lc.label
assert fold.meta == lc.meta
assert_array_equal(np.sort(fold.time_original), lc.time)
assert len(fold.time_original) == len(lc.time)
fold = lc.fold(period=1, t0=-0.1)
assert_almost_equal(fold.time[0], -0.5, 2)
assert_almost_equal(np.min(fold.phase), -0.5, 2)
assert_almost_equal(np.max(fold.phase), 0.5, 2)
with warnings.catch_warnings():
# `transit_midpoint` is deprecated and its use will emit a warning
warnings.simplefilter("ignore", LightkurveWarning)
fold = lc.fold(period=1, transit_midpoint=-0.1)
assert_almost_equal(fold.time[0], -0.5, 2)
ax = fold.plot()
assert (ax.get_xlabel() == 'Phase')
ax = fold.scatter()
assert (ax.get_xlabel() == 'Phase')
ax = fold.errorbar()
assert (ax.get_xlabel() == 'Phase')
plt.close('all')
# bad transit midpoint should give a warning
# if user tries a t0 in JD but time is in BKJD
with pytest.warns(LightkurveWarning, match='appears to be given in JD'):
lc.fold(10, 2456600)
def test_lightcurve_fold_issue520():
"""Regression test for #520; accept quantities in `fold()`."""
lc = LightCurve(time=np.linspace(0, 10, 100), flux=np.zeros(100)+1)
lc.fold(period=1*u.day, t0=5*u.day)
def test_lightcurve_append():
"""Test ``LightCurve.append()``."""
lc = LightCurve(time=[1, 2, 3], flux=[1, .5, 1], flux_err=[0.1, 0.2, 0.3])
lc = lc.append(lc)
assert_array_equal(lc.time, 2*[1, 2, 3])
assert_array_equal(lc.flux, 2*[1, .5, 1])
assert_array_equal(lc.flux_err, 2*[0.1, 0.2, 0.3])
# KeplerLightCurve has extra data
lc = KeplerLightCurve(time=[1, 2, 3], flux=[1, .5, 1],
centroid_col=[4, 5, 6], centroid_row=[7, 8, 9],
cadenceno=[10, 11, 12], quality=[10, 20, 30])
lc = lc.append(lc)
assert_array_equal(lc.time, 2*[1, 2, 3])
assert_array_equal(lc.flux, 2*[1, .5, 1])
assert_array_equal(lc.centroid_col, 2*[4, 5, 6])
assert_array_equal(lc.centroid_row, 2*[7, 8, 9])
assert_array_equal(lc.cadenceno, 2*[10, 11, 12])
assert_array_equal(lc.quality, 2*[10, 20, 30])
def test_lightcurve_append_multiple():
"""Test ``LightCurve.append()`` for multiple lightcurves at once."""
lc = LightCurve(time=[1, 2, 3], flux=[1, .5, 1])
lc = lc.append([lc, lc, lc])
assert_array_equal(lc.flux, 4*[1, .5, 1])
assert_array_equal(lc.time, 4*[1, 2, 3])
def test_lightcurve_copy():
"""Test ``LightCurve.copy()``."""
time = np.array([1, 2, 3, 4])
flux = np.array([1, 2, 3, 4])
error = np.array([0.1, 0.2, 0.3, 0.4])
lc = LightCurve(time=time, flux=flux, flux_err=error)
nlc = lc.copy()
assert_array_equal(lc.time, nlc.time)
assert_array_equal(lc.flux, nlc.flux)
assert_array_equal(lc.flux_err, nlc.flux_err)
nlc.time[1] = 5
nlc.flux[1] = 6
nlc.flux_err[1] = 7
# By changing 1 of the 4 data points in the new lightcurve's array-like
# attributes, we expect assert_array_equal to raise an AssertionError
# indicating a mismatch of 1/4 (or 25%).
with pytest.raises(AssertionError, match=r'ismatch.* 25'):
assert_array_equal(lc.time, nlc.time)
with pytest.raises(AssertionError, match=r'ismatch.* 25'):
assert_array_equal(lc.flux, nlc.flux)
with pytest.raises(AssertionError, match=r'ismatch.* 25'):
assert_array_equal(lc.flux_err, nlc.flux_err)
# KeplerLightCurve has extra data
lc = KeplerLightCurve(time=[1, 2, 3], flux=[1, .5, 1],
centroid_col=[4, 5, 6], centroid_row=[7, 8, 9],
cadenceno=[10, 11, 12], quality=[10, 20, 30])
nlc = lc.copy()
assert_array_equal(lc.time, nlc.time)
assert_array_equal(lc.flux, nlc.flux)
assert_array_equal(lc.centroid_col, nlc.centroid_col)
assert_array_equal(lc.centroid_row, nlc.centroid_row)
assert_array_equal(lc.cadenceno, nlc.cadenceno)
assert_array_equal(lc.quality, nlc.quality)
nlc.time[1] = 6
nlc.flux[1] = 7
nlc.centroid_col[1] = 8
nlc.centroid_row[1] = 9
nlc.cadenceno[1] = 10
nlc.quality[1] = 11
# As before, by changing 1/3 data points, we expect a mismatch of 33.3%
# with a repeating decimal. However, float precision for python 2.7 is 10
# decimal digits, while python 3.6's is 13 decimal digits. Therefore,
# a regular expression is needed for both versions.
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.time, nlc.time)
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.flux, nlc.flux)
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.centroid_col, nlc.centroid_col)
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.centroid_row, nlc.centroid_row)
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.cadenceno, nlc.cadenceno)
with pytest.raises(AssertionError, match=r'ismatch.* 33\.3+'):
assert_array_equal(lc.quality, nlc.quality)
@pytest.mark.parametrize("path, mission", [(filename_tess_custom, "TESS"),
(filename_K2_custom, "K2")])
def test_custom_lightcurve_file(path, mission):
"""Test whether we can read in custom interact()-produced lightcurvefiles"""
if mission == "K2":
lcf_custom = KeplerLightCurveFile(path)
elif mission == "TESS":
lcf_custom = TessLightCurveFile(path)
assert lcf_custom.hdu[2].name == 'APERTURE'
assert lcf_custom.cadenceno[0] >= 0
assert lcf_custom.dec == lcf_custom.dec
assert lcf_custom.time[-1] > lcf_custom.time[0]
# .interact() files currently define FLUX, and not SAP_FLUX nor PDCSAP_FLUX
lc = lcf_custom.get_lightcurve('FLUX')
assert len(lc.flux) > 0
with pytest.raises(KeyError):
lcf_custom.get_lightcurve('BLABLA')
with pytest.raises(KeyError):
lcf_custom.SAP_FLUX
with pytest.raises(KeyError):
lcf_custom.PDCSAP_FLUX
assert lc.mission.lower() == mission.lower()
# Does the data match what one would obtain using pyfits.open?
hdu = pyfits.open(path)
assert lc.label == hdu[0].header['OBJECT']
assert_array_equal(lc.time, hdu[1].data['TIME'])
assert_array_equal(lc.flux, hdu[1].data['FLUX'])
# TESS has QUALITY while Kepler/K2 has SAP_QUALITY:
if mission == "TESS":
assert "QUALITY" in lcf_custom.hdu[1].columns.names
assert_array_equal(lc.quality, hdu[1].data['QUALITY'])
if mission in ["K2", "Kepler"]:
assert "SAP_QUALITY" in lcf_custom.hdu[1].columns.names
assert_array_equal(lc.quality, hdu[1].data['SAP_QUALITY'])
@pytest.mark.remote_data
def test_lightcurve_plots():
"""Sanity check to verify that lightcurve plotting works"""
for lcf in [KeplerLightCurveFile(TABBY_Q8), TessLightCurveFile(TESS_SIM)]:
lcf.plot()
lcf.plot(flux_types=['SAP_FLUX', 'PDCSAP_FLUX'])
lcf.scatter()
lcf.errorbar()
lcf.SAP_FLUX.plot()
lcf.SAP_FLUX.plot(normalize=False, title="Not the default")
lcf.SAP_FLUX.scatter()
lcf.SAP_FLUX.scatter(c='C3')
lcf.SAP_FLUX.scatter(c=lcf.SAP_FLUX.time, show_colorbar=True, colorbar_label='Time')
lcf.SAP_FLUX.errorbar()
plt.close('all')
@pytest.mark.remote_data
def test_lightcurve_scatter():
"""Sanity check to verify that lightcurve scatter plotting works"""
lcf = KeplerLightCurveFile(KEPLER10)
lc = lcf.PDCSAP_FLUX.flatten()
# get an array of original times, in the same order as the folded lightcurve
foldkw = dict(period=0.837491)
originaltime = LightCurve(lc.time, lc.time)
foldedtimeinorder = originaltime.fold(**foldkw).flux
# plot a grid of phase-folded and not, with colors
fi, ax = plt.subplots(2, 2, figsize=(10,6), sharey=True, sharex='col')
scatterkw = dict( s=5, cmap='winter')
lc.scatter(ax=ax[0,0])
lc.fold(**foldkw).scatter(ax=ax[0,1])
lc.scatter(ax=ax[1,0], c=lc.time, **scatterkw)
lc.fold(**foldkw).scatter(ax=ax[1,1], c=foldedtimeinorder, **scatterkw)
plt.ylim(0.999, 1.001)
def test_cdpp():
"""Test the basics of the CDPP noise metric."""
# A flat lightcurve should have a CDPP close to zero
assert_almost_equal(LightCurve(np.arange(200), np.ones(200)).estimate_cdpp(), 0)
# An artificial lightcurve with sigma=100ppm should have cdpp=100ppm
lc = LightCurve(np.arange(10000), np.random.normal(loc=1, scale=100e-6, size=10000))
assert_almost_equal(lc.estimate_cdpp(transit_duration=1), 100, decimal=-0.5)
# Transit_duration must be an integer (cadences)
with pytest.raises(ValueError):
lc.estimate_cdpp(transit_duration=6.5)
@pytest.mark.remote_data
def test_cdpp_tabby():
"""Compare the cdpp noise metric against the pipeline value."""
lcf = KeplerLightCurveFile(TABBY_Q8)
# Tabby's star shows dips after cadence 1000 which increase the cdpp
lc = LightCurve(lcf.PDCSAP_FLUX.time[:1000], lcf.PDCSAP_FLUX.flux[:1000])
assert(np.abs(lc.estimate_cdpp() - lcf.header(ext=1)['CDPP6_0']) < 30)
def test_bin():
"""Does binning work?"""
lc = LightCurve(time=np.arange(10),
flux=2*np.ones(10),
flux_err=2**.5*np.ones(10))
binned_lc = lc.bin(binsize=2)
assert_allclose(binned_lc.flux, 2*np.ones(5))
assert_allclose(binned_lc.flux_err, np.ones(5))
assert len(binned_lc.time) == 5
with pytest.raises(ValueError):
lc.bin(method='doesnotexist')
# If `flux_err` is missing, the errors on the bins should be the stddev
lc = LightCurve(time=np.arange(10),
flux=2*np.ones(10))
binned_lc = lc.bin(binsize=2)
assert_allclose(binned_lc.flux_err, np.zeros(5))
# Regression test for #377
lc = KeplerLightCurve(time=np.arange(10),
flux=2*np.ones(10))
lc.bin(5).remove_outliers()
# Second regression test for #377
lc = KeplerLightCurve(time=np.arange(1000) * 0.02,
flux=1*np.ones(1000) + np.random.normal(0, 1e-6, 1000),
cadenceno=np.arange(1000))
assert np.isclose(lc.bin(2).estimate_cdpp(), 1, rtol=1)
# Regression test for #500
lc = LightCurve(time=np.arange(2000),
flux=np.random.normal(loc=42, scale=0.01, size=2000))
assert np.round(lc.bin(2000).flux_err[0], 2) == 0.01
def test_bin_quality():
"""Binning must also revise the quality and centroid columns."""
lc = KeplerLightCurve(time=[1, 2, 3, 4],
flux=[1, 1, 1, 1],
quality=[0, 1, 2, 3],
centroid_col=[0, 1, 0, 1],
centroid_row=[0, 2, 0, 2])
binned_lc = lc.bin(binsize=2)
assert_allclose(binned_lc.quality, [1, 3]) # Expect bitwise or
assert_allclose(binned_lc.centroid_col, [0.5, 0.5]) # Expect mean
assert_allclose(binned_lc.centroid_row, [1, 1]) # Expect mean
def test_normalize():
"""Does the `LightCurve.normalize()` method normalize the flux?"""
lc = LightCurve(time=np.arange(10), flux=5*np.ones(10), flux_err=0.05*np.ones(10))
assert_allclose(np.median(lc.normalize().flux), 1)
assert_allclose(np.median(lc.normalize().flux_err), 0.05/5)
def test_to_pandas():
"""Test the `LightCurve.to_pandas()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
lc = LightCurve(time, flux, flux_err)
try:
df = lc.to_pandas()
assert_allclose(df.index, time)
assert_allclose(df.flux, flux)
assert_allclose(df.flux_err, flux_err)
df.describe() # Will fail if for Endianness bugs
except ImportError:
# pandas is an optional dependency
pass
def test_to_pandas_kepler():
"""When to_pandas() is executed on a KeplerLightCurve, it should include
extra columns such as `quality`."""
time, flux, quality = range(3), np.ones(3), np.zeros(3)
lc = KeplerLightCurve(time, flux, quality=quality)
try:
df = lc.to_pandas()
assert_allclose(df.quality, quality)
except ImportError:
# pandas is an optional dependency
pass
def test_to_table():
"""Test the `LightCurve.to_table()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
lc = LightCurve(time, flux, flux_err)
tbl = lc.to_table()
assert_allclose(tbl['time'], time)
assert_allclose(tbl['flux'], flux)
assert_allclose(tbl['flux_err'], flux_err)
def test_to_csv():
"""Test the `LightCurve.to_csv()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
try:
lc = LightCurve(time, flux, flux_err)
assert(lc.to_csv(index=False, line_terminator='\n') == 'time,flux,flux_err\n0,1.0,0.0\n1,1.0,0.0\n2,1.0,0.0\n')
except ImportError:
# pandas is an optional dependency
pass
@pytest.mark.remote_data
def test_to_fits():
"""Test the KeplerLightCurve.to_fits() method"""
lcf = KeplerLightCurveFile(TABBY_Q8)
hdu = lcf.PDCSAP_FLUX.to_fits()
KeplerLightCurveFile(hdu) # Regression test for #233
    assert type(hdu).__name__ == 'HDUList'
assert len(hdu) == 2
assert hdu[0].header['EXTNAME'] == 'PRIMARY'
assert hdu[1].header['EXTNAME'] == 'LIGHTCURVE'
assert hdu[1].header['TTYPE1'] == 'TIME'
assert hdu[1].header['TTYPE2'] == 'FLUX'
assert hdu[1].header['TTYPE3'] == 'FLUX_ERR'
assert hdu[1].header['TTYPE4'] == 'CADENCENO'
hdu = LightCurve([0, 1, 2, 3, 4], [1, 1, 1, 1, 1]).to_fits()
# Test "round-tripping": can we read-in what we write
lcf_new = LightCurveFile(hdu) # Regression test for #233
assert hdu[0].header['EXTNAME'] == 'PRIMARY'
assert hdu[1].header['EXTNAME'] == 'LIGHTCURVE'
assert hdu[1].header['TTYPE1'] == 'TIME'
assert hdu[1].header['TTYPE2'] == 'FLUX'
# Test aperture mask support in to_fits
for tpf in [KeplerTargetPixelFile(TABBY_TPF), TessTargetPixelFile(filename_tess)]:
random_mask = np.random.randint(0, 2, size=tpf.flux[0].shape, dtype=bool)
thresh_mask = tpf.create_threshold_mask(threshold=3)
lc = tpf.to_lightcurve(aperture_mask=random_mask)
lc.to_fits(path=tempfile.NamedTemporaryFile().name, aperture_mask=random_mask)
lc.to_fits(path=tempfile.NamedTemporaryFile().name, overwrite=True,
flux_column_name='SAP_FLUX')
lc = tpf[0:2].to_lightcurve(aperture_mask=thresh_mask)
lc.to_fits(aperture_mask=thresh_mask, path=tempfile.NamedTemporaryFile().name)
# Test the extra data kwargs
bkg_mask = ~tpf.create_threshold_mask(threshold=0.1)
bkg_lc = tpf.to_lightcurve(aperture_mask=bkg_mask)
lc = tpf.to_lightcurve(aperture_mask=tpf.hdu['APERTURE'].data)
lc = tpf.to_lightcurve(aperture_mask=None)
lc = tpf.to_lightcurve(aperture_mask=thresh_mask)
lc_out = lc - bkg_lc.flux * (thresh_mask.sum()/bkg_mask.sum())
lc_out.to_fits(aperture_mask=thresh_mask, path=tempfile.NamedTemporaryFile().name,
overwrite=True, extra_data={'BKG': bkg_lc.flux})
@pytest.mark.remote_data
def test_astropy_time():
'''Test the `astropy_time` property'''
lcf = KeplerLightCurveFile(TABBY_Q8)
astropy_time = lcf.astropy_time
iso = astropy_time.iso
assert astropy_time.scale == 'tdb'
assert len(iso) == len(lcf.time)
#assert iso[0] == '2011-01-06 20:45:08.811'
#assert iso[-1] == '2011-03-14 20:18:16.734'
def test_astropy_time_bkjd():
"""Does `LightCurve.astropy_time` support bkjd?"""
bkjd = np.array([100, 200])
lc = LightCurve(time=[100, 200], time_format='bkjd')
assert_allclose(lc.astropy_time.jd, bkjd + 2454833.)
def test_lightcurve_repr():
"""Do __str__ and __repr__ work?"""
time, flux = range(3), np.ones(3)
str(LightCurve(time, flux))
str(KeplerLightCurve(time, flux))
str(TessLightCurve(time, flux))
repr(LightCurve(time, flux))
repr(KeplerLightCurve(time, flux))
repr(TessLightCurve(time, flux))
@pytest.mark.remote_data
def test_lightcurvefile_repr():
"""Do __str__ and __repr__ work?"""
lcf = KeplerLightCurveFile(TABBY_Q8)
str(lcf)
repr(lcf)
lcf = TessLightCurveFile(TESS_SIM)
str(lcf)
repr(lcf)
def test_slicing():
"""Does LightCurve.__getitem__() allow slicing?"""
time = np.linspace(0, 10, 10)
flux = np.linspace(100, 200, 10)
flux_err = np.linspace(5, 50, 10)
lc = LightCurve(time, flux, flux_err)
assert_array_equal(lc[0:5].time, time[0:5])
assert_array_equal(lc[2::2].flux, flux[2::2])
assert_array_equal(lc[5:9:-1].flux_err, flux_err[5:9:-1])
# KeplerLightCurves contain additional data arrays that need to be sliced
centroid_col = np.linspace(40, 50, 10)
centroid_row = np.linspace(50, 60, 10)
quality = np.linspace(70, 80, 10)
cadenceno = np.linspace(90, 100, 10)
lc = KeplerLightCurve(time, flux, flux_err,
centroid_col=centroid_col,
centroid_row=centroid_row,
cadenceno=cadenceno,
quality=quality)
assert_array_equal(lc[::3].centroid_col, centroid_col[::3])
assert_array_equal(lc[4:].centroid_row, centroid_row[4:])
assert_array_equal(lc[10:2].quality, quality[10:2])
assert_array_equal(lc[3:6].cadenceno, cadenceno[3:6])
# The same is true for TessLightCurve
lc = TessLightCurve(time, flux, flux_err,
centroid_col=centroid_col,
centroid_row=centroid_row,
cadenceno=cadenceno,
quality=quality)
assert_array_equal(lc[::4].centroid_col, centroid_col[::4])
assert_array_equal(lc[5:].centroid_row, centroid_row[5:])
assert_array_equal(lc[10:3].quality, quality[10:3])
assert_array_equal(lc[4:6].cadenceno, cadenceno[4:6])
def test_boolean_masking():
lc = KeplerLightCurve(time=[1, 2, 3], flux=[1, 1, 10],
quality=[0, 0, 200], cadenceno=[5, 6, 7])
assert_array_equal(lc[lc.flux < 5].time, [1, 2])
assert_array_equal(lc[lc.flux < 5].flux, [1, 1])
assert_array_equal(lc[lc.flux < 5].quality, [0, 0])
assert_array_equal(lc[lc.flux < 5].cadenceno, [5, 6])
def test_remove_nans():
"""Does LightCurve.__getitem__() allow slicing?"""
time, flux = [1, 2, 3, 4], [100, np.nan, 102, np.nan]
lc_clean = LightCurve(time, flux).remove_nans()
assert_array_equal(lc_clean.time, [1, 3])
assert_array_equal(lc_clean.flux, [100, 102])
def test_remove_outliers():
# Does `remove_outliers()` remove outliers?
lc = LightCurve([1, 2, 3, 4], [1, 1, 1000, 1])
lc_clean = lc.remove_outliers(sigma=1)
assert_array_equal(lc_clean.time, [1, 2, 4])
assert_array_equal(lc_clean.flux, [1, 1, 1])
# It should also be possible to return the outlier mask
lc_clean, outlier_mask = lc.remove_outliers(sigma=1, return_mask=True)
assert(len(outlier_mask) == len(lc.flux))
assert(outlier_mask.sum() == 1)
# Can we set sigma_lower and sigma_upper?
lc = LightCurve(time=[1, 2, 3, 4, 5], flux=[1, 1000, 1, -1000, 1])
lc_clean = lc.remove_outliers(sigma_lower=float('inf'), sigma_upper=1)
assert_array_equal(lc_clean.time, [1, 3, 4, 5])
assert_array_equal(lc_clean.flux, [1, 1, -1000, 1])
@pytest.mark.remote_data
def test_properties(capfd):
'''Test if the describe function produces an output.
The output is 624 characters at the moment, but we might add more properties.'''
lcf = KeplerLightCurveFile(TABBY_Q8)
kplc = lcf.get_lightcurve('SAP_FLUX')
kplc.show_properties()
out, _ = capfd.readouterr()
assert len(out) > 500
def test_flatten_with_nans():
"""Flatten should not remove NaNs."""
lc = LightCurve(time=[1, 2, 3, 4, 5],
flux=[np.nan, 1.1, 1.2, np.nan, 1.4],
flux_err=[1.0, np.nan, 1.2, 1.3, np.nan])
flat_lc = lc.flatten(window_length=3)
assert(len(flat_lc.time) == 5)
assert(np.isfinite(flat_lc.flux).sum() == 3)
assert(np.isfinite(flat_lc.flux_err).sum() == 3)
def test_flatten_robustness():
"""Test various special cases for flatten()."""
# flatten should work with integer fluxes
lc = LightCurve([1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60])
expected_result = np.array([1., 1., 1., 1., 1., 1.])
flat_lc = lc.flatten(window_length=3, polyorder=1)
assert_allclose(flat_lc.flux, expected_result)
# flatten should work even if `window_length > len(flux)`
flat_lc = lc.flatten(window_length=7, polyorder=1)
assert_allclose(flat_lc.flux, flat_lc.flux / np.median(flat_lc.flux))
# flatten should work even if `polyorder >= window_length`
flat_lc = lc.flatten(window_length=3, polyorder=3)
assert_allclose(flat_lc.flux, expected_result)
flat_lc = lc.flatten(window_length=3, polyorder=5)
assert_allclose(flat_lc.flux, expected_result)
# flatten should work even if `break_tolerance = None`
flat_lc = lc.flatten(window_length=3, break_tolerance=None)
assert_allclose(flat_lc.flux, expected_result)
flat_lc, trend_lc = lc.flatten(return_trend=True)
assert_allclose(flat_lc.time, trend_lc.time)
assert_allclose(lc.flux, flat_lc.flux * trend_lc.flux)
def test_iterative_flatten():
'''Test the iterative sigma clipping in flatten '''
# Test a light curve with a single, buried outlier.
x = np.arange(2000)
y = np.sin(x/200)/100 + 1
y[250] -= 0.01
lc = LightCurve(x, y)
# Flatten it
c, f = lc.flatten(window_length=25, niters=2, sigma=3, return_trend=True)
# Only one outlier should remain.
assert np.isclose(c.flux, 1, rtol=0.00001).sum() == 1999
mask = np.zeros(2000, dtype=bool)
mask[250] = True
# Flatten it using a mask to remove the bad data point.
c, f = lc.flatten(window_length=25, niters=1, sigma=3, mask=mask,
return_trend=True)
# Only one outlier should remain.
assert np.isclose(c.flux, 1, rtol=0.00001).sum() == 1999
def test_fill_gaps():
lc = LightCurve([1,2,3,4,6,7,8], [1,1,1,1,1,1,1])
nlc = lc.fill_gaps()
assert(len(lc.time) < len(nlc.time))
assert(np.any(nlc.time == 5))
assert(np.all(nlc.flux == 1))
lc = LightCurve([1,2,3,4,6,7,8], [1,1,np.nan,1,1,1,1])
nlc = lc.fill_gaps()
assert(len(lc.time) < len(nlc.time))
assert(np.any(nlc.time == 5))
assert(np.all(nlc.flux == 1))
assert(np.all(np.isfinite(nlc.flux)))
# Because fill_gaps() uses pandas, check that it works regardless of endianness
# For details see https://github.com/KeplerGO/lightkurve/issues/188
lc = LightCurve(np.array([1, 2, 3, 4, 6, 7, 8], dtype='>f8'),
np.array([1, 1, 1, np.nan, np.nan, 1, 1], dtype='>f8'))
lc.fill_gaps()
lc = LightCurve(np.array([1, 2, 3, 4, 6, 7, 8], dtype='<f8'),
np.array([1, 1, 1, np.nan, np.nan, 1, 1], dtype='<f8'))
lc.fill_gaps()
def test_targetid():
"""Is a generic targetid available on each type of LighCurve object?"""
lc = LightCurve(time=[], targetid=5)
assert lc.targetid == 5
# Can we assign a new value?
lc.targetid = 99
assert lc.targetid == 99
# Does it work for Kepler?
lc = KeplerLightCurve(time=[], targetid=10)
assert lc.targetid == 10
# Can we assign a new value?
lc.targetid = 99
assert lc.targetid == 99
# Does it work for TESS?
lc = TessLightCurve(time=[], targetid=20)
assert lc.targetid == 20
def test_regression_346():
"""Regression test for https://github.com/KeplerGO/lightkurve/issues/346"""
# This previously triggered an IndexError:
KeplerLightCurveFile(K2_C08).PDCSAP_FLUX.to_corrector().correct().estimate_cdpp()
def test_to_timeseries():
"""Test the `LightCurve.to_timeseries()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
lc = LightCurve(time, flux, flux_err, time_format="jd")
try:
ts = lc.to_timeseries()
assert_allclose(ts['time'].value, time)
assert_allclose(ts['flux'], flux)
assert_allclose(ts['flux_err'], flux_err)
except ImportError:
# Requires AstroPy v3.2 or later
pass
|
the-stack_0_14452 | """empty message
Revision ID: 5b4a3e3232c8
Revises: 6b071c7c748f
Create Date: 2021-03-13 23:07:54.586777
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5b4a3e3232c8'
down_revision = '6b071c7c748f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('predictions', sa.Column('confidence', sa.Float(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('predictions', 'confidence')
# ### end Alembic commands ###
|
the-stack_0_14455 | # engine/base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import with_statement
import contextlib
import sys
from .interfaces import Connectable
from .interfaces import ExceptionContext
from .util import _distill_params
from .. import exc
from .. import inspection
from .. import log
from .. import util
from ..sql import compiler
from ..sql import util as sql_util
"""Defines :class:`.Connection` and :class:`.Engine`.
"""
class Connection(Connectable):
"""Provides high-level functionality for a wrapped DB-API connection.
Provides execution support for string-based SQL statements as well as
:class:`.ClauseElement`, :class:`.Compiled` and :class:`.DefaultGenerator`
objects. Provides a :meth:`begin` method to return :class:`.Transaction`
objects.
The Connection object is **not** thread-safe. While a Connection can be
shared among threads using properly synchronized access, it is still
possible that the underlying DBAPI connection may not support shared
access between threads. Check the DBAPI documentation for details.
The Connection object represents a single dbapi connection checked out
    from the connection pool. In this state, the connection pool has no effect
upon the connection, including its expiration or timeout state. For the
connection pool to properly manage connections, connections should be
returned to the connection pool (i.e. ``connection.close()``) whenever the
connection is not in use.
.. index::
single: thread safety; Connection
"""
_schema_translate_map = None
def __init__(
self,
engine,
connection=None,
close_with_result=False,
_branch_from=None,
_execution_options=None,
_dispatch=None,
_has_events=None,
):
"""Construct a new Connection.
"""
self.engine = engine
self.dialect = engine.dialect
self.__branch_from = _branch_from
self.__branch = _branch_from is not None
if _branch_from:
self.__connection = connection
self._execution_options = _execution_options
self._echo = _branch_from._echo
self.should_close_with_result = False
self.dispatch = _dispatch
self._has_events = _branch_from._has_events
self._schema_translate_map = _branch_from._schema_translate_map
else:
self.__connection = (
connection
if connection is not None
else engine.raw_connection()
)
self.__transaction = None
self.__savepoint_seq = 0
self.should_close_with_result = close_with_result
self.__invalid = False
self.__can_reconnect = True
self._echo = self.engine._should_log_info()
if _has_events is None:
# if _has_events is sent explicitly as False,
# then don't join the dispatch of the engine; we don't
# want to handle any of the engine's events in that case.
self.dispatch = self.dispatch._join(engine.dispatch)
self._has_events = _has_events or (
_has_events is None and engine._has_events
)
assert not _execution_options
self._execution_options = engine._execution_options
if self._has_events or self.engine._has_events:
self.dispatch.engine_connect(self, self.__branch)
def schema_for_object(self, obj):
"""return the schema name for the given schema item taking into
account current schema translate map.
"""
name = obj.schema
schema_translate_map = self._schema_translate_map
if (
schema_translate_map
and name in schema_translate_map
and obj._use_schema_map
):
return schema_translate_map[name]
else:
return name
def _branch(self):
"""Return a new Connection which references this Connection's
engine and connection; but does not have close_with_result enabled,
and also whose close() method does nothing.
The Core uses this very sparingly, only in the case of
custom SQL default functions that are to be INSERTed as the
primary key of a row where we need to get the value back, so we have
to invoke it distinctly - this is a very uncommon case.
Userland code accesses _branch() when the connect()
method is called. The branched connection
acts as much as possible like the parent, except that it stays
connected when a close() event occurs.
"""
if self.__branch_from:
return self.__branch_from._branch()
else:
return self.engine._connection_cls(
self.engine,
self.__connection,
_branch_from=self,
_execution_options=self._execution_options,
_has_events=self._has_events,
_dispatch=self.dispatch,
)
@property
def _root(self):
"""return the 'root' connection.
Returns 'self' if this connection is not a branch, else
returns the root connection from which we ultimately branched.
"""
if self.__branch_from:
return self.__branch_from
else:
return self
def _clone(self):
"""Create a shallow copy of this Connection.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
return c
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
def execution_options(self, **opt):
r""" Set non-SQL options for the connection which take effect
during execution.
The method returns a copy of this :class:`.Connection` which references
the same underlying DBAPI connection, but also defines the given
execution options which will take effect for a call to
:meth:`execute`. As the new :class:`.Connection` references the same
underlying resource, it's usually a good idea to ensure that the copies
will be discarded immediately, which is implicit if used as in::
result = connection.execution_options(stream_results=True).\
execute(stmt)
Note that any key/value can be passed to
:meth:`.Connection.execution_options`, and it will be stored in the
``_execution_options`` dictionary of the :class:`.Connection`. It
is suitable for usage by end-user schemes to communicate with
event listeners, for example.
The keywords that are currently recognized by SQLAlchemy itself
include all those listed under :meth:`.Executable.execution_options`,
as well as others that are specific to :class:`.Connection`.
:param autocommit: Available on: Connection, statement.
When True, a COMMIT will be invoked after execution
when executed in 'autocommit' mode, i.e. when an explicit
transaction is not begun on the connection. Note that DBAPI
connections by default are always in a transaction - SQLAlchemy uses
rules applied to different kinds of statements to determine if
COMMIT will be invoked in order to provide its "autocommit" feature.
Typically, all INSERT/UPDATE/DELETE statements as well as
CREATE/DROP statements have autocommit behavior enabled; SELECT
constructs do not. Use this option when invoking a SELECT or other
specific SQL construct where COMMIT is desired (typically when
calling stored procedures and such), and an explicit
transaction is not in progress.
:param compiled_cache: Available on: Connection.
A dictionary where :class:`.Compiled` objects
will be cached when the :class:`.Connection` compiles a clause
expression into a :class:`.Compiled` object.
It is the user's responsibility to
manage the size of this dictionary, which will have keys
corresponding to the dialect, clause element, the column
names within the VALUES or SET clause of an INSERT or UPDATE,
as well as the "batch" mode for an INSERT or UPDATE statement.
The format of this dictionary is not guaranteed to stay the
same in future releases.
Note that the ORM makes use of its own "compiled" caches for
some operations, including flush operations. The caching
used by the ORM internally supersedes a cache dictionary
specified here.
:param isolation_level: Available on: :class:`.Connection`.
Set the transaction isolation level for the lifespan of this
:class:`.Connection` object. Valid values include those string
values accepted by the :paramref:`.create_engine.isolation_level`
parameter passed to :func:`.create_engine`. These levels are
semi-database specific; see individual dialect documentation for
valid levels.
The isolation level option applies the isolation level by emitting
statements on the DBAPI connection, and **necessarily affects the
original Connection object overall**, not just the copy that is
returned by the call to :meth:`.Connection.execution_options`
method. The isolation level will remain at the given setting until
the DBAPI connection itself is returned to the connection pool, i.e.
the :meth:`.Connection.close` method on the original
:class:`.Connection` is called, where an event handler will emit
additional statements on the DBAPI connection in order to revert the
isolation level change.
.. warning:: The ``isolation_level`` execution option should
**not** be used when a transaction is already established, that
is, the :meth:`.Connection.begin` method or similar has been
called. A database cannot change the isolation level on a
transaction in progress, and different DBAPIs and/or
SQLAlchemy dialects may implicitly roll back or commit
the transaction, or not affect the connection at all.
.. note:: The ``isolation_level`` execution option is implicitly
reset if the :class:`.Connection` is invalidated, e.g. via
the :meth:`.Connection.invalidate` method, or if a
disconnection error occurs. The new connection produced after
the invalidation will not have the isolation level re-applied
to it automatically.
.. seealso::
:paramref:`.create_engine.isolation_level`
- set per :class:`.Engine` isolation level
:meth:`.Connection.get_isolation_level` - view current level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`SQL Server Transaction Isolation <mssql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param no_parameters: When ``True``, if the final parameter
list or dictionary is totally empty, will invoke the
statement on the cursor as ``cursor.execute(statement)``,
not passing the parameter collection at all.
Some DBAPIs such as psycopg2 and mysql-python consider
percent signs as significant only when parameters are
present; this option allows code to generate SQL
containing percent signs (and possibly other characters)
that is neutral regarding whether it's executed by the DBAPI
or piped into a script that's later invoked by
command line tools.
:param stream_results: Available on: Connection, statement.
Indicate to the dialect that results should be
"streamed" and not pre-buffered, if possible. This is a limitation
of many DBAPIs. The flag is currently understood only by the
psycopg2, mysqldb and pymysql dialects.
:param schema_translate_map: Available on: Connection, Engine.
A dictionary mapping schema names to schema names, that will be
applied to the :paramref:`.Table.schema` element of each
:class:`.Table` encountered when SQL or DDL expression elements
are compiled into strings; the resulting schema name will be
converted based on presence in the map of the original name.
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
.. seealso::
:meth:`.Engine.execution_options`
:meth:`.Executable.execution_options`
:meth:`.Connection.get_execution_options`
""" # noqa
c = self._clone()
c._execution_options = c._execution_options.union(opt)
if self._has_events or self.engine._has_events:
self.dispatch.set_connection_execution_options(c, opt)
self.dialect.set_connection_execution_options(c, opt)
return c
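# Illustrative usage sketch (editorial addition, not part of the library
# source): how the copy returned by execution_options() is typically used
# inline and discarded. "some_table" and the in-memory URL are assumptions
# for the example only; stream_results is simply ignored by DBAPIs that do
# not support it.
#
#   from sqlalchemy import create_engine, text
#
#   engine = create_engine("sqlite://")
#   with engine.connect() as conn:
#       result = conn.execution_options(stream_results=True).execute(
#           text("SELECT * FROM some_table")
#       )
#       for row in result:
#           print(row)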
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`.Connection.execution_options`
"""
return self._execution_options
@property
def closed(self):
"""Return True if this connection is closed."""
return (
"_Connection__connection" not in self.__dict__
and not self.__can_reconnect
)
@property
def invalidated(self):
"""Return True if this connection was invalidated."""
return self._root.__invalid
@property
def connection(self):
"""The underlying DB-API connection managed by this Connection.
.. seealso::
:ref:`dbapi_connections`
"""
try:
return self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
pass
try:
return self._revalidate_connection()
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def get_isolation_level(self):
"""Return the current isolation level assigned to this
:class:`.Connection`.
This will typically be the default isolation level as determined
by the dialect, unless the
:paramref:`.Connection.execution_options.isolation_level`
feature has been used to alter the isolation level on a
per-:class:`.Connection` basis.
This attribute will typically perform a live SQL operation in order
to procure the current isolation level, so the value returned is the
actual level on the underlying DBAPI connection regardless of how
this state was set. Compare to the
:attr:`.Connection.default_isolation_level` accessor
which returns the dialect-level setting without performing a SQL
query.
.. versionadded:: 0.9.9
.. seealso::
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.create_engine.isolation_level`
- set per :class:`.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`.Connection` isolation level
"""
try:
return self.dialect.get_isolation_level(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
@property
def default_isolation_level(self):
"""The default isolation level assigned to this :class:`.Connection`.
This is the isolation level setting that the :class:`.Connection`
has when first procured via the :meth:`.Engine.connect` method.
This level stays in place until the
:paramref:`.Connection.execution_options.isolation_level` is used
to change the setting on a per-:class:`.Connection` basis.
Unlike :meth:`.Connection.get_isolation_level`, this attribute is set
ahead of time from the first connection procured by the dialect,
so a SQL query is not invoked when this accessor is called.
.. versionadded:: 0.9.9
.. seealso::
:meth:`.Connection.get_isolation_level` - view current level
:paramref:`.create_engine.isolation_level`
- set per :class:`.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`.Connection` isolation level
"""
return self.dialect.default_isolation_level
def _revalidate_connection(self):
if self.__branch_from:
return self.__branch_from._revalidate_connection()
if self.__can_reconnect and self.__invalid:
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Can't reconnect until invalid "
"transaction is rolled back"
)
self.__connection = self.engine.raw_connection(_connection=self)
self.__invalid = False
return self.__connection
raise exc.ResourceClosedError("This Connection is closed")
@property
def _connection_is_valid(self):
# use getattr() for is_valid to support exceptions raised in
# dialect initializer, where the connection is not wrapped in
# _ConnectionFairy
return getattr(self.__connection, "is_valid", False)
@property
def _still_open_and_connection_is_valid(self):
return (
not self.closed
and not self.invalidated
and getattr(self.__connection, "is_valid", False)
)
@property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.Connection`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`.Connection`.
"""
return self.connection.info
@util.deprecated_20(":meth:`.Connection.connect`")
def connect(self, close_with_result=False):
"""Returns a branched version of this :class:`.Connection`.
The :meth:`.Connection.close` method on the returned
:class:`.Connection` can be called and this
:class:`.Connection` will remain open.
This method provides usage symmetry with
:meth:`.Engine.connect`, including for usage
with context managers.
"""
return self._branch()
def invalidate(self, exception=None):
"""Invalidate the underlying DBAPI connection associated with
this :class:`.Connection`.
The underlying DBAPI connection is literally closed (if
possible), and is discarded. Its source connection pool will
typically lazily create a new connection to replace it.
Upon the next use (where "use" typically means using the
:meth:`.Connection.execute` method or similar),
this :class:`.Connection` will attempt to
procure a new DBAPI connection using the services of the
:class:`.Pool` as a source of connectivity (e.g. a "reconnection").
If a transaction was in progress (e.g. the
:meth:`.Connection.begin` method has been called) when
:meth:`.Connection.invalidate` method is called, at the DBAPI
level all state associated with this transaction is lost, as
the DBAPI connection is closed. The :class:`.Connection`
will not allow a reconnection to proceed until the
:class:`.Transaction` object is ended, by calling the
:meth:`.Transaction.rollback` method; until that point, any attempt at
continuing to use the :class:`.Connection` will raise an
:class:`~sqlalchemy.exc.InvalidRequestError`.
This is to prevent applications from accidentally
continuing ongoing transactional operations despite the
fact that the transaction has been lost due to an
invalidation.
The :meth:`.Connection.invalidate` method, just like auto-invalidation,
will at the connection pool level invoke the
:meth:`.PoolEvents.invalidate` event.
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.invalidated:
return
if self.closed:
raise exc.ResourceClosedError("This Connection is closed")
if self._root._connection_is_valid:
self._root.__connection.invalidate(exception)
del self._root.__connection
self._root.__invalid = True
def detach(self):
"""Detach the underlying DB-API connection from its connection pool.
E.g.::
with engine.connect() as conn:
conn.detach()
conn.execute(text("SET search_path TO schema1, schema2"))
# work with connection
# connection is fully closed (since we used "with:", can
# also call .close())
This :class:`.Connection` instance will remain usable. When closed
(or exited from a context manager context as above),
the DB-API connection will be literally closed and not
returned to its originating pool.
This method can be used to insulate the rest of an application
from a modified state on a connection (such as a transaction
isolation level or similar).
"""
self.__connection.detach()
def begin(self):
"""Begin a transaction and return a transaction handle.
The returned object is an instance of :class:`.Transaction`.
This object represents the "scope" of the transaction,
which completes when either the :meth:`.Transaction.rollback`
or :meth:`.Transaction.commit` method is called.
Nested calls to :meth:`.begin` on the same :class:`.Connection`
will return new :class:`.Transaction` objects that represent
an emulated transaction within the scope of the enclosing
transaction, that is::
trans = conn.begin() # outermost transaction
trans2 = conn.begin() # "nested"
trans2.commit() # does nothing
trans.commit() # actually commits
Calls to :meth:`.Transaction.commit` only have an effect
when invoked via the outermost :class:`.Transaction` object, though the
:meth:`.Transaction.rollback` method of any of the
:class:`.Transaction` objects will roll back the
transaction.
.. seealso::
:meth:`.Connection.begin_nested` - use a SAVEPOINT
:meth:`.Connection.begin_twophase` -
use a two phase /XID transaction
:meth:`.Engine.begin` - context manager available from
:class:`.Engine`
"""
if self.__branch_from:
return self.__branch_from.begin()
if self.__transaction is None:
self.__transaction = RootTransaction(self)
return self.__transaction
else:
return Transaction(self, self.__transaction)
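# Illustrative sketch of Connection.begin() (editorial addition, not part
# of the library source). The table name "mytable" and the in-memory URL
# are assumptions for the example only.
#
#   from sqlalchemy import create_engine, text
#
#   engine = create_engine("sqlite://")
#   with engine.connect() as conn:
#       trans = conn.begin()
#       try:
#           conn.execute(text("INSERT INTO mytable (x) VALUES (1)"))
#           trans.commit()
#       except Exception:
#           trans.rollback()
#           raise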
def begin_nested(self):
"""Begin a nested transaction and return a transaction handle.
The returned object is an instance of :class:`.NestedTransaction`.
Nested transactions require SAVEPOINT support in the
underlying database. Any transaction in the hierarchy may
``commit`` and ``rollback``, however the outermost transaction
still controls the overall ``commit`` or ``rollback`` of the
transaction as a whole.
.. seealso::
:meth:`.Connection.begin`
:meth:`.Connection.begin_twophase`
"""
if self.__branch_from:
return self.__branch_from.begin_nested()
if self.__transaction is None:
self.__transaction = RootTransaction(self)
else:
self.__transaction = NestedTransaction(self, self.__transaction)
return self.__transaction
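# Illustrative SAVEPOINT sketch (editorial addition; assumes a table
# "mytable" exists, an open Connection "conn", and a backend with
# SAVEPOINT support).
#
#   trans = conn.begin()
#   conn.execute(text("INSERT INTO mytable (x) VALUES (1)"))
#   nested = conn.begin_nested()
#   conn.execute(text("INSERT INTO mytable (x) VALUES (2)"))
#   nested.rollback()   # undoes only the second INSERT
#   trans.commit()      # the first INSERT is committed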
def begin_twophase(self, xid=None):
"""Begin a two-phase or XA transaction and return a transaction
handle.
The returned object is an instance of :class:`.TwoPhaseTransaction`,
which in addition to the methods provided by
:class:`.Transaction`, also provides a
:meth:`~.TwoPhaseTransaction.prepare` method.
:param xid: the two phase transaction id. If not supplied, a
random id will be generated.
.. seealso::
:meth:`.Connection.begin`
:meth:`.Connection.begin_nested`
"""
if self.__branch_from:
return self.__branch_from.begin_twophase(xid=xid)
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Cannot start a two phase transaction when a transaction "
"is already in progress."
)
if xid is None:
xid = self.engine.dialect.create_xid()
self.__transaction = TwoPhaseTransaction(self, xid)
return self.__transaction
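# Illustrative two-phase sketch (editorial addition; requires an open
# Connection "conn", a table "mytable", and a dialect with two-phase /
# XA support, e.g. PostgreSQL with prepared transactions enabled).
#
#   xa = conn.begin_twophase()
#   conn.execute(text("INSERT INTO mytable (x) VALUES (1)"))
#   xa.prepare()   # phase one
#   xa.commit()    # phase two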
def recover_twophase(self):
return self.engine.dialect.do_recover_twophase(self)
def rollback_prepared(self, xid, recover=False):
self.engine.dialect.do_rollback_twophase(self, xid, recover=recover)
def commit_prepared(self, xid, recover=False):
self.engine.dialect.do_commit_twophase(self, xid, recover=recover)
def in_transaction(self):
"""Return True if a transaction is in progress."""
return (
self._root.__transaction is not None
and self._root.__transaction.is_active
)
def _begin_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self.engine.logger.info("BEGIN (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin(self)
try:
self.engine.dialect.do_begin(self.connection)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
def _rollback_impl(self, deactivate_only=False):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback(self)
if self._still_open_and_connection_is_valid:
if self._echo:
self.engine.logger.info("ROLLBACK")
try:
self.engine.dialect.do_rollback(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if (
not self.__invalid
and self.connection._reset_agent is self.__transaction
):
self.connection._reset_agent = None
def _commit_impl(self, autocommit=False):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.commit(self)
if self._echo:
self.engine.logger.info("COMMIT")
try:
self.engine.dialect.do_commit(self.connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
finally:
if (
not self.__invalid
and self.connection._reset_agent is self.__transaction
):
self.connection._reset_agent = None
self.__transaction = None
def _savepoint_impl(self, name=None):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.savepoint(self, name)
if name is None:
self.__savepoint_seq += 1
name = "sa_savepoint_%s" % self.__savepoint_seq
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_savepoint(self, name)
return name
def _discard_transaction(self, trans):
if trans is self.__transaction:
if trans._is_root:
assert trans._parent is trans
self.__transaction = None
else:
assert trans._parent is not trans
self.__transaction = trans._parent
def _rollback_to_savepoint_impl(
self, name, context, deactivate_only=False
):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_rollback_to_savepoint(self, name)
def _release_savepoint_impl(self, name, context):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.release_savepoint(self, name, context)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_release_savepoint(self, name)
self.__transaction = context
def _begin_twophase_impl(self, transaction):
assert not self.__branch_from
if self._echo:
self.engine.logger.info("BEGIN TWOPHASE (implicit)")
if self._has_events or self.engine._has_events:
self.dispatch.begin_twophase(self, transaction.xid)
if self._still_open_and_connection_is_valid:
self.engine.dialect.do_begin_twophase(self, transaction.xid)
if self.connection._reset_agent is None:
self.connection._reset_agent = transaction
def _prepare_twophase_impl(self, xid):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.prepare_twophase(self, xid)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
self.engine.dialect.do_prepare_twophase(self, xid)
def _rollback_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.rollback_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_rollback_twophase(
self, xid, is_prepared
)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _commit_twophase_impl(self, xid, is_prepared):
assert not self.__branch_from
if self._has_events or self.engine._has_events:
self.dispatch.commit_twophase(self, xid, is_prepared)
if self._still_open_and_connection_is_valid:
assert isinstance(self.__transaction, TwoPhaseTransaction)
try:
self.engine.dialect.do_commit_twophase(self, xid, is_prepared)
finally:
if self.connection._reset_agent is self.__transaction:
self.connection._reset_agent = None
self.__transaction = None
else:
self.__transaction = None
def _autorollback(self):
if not self._root.in_transaction():
self._root._rollback_impl()
def close(self):
"""Close this :class:`.Connection`.
This results in a release of the underlying database
resources, that is, the DBAPI connection referenced
internally. The DBAPI connection is typically restored
back to the connection-holding :class:`.Pool` referenced
by the :class:`.Engine` that produced this
:class:`.Connection`. Any transactional state present on
the DBAPI connection is also unconditionally released via
the DBAPI connection's ``rollback()`` method, regardless
of any :class:`.Transaction` object that may be
outstanding with regards to this :class:`.Connection`.
After :meth:`~.Connection.close` is called, the
:class:`.Connection` is permanently in a closed state,
and will allow no further operations.
"""
if self.__branch_from:
util.warn_deprecated_20(
"The .close() method on a so-called 'branched' connection is "
"deprecated as of 1.4, as are 'branched' connections overall, "
"and will be removed in a future release. If this is a "
"default-handling function, don't close the connection."
)
try:
del self.__connection
except AttributeError:
pass
finally:
self.__can_reconnect = False
return
try:
conn = self.__connection
except AttributeError:
pass
else:
conn.close()
if conn._reset_agent is self.__transaction:
conn._reset_agent = None
# the close() process can end up invalidating us,
# as the pool will call our transaction as the "reset_agent"
# for rollback(), which can then cause an invalidation
if not self.__invalid:
del self.__connection
self.__can_reconnect = False
self.__transaction = None
def scalar(self, object_, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying result/cursor is closed after execution.
"""
return self.execute(object_, *multiparams, **params).scalar()
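# Illustrative sketch of Connection.scalar() (editorial addition; assumes
# an open Connection "conn" and a table "mytable").
#
#   from sqlalchemy import text
#
#   count = conn.scalar(text("SELECT count(*) FROM mytable"))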
def execute(self, object_, *multiparams, **params):
r"""Executes a SQL statement construct and returns a
:class:`.ResultProxy`.
:param object: The statement to be executed. May be
one of:
* a plain string (deprecated)
* any :class:`.ClauseElement` construct that is also
a subclass of :class:`.Executable`, such as a
:func:`~.expression.select` construct
* a :class:`.FunctionElement`, such as that generated
by :data:`.func`, will be automatically wrapped in
a SELECT statement, which is then executed.
* a :class:`.DDLElement` object
* a :class:`.DefaultGenerator` object
* a :class:`.Compiled` object
.. deprecated:: 2.0 passing a string to :meth:`.Connection.execute` is
deprecated and will be removed in version 2.0. Use the
:func:`~.expression.text` construct with
:meth:`.Connection.execute`, or the
:meth:`.Connection.exec_driver_sql` method to invoke a driver-level
SQL string.
:param \*multiparams/\**params: represent bound parameter
values to be used in the execution. Typically,
the format is either a collection of one or more
dictionaries passed to \*multiparams::
conn.execute(
table.insert(),
{"id":1, "value":"v1"},
{"id":2, "value":"v2"}
)
...or individual key/values interpreted by \**params::
conn.execute(
table.insert(), id=1, value="v1"
)
In the case that a plain SQL string is passed, and the underlying
DBAPI accepts positional bind parameters, a collection of tuples
or individual values in \*multiparams may be passed::
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
(1, "v1"), (2, "v2")
)
conn.execute(
"INSERT INTO table (id, value) VALUES (?, ?)",
1, "v1"
)
Note above, the usage of a question mark "?" or other
symbol is contingent upon the "paramstyle" accepted by the DBAPI
in use, which may be any of "qmark", "named", "pyformat", "format",
"numeric". See `pep-249 <http://www.python.org/dev/peps/pep-0249/>`_
for details on paramstyle.
To execute a textual SQL statement which uses bound parameters in a
DBAPI-agnostic way, use the :func:`~.expression.text` construct.
.. deprecated:: 2.0 use of tuple or scalar positional parameters
is deprecated. All params should be dicts or sequences of dicts.
Use :meth:`.exec_driver_sql` to execute a plain string with
tuple or scalar positional parameters.
"""
if isinstance(object_, util.string_types):
util.warn_deprecated_20(
"Passing a string to Connection.execute() is "
"deprecated and will be removed in version 2.0. Use the "
"text() construct, "
"or the Connection.exec_driver_sql() method to invoke a "
"driver-level SQL string."
)
distilled_params = _distill_params(multiparams, params)
return self._exec_driver_sql_distilled(object_, distilled_params)
try:
meth = object_._execute_on_connection
except AttributeError as err:
util.raise_(
exc.ObjectNotExecutableError(object_), replace_context=err
)
else:
return meth(self, multiparams, params)
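# Illustrative sketch of Connection.execute() with a Core construct
# (editorial addition; the "users" table definition and the open
# Connection "conn" are assumptions for the example only).
#
#   from sqlalchemy import MetaData, Table, Column, Integer, String, select
#
#   metadata = MetaData()
#   users = Table(
#       "users", metadata,
#       Column("id", Integer, primary_key=True),
#       Column("name", String(50)),
#   )
#   result = conn.execute(select([users]).where(users.c.id == 1))
#   row = result.fetchone()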
def _execute_function(self, func, multiparams, params):
"""Execute a sql.FunctionElement object."""
return self._execute_clauseelement(func.select(), multiparams, params)
def _execute_default(self, default, multiparams, params):
"""Execute a schema.ColumnDefault object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
default, multiparams, params = fn(
self, default, multiparams, params
)
try:
try:
conn = self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
conn = None
if conn is None:
conn = self._revalidate_connection()
dialect = self.dialect
ctx = dialect.execution_ctx_cls._init_default(dialect, self, conn)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
ret = ctx._exec_default(None, default, None)
if self.should_close_with_result:
self.close()
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self, default, multiparams, params, ret
)
return ret
def _execute_ddl(self, ddl, multiparams, params):
"""Execute a schema.DDL object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
ddl, multiparams, params = fn(self, ddl, multiparams, params)
dialect = self.dialect
compiled = ddl.compile(
dialect=dialect, schema_translate_map=self._schema_translate_map
)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_ddl,
compiled,
None,
compiled,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self, ddl, multiparams, params, ret)
return ret
def _execute_clauseelement(self, elem, multiparams, params):
"""Execute a sql.ClauseElement object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
elem, multiparams, params = fn(self, elem, multiparams, params)
distilled_params = _distill_params(multiparams, params)
if distilled_params:
# ensure we don't retain a link to the view object for keys()
# which links to the values, which we don't want to cache
keys = list(distilled_params[0].keys())
else:
keys = []
dialect = self.dialect
if "compiled_cache" in self._execution_options:
elem_cache_key, extracted_params = elem._generate_cache_key()
key = (
dialect,
elem_cache_key,
tuple(sorted(keys)),
bool(self._schema_translate_map),
len(distilled_params) > 1,
)
cache = self._execution_options["compiled_cache"]
compiled_sql = cache.get(key)
if compiled_sql is None:
compiled_sql = elem.compile(
dialect=dialect,
cache_key=(elem_cache_key, extracted_params),
column_keys=keys,
inline=len(distilled_params) > 1,
schema_translate_map=self._schema_translate_map,
linting=self.dialect.compiler_linting
| compiler.WARN_LINTING,
)
cache[key] = compiled_sql
else:
extracted_params = None
compiled_sql = elem.compile(
dialect=dialect,
column_keys=keys,
inline=len(distilled_params) > 1,
schema_translate_map=self._schema_translate_map,
linting=self.dialect.compiler_linting | compiler.WARN_LINTING,
)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled_sql,
distilled_params,
compiled_sql,
distilled_params,
elem,
extracted_params,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self, elem, multiparams, params, ret)
return ret
def _execute_compiled(self, compiled, multiparams, params):
"""Execute a sql.Compiled object."""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
compiled, multiparams, params = fn(
self, compiled, multiparams, params
)
dialect = self.dialect
parameters = _distill_params(multiparams, params)
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_compiled,
compiled,
parameters,
compiled,
parameters,
None,
None,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(
self, compiled, multiparams, params, ret
)
return ret
def _exec_driver_sql_distilled(self, statement, parameters):
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_execute:
statement, multiparams, params = fn(
self, statement, parameters, {}
)
dialect = self.dialect
ret = self._execute_context(
dialect,
dialect.execution_ctx_cls._init_statement,
statement,
parameters,
statement,
parameters,
)
if self._has_events or self.engine._has_events:
self.dispatch.after_execute(self, statement, parameters, {})
return ret
def exec_driver_sql(self, statement, parameters=None):
r"""Executes a SQL statement construct and returns a
:class:`.ResultProxy`.
:param statement: The statement str to be executed. Bound parameters
must use the underlying DBAPI's paramstyle, such as "qmark",
"pyformat", "format", etc.
:param parameters: represent bound parameter values to be used in the
execution. The format is one of: a dictionary of named parameters,
a tuple of positional parameters, or a list containing either
dictionaries or tuples for multiple-execute support.
E.g. multiple dictionaries::
conn.exec_driver_sql(
"INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)",
[{"id":1, "value":"v1"}, {"id":2, "value":"v2"}]
)
Single dictionary::
conn.exec_driver_sql(
"INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)",
dict(id=1, value="v1")
)
Single tuple::
conn.exec_driver_sql(
"INSERT INTO table (id, value) VALUES (?, ?)",
(1, 'v1')
)
.. seealso::
:pep:`249`
"""
if isinstance(parameters, list) and parameters:
if not isinstance(parameters[0], (dict, tuple)):
raise exc.ArgumentError(
"List argument must consist only of tuples or dictionaries"
)
elif isinstance(parameters, (dict, tuple)):
parameters = [parameters]
return self._exec_driver_sql_distilled(statement, parameters or ())
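# Illustrative sketch of exec_driver_sql() (editorial addition). The
# "qmark" paramstyle shown is an assumption; the correct placeholder
# format depends on the DBAPI in use.
#
#   conn.exec_driver_sql(
#       "INSERT INTO mytable (id, value) VALUES (?, ?)",
#       [(1, "v1"), (2, "v2")],
#   )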
def _execute_context(
self, dialect, constructor, statement, parameters, *args
):
"""Create an :class:`.ExecutionContext` and execute, returning
a :class:`.ResultProxy`."""
try:
try:
conn = self.__connection
except AttributeError:
# escape "except AttributeError" before revalidating
# to prevent misleading stacktraces in Py3K
conn = None
if conn is None:
conn = self._revalidate_connection()
context = constructor(dialect, self, conn, *args)
except BaseException as e:
self._handle_dbapi_exception(
e, util.text_type(statement), parameters, None, None
)
if self._root.__transaction and not self._root.__transaction.is_active:
raise exc.InvalidRequestError(
"This connection is on an inactive %stransaction. "
"Please rollback() fully before proceeding."
% (
"savepoint "
if isinstance(self.__transaction, NestedTransaction)
else ""
),
code="8s2a",
)
if context.compiled:
context.pre_exec()
cursor, statement, parameters = (
context.cursor,
context.statement,
context.parameters,
)
if not context.executemany:
parameters = parameters[0]
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = fn(
self,
cursor,
statement,
parameters,
context,
context.executemany,
)
if self._echo:
self.engine.logger.info(statement)
if not self.engine.hide_parameters:
self.engine.logger.info(
"%r",
sql_util._repr_params(
parameters, batches=10, ismulti=context.executemany
),
)
else:
self.engine.logger.info(
"[SQL parameters hidden due to hide_parameters=True]"
)
evt_handled = False
try:
if context.executemany:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_executemany:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_executemany(
cursor, statement, parameters, context
)
elif not parameters and context.no_parameters:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute_no_params:
if fn(cursor, statement, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute_no_params(
cursor, statement, context
)
else:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute:
if fn(cursor, statement, parameters, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute(
cursor, statement, parameters, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self,
cursor,
statement,
parameters,
context,
context.executemany,
)
if context.compiled:
context.post_exec()
result = context._setup_result_proxy()
if context.should_autocommit and self._root.__transaction is None:
self._root._commit_impl(autocommit=True)
# for "connectionless" execution, we have to close this
# Connection after the statement is complete.
if self.should_close_with_result:
assert not context._is_future_result
# ResultProxy already exhausted rows / has no rows.
# close us now
if result._soft_closed:
self.close()
else:
# ResultProxy will close this Connection when no more
# rows to fetch.
result._autoclose_connection = True
except BaseException as e:
self._handle_dbapi_exception(
e, statement, parameters, cursor, context
)
return result
def _cursor_execute(self, cursor, statement, parameters, context=None):
"""Execute a statement + params on the given cursor.
Adds appropriate logging and exception handling.
This method is used by DefaultDialect for special-case
executions, such as for sequences and column defaults.
The path of statement execution in the majority of cases
terminates at _execute_context().
"""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = fn(
self, cursor, statement, parameters, context, False
)
if self._echo:
self.engine.logger.info(statement)
self.engine.logger.info("%r", parameters)
try:
for fn in (
()
if not self.dialect._has_events
else self.dialect.dispatch.do_execute
):
if fn(cursor, statement, parameters, context):
break
else:
self.dialect.do_execute(cursor, statement, parameters, context)
except BaseException as e:
self._handle_dbapi_exception(
e, statement, parameters, cursor, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self, cursor, statement, parameters, context, False
)
def _safe_close_cursor(self, cursor):
"""Close the given cursor, catching exceptions
and turning into log warnings.
"""
try:
cursor.close()
except Exception:
# log the error through the connection pool's logger.
self.engine.pool.logger.error(
"Error closing cursor", exc_info=True
)
_reentrant_error = False
_is_disconnect = False
def _handle_dbapi_exception(
self, e, statement, parameters, cursor, context
):
exc_info = sys.exc_info()
is_exit_exception = not isinstance(e, Exception)
if not self._is_disconnect:
self._is_disconnect = (
isinstance(e, self.dialect.dbapi.Error)
and not self.closed
and self.dialect.is_disconnect(
e,
self.__connection if not self.invalidated else None,
cursor,
)
) or (is_exit_exception and not self.closed)
invalidate_pool_on_disconnect = not is_exit_exception
if self._reentrant_error:
util.raise_(
exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
hide_parameters=self.engine.hide_parameters,
dialect=self.dialect,
ismulti=context.executemany
if context is not None
else None,
),
with_traceback=exc_info[2],
from_=e,
)
self._reentrant_error = True
try:
# non-DBAPI error - if we already got a context,
# or there's no string statement, don't wrap it
should_wrap = isinstance(e, self.dialect.dbapi.Error) or (
statement is not None
and context is None
and not is_exit_exception
)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.dbapi.Error,
hide_parameters=self.engine.hide_parameters,
connection_invalidated=self._is_disconnect,
dialect=self.dialect,
ismulti=context.executemany
if context is not None
else None,
)
else:
sqlalchemy_exception = None
newraise = None
if (
self._has_events or self.engine._has_events
) and not self._execution_options.get(
"skip_user_error_events", False
):
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
self.engine,
self,
cursor,
statement,
parameters,
context,
self._is_disconnect,
invalidate_pool_on_disconnect,
)
for fn in self.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if self._is_disconnect != ctx.is_disconnect:
self._is_disconnect = ctx.is_disconnect
if sqlalchemy_exception:
sqlalchemy_exception.connection_invalidated = (
ctx.is_disconnect
)
# set up potentially user-defined value for
# invalidate pool.
invalidate_pool_on_disconnect = (
ctx.invalidate_pool_on_disconnect
)
if should_wrap and context:
context.handle_dbapi_exception(e)
if not self._is_disconnect:
if cursor:
self._safe_close_cursor(cursor)
with util.safe_reraise(warn_only=True):
self._autorollback()
if newraise:
util.raise_(newraise, with_traceback=exc_info[2], from_=e)
elif should_wrap:
util.raise_(
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
)
else:
util.raise_(exc_info[1], with_traceback=exc_info[2])
finally:
del self._reentrant_error
if self._is_disconnect:
del self._is_disconnect
if not self.invalidated:
dbapi_conn_wrapper = self.__connection
if invalidate_pool_on_disconnect:
self.engine.pool._invalidate(dbapi_conn_wrapper, e)
self.invalidate(e)
if self.should_close_with_result:
self.close()
@classmethod
def _handle_dbapi_exception_noconnection(cls, e, dialect, engine):
exc_info = sys.exc_info()
is_disconnect = dialect.is_disconnect(e, None, None)
should_wrap = isinstance(e, dialect.dbapi.Error)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
None,
None,
e,
dialect.dbapi.Error,
hide_parameters=engine.hide_parameters,
connection_invalidated=is_disconnect,
)
else:
sqlalchemy_exception = None
newraise = None
if engine._has_events:
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
engine,
None,
None,
None,
None,
None,
is_disconnect,
True,
)
for fn in engine.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if sqlalchemy_exception and is_disconnect != ctx.is_disconnect:
sqlalchemy_exception.connection_invalidated = (
is_disconnect
) = ctx.is_disconnect
if newraise:
util.raise_(newraise, with_traceback=exc_info[2], from_=e)
elif should_wrap:
util.raise_(
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
)
else:
util.raise_(exc_info[1], with_traceback=exc_info[2])
def _run_ddl_visitor(self, visitorcallable, element, **kwargs):
"""run a DDL visitor.
This method is only here so that the MockConnection can change the
options given to the visitor so that "checkfirst" is skipped.
"""
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
@util.deprecated(
"1.4",
"The :meth:`.Connection.transaction` method is deprecated and will be "
"removed in a future release. Use the :meth:`.Engine.begin` "
"context manager instead.",
)
def transaction(self, callable_, *args, **kwargs):
r"""Execute the given function within a transaction boundary.
The function is passed this :class:`.Connection`
as the first argument, followed by the given \*args and \**kwargs,
e.g.::
def do_something(conn, x, y):
conn.execute(text("some statement"), {'x':x, 'y':y})
conn.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`.Connection.begin`::
with conn.begin():
conn.execute(text("some statement"), {'x':5, 'y':10})
As well as with :meth:`.Engine.begin`::
with engine.begin() as conn:
conn.execute(text("some statement"), {'x':5, 'y':10})
.. seealso::
:meth:`.Engine.begin` - engine-level transactional
context
:meth:`.Engine.transaction` - engine-level version of
:meth:`.Connection.transaction`
"""
kwargs["_sa_skip_warning"] = True
trans = self.begin()
try:
ret = self.run_callable(callable_, *args, **kwargs)
trans.commit()
return ret
except:
with util.safe_reraise():
trans.rollback()
@util.deprecated(
"1.4",
"The :meth:`.Connection.run_callable` method is deprecated and will "
"be removed in a future release. Use a context manager instead.",
)
def run_callable(self, callable_, *args, **kwargs):
r"""Given a callable object or function, execute it, passing
a :class:`.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`.Connection` argument.
This function, along with :meth:`.Engine.run_callable`,
allows a function to be run with a :class:`.Connection`
or :class:`.Engine` object without the need to know
which one is being dealt with.
"""
return callable_(self, *args, **kwargs)
class ExceptionContextImpl(ExceptionContext):
"""Implement the :class:`.ExceptionContext` interface."""
def __init__(
self,
exception,
sqlalchemy_exception,
engine,
connection,
cursor,
statement,
parameters,
context,
is_disconnect,
invalidate_pool_on_disconnect,
):
self.engine = engine
self.connection = connection
self.sqlalchemy_exception = sqlalchemy_exception
self.original_exception = exception
self.execution_context = context
self.statement = statement
self.parameters = parameters
self.is_disconnect = is_disconnect
self.invalidate_pool_on_disconnect = invalidate_pool_on_disconnect
class Transaction(object):
"""Represent a database transaction in progress.
The :class:`.Transaction` object is procured by
calling the :meth:`~.Connection.begin` method of
:class:`.Connection`::
from sqlalchemy import create_engine
engine = create_engine("postgresql://scott:tiger@localhost/test")
connection = engine.connect()
trans = connection.begin()
connection.execute(text("insert into x (a, b) values (1, 2)"))
trans.commit()
The object provides :meth:`.rollback` and :meth:`.commit`
methods in order to control transaction boundaries. It
also implements a context manager interface so that
the Python ``with`` statement can be used with the
:meth:`.Connection.begin` method::
with connection.begin():
connection.execute(text("insert into x (a, b) values (1, 2)"))
The Transaction object is **not** threadsafe.
.. seealso::
:meth:`.Connection.begin`
:meth:`.Connection.begin_twophase`
:meth:`.Connection.begin_nested`
.. index::
single: thread safety; Transaction
"""
_is_root = False
def __init__(self, connection, parent):
self.connection = connection
self._actual_parent = parent
self.is_active = True
def _deactivate(self):
self.is_active = False
@property
def _parent(self):
return self._actual_parent or self
def close(self):
"""Close this :class:`.Transaction`.
If this transaction is the base transaction in a begin/commit
nesting, the transaction will be rolled back. Otherwise, this
method does nothing.
This is used to cancel a Transaction without affecting the scope of
an enclosing transaction.
"""
if not self._parent.is_active:
return
if self._parent is self:
self.rollback()
def rollback(self):
"""Roll back this :class:`.Transaction`.
"""
if self._parent.is_active:
self._do_rollback()
self.is_active = False
self.connection._discard_transaction(self)
def _do_rollback(self):
self._parent._deactivate()
def commit(self):
"""Commit this :class:`.Transaction`."""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self._do_commit()
self.is_active = False
def _do_commit(self):
pass
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
if type_ is None and self.is_active:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
class RootTransaction(Transaction):
_is_root = True
def __init__(self, connection):
super(RootTransaction, self).__init__(connection, None)
self.connection._begin_impl(self)
def _deactivate(self):
self._do_rollback(deactivate_only=True)
self.is_active = False
def _do_rollback(self, deactivate_only=False):
if self.is_active:
self.connection._rollback_impl(deactivate_only=deactivate_only)
def _do_commit(self):
if self.is_active:
self.connection._commit_impl()
class NestedTransaction(Transaction):
"""Represent a 'nested', or SAVEPOINT transaction.
A new :class:`.NestedTransaction` object may be procured
using the :meth:`.Connection.begin_nested` method.
The interface is the same as that of :class:`.Transaction`.
"""
def __init__(self, connection, parent):
super(NestedTransaction, self).__init__(connection, parent)
self._savepoint = self.connection._savepoint_impl()
def _deactivate(self):
self._do_rollback(deactivate_only=True)
self.is_active = False
def _do_rollback(self, deactivate_only=False):
if self.is_active:
self.connection._rollback_to_savepoint_impl(
self._savepoint, self._parent
)
def _do_commit(self):
if self.is_active:
self.connection._release_savepoint_impl(
self._savepoint, self._parent
)
class TwoPhaseTransaction(Transaction):
"""Represent a two-phase transaction.
A new :class:`.TwoPhaseTransaction` object may be procured
using the :meth:`.Connection.begin_twophase` method.
The interface is the same as that of :class:`.Transaction`
with the addition of the :meth:`prepare` method.
"""
def __init__(self, connection, xid):
super(TwoPhaseTransaction, self).__init__(connection, None)
self._is_prepared = False
self.xid = xid
self.connection._begin_twophase_impl(self)
def prepare(self):
"""Prepare this :class:`.TwoPhaseTransaction`.
After a PREPARE, the transaction can be committed.
"""
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self.connection._prepare_twophase_impl(self.xid)
self._is_prepared = True
def _do_rollback(self):
self.connection._rollback_twophase_impl(self.xid, self._is_prepared)
def _do_commit(self):
self.connection._commit_twophase_impl(self.xid, self._is_prepared)
class Engine(Connectable, log.Identified):
"""
Connects a :class:`~sqlalchemy.pool.Pool` and
:class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a
source of database connectivity and behavior.
An :class:`.Engine` object is instantiated publicly using the
:func:`~sqlalchemy.create_engine` function.
.. seealso::
:doc:`/core/engines`
:ref:`connections_toplevel`
"""
_execution_options = util.immutabledict()
_has_events = False
_connection_cls = Connection
_schema_translate_map = None
def __init__(
self,
pool,
dialect,
url,
logging_name=None,
echo=None,
execution_options=None,
hide_parameters=False,
):
self.pool = pool
self.url = url
self.dialect = dialect
if logging_name:
self.logging_name = logging_name
self.echo = echo
self.hide_parameters = hide_parameters
log.instance_logger(self, echoflag=echo)
if execution_options:
self.update_execution_options(**execution_options)
@property
def engine(self):
return self
def update_execution_options(self, **opt):
r"""Update the default execution_options dictionary
of this :class:`.Engine`.
The given keys/values in \**opt are added to the
default execution options that will be used for
all connections. The initial contents of this dictionary
can be sent via the ``execution_options`` parameter
to :func:`.create_engine`.
.. seealso::
:meth:`.Connection.execution_options`
:meth:`.Engine.execution_options`
"""
self._execution_options = self._execution_options.union(opt)
self.dispatch.set_engine_execution_options(self, opt)
self.dialect.set_engine_execution_options(self, opt)
def execution_options(self, **opt):
"""Return a new :class:`.Engine` that will provide
:class:`.Connection` objects with the given execution options.
The returned :class:`.Engine` remains related to the original
:class:`.Engine` in that it shares the same connection pool and
other state:
* The :class:`.Pool` used by the new :class:`.Engine` is the
same instance. The :meth:`.Engine.dispose` method will replace
the connection pool instance for the parent engine as well
as this one.
* Event listeners are "cascaded" - meaning, the new :class:`.Engine`
inherits the events of the parent, and new events can be associated
with the new :class:`.Engine` individually.
* The logging configuration and logging_name is copied from the parent
:class:`.Engine`.
The intent of the :meth:`.Engine.execution_options` method is
to implement "sharding" schemes where multiple :class:`.Engine`
objects refer to the same connection pool, but are differentiated
by options that would be consumed by a custom event::
primary_engine = create_engine("mysql://")
shard1 = primary_engine.execution_options(shard_id="shard1")
shard2 = primary_engine.execution_options(shard_id="shard2")
Above, the ``shard1`` engine serves as a factory for
:class:`.Connection` objects that will contain the execution option
``shard_id=shard1``, and ``shard2`` will produce :class:`.Connection`
objects that contain the execution option ``shard_id=shard2``.
An event handler can consume the above execution option to perform
a schema switch or other operation, given a connection. Below
we emit a MySQL ``use`` statement to switch databases, at the same
time keeping track of which database we've established using the
:attr:`.Connection.info` dictionary, which gives us a persistent
storage space that follows the DBAPI connection::
from sqlalchemy import event
from sqlalchemy.engine import Engine
shards = {"default": "base", "shard_1": "db1", "shard_2": "db2"}
@event.listens_for(Engine, "before_cursor_execute")
def _switch_shard(conn, cursor, stmt,
params, context, executemany):
shard_id = conn._execution_options.get('shard_id', "default")
current_shard = conn.info.get("current_shard", None)
if current_shard != shard_id:
cursor.execute("use %s" % shards[shard_id])
conn.info["current_shard"] = shard_id
.. seealso::
:meth:`.Connection.execution_options` - update execution options
on a :class:`.Connection` object.
:meth:`.Engine.update_execution_options` - update the execution
options for a given :class:`.Engine` in place.
:meth:`.Engine.get_execution_options`
"""
return OptionEngine(self, opt)
def get_execution_options(self):
""" Get the non-SQL options which will take effect during execution.
.. versionadded:: 1.3
.. seealso::
:meth:`.Engine.execution_options`
"""
return self._execution_options
@property
def name(self):
"""String name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.name
@property
def driver(self):
"""Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
in use by this :class:`Engine`."""
return self.dialect.driver
echo = log.echo_property()
def __repr__(self):
return "Engine(%r)" % self.url
def dispose(self):
"""Dispose of the connection pool used by this :class:`.Engine`.
This has the effect of fully closing all **currently checked in**
database connections. Connections that are still checked out
will **not** be closed, however they will no longer be associated
with this :class:`.Engine`, so when they are closed individually,
eventually the :class:`.Pool` which they are associated with will
be garbage collected and they will be closed out fully, if
not already closed on checkin.
A new connection pool is created immediately after the old one has
been disposed. This new pool, like all SQLAlchemy connection pools,
does not make any actual connections to the database until one is
first requested, so as long as the :class:`.Engine` isn't used again,
no new connections will be made.
.. seealso::
:ref:`engine_disposal`
"""
self.pool.dispose()
self.pool = self.pool.recreate()
self.dispatch.engine_disposed(self)
def _execute_default(self, default):
with self.connect() as conn:
return conn._execute_default(default, (), {})
@contextlib.contextmanager
def _optional_conn_ctx_manager(self, connection=None):
if connection is None:
with self.connect() as conn:
yield conn
else:
yield connection
class _trans_ctx(object):
def __init__(self, conn, transaction, close_with_result):
self.conn = conn
self.transaction = transaction
self.close_with_result = close_with_result
def __enter__(self):
return self.conn
def __exit__(self, type_, value, traceback):
if type_ is not None:
self.transaction.rollback()
else:
self.transaction.commit()
if not self.close_with_result:
self.conn.close()
def begin(self, close_with_result=False):
"""Return a context manager delivering a :class:`.Connection`
with a :class:`.Transaction` established.
E.g.::
with engine.begin() as conn:
conn.execute(
text("insert into table (x, y, z) values (1, 2, 3)")
)
conn.execute(text("my_special_procedure(5)"))
Upon successful operation, the :class:`.Transaction`
is committed. If an error is raised, the :class:`.Transaction`
is rolled back.
The ``close_with_result`` flag is normally ``False``, and indicates
that the :class:`.Connection` will be closed when the operation
is complete. When set to ``True``, it indicates the
:class:`.Connection` is in "single use" mode, where the
:class:`.ResultProxy` returned by the first call to
:meth:`.Connection.execute` will close the :class:`.Connection` when
that :class:`.ResultProxy` has exhausted all result rows.
.. seealso::
:meth:`.Engine.connect` - procure a :class:`.Connection` from
an :class:`.Engine`.
:meth:`.Connection.begin` - start a :class:`.Transaction`
for a particular :class:`.Connection`.
"""
conn = self.connect(close_with_result=close_with_result)
try:
trans = conn.begin()
except:
with util.safe_reraise():
conn.close()
return Engine._trans_ctx(conn, trans, close_with_result)
@util.deprecated(
"1.4",
"The :meth:`.Engine.transaction` method is deprecated and will be "
"removed in a future release. Use the :meth:`.Engine.begin` context "
"manager instead.",
)
def transaction(self, callable_, *args, **kwargs):
r"""Execute the given function within a transaction boundary.
The function is passed a :class:`.Connection` newly procured
from :meth:`.Engine.connect` as the first argument,
followed by the given \*args and \**kwargs.
e.g.::
def do_something(conn, x, y):
conn.execute(text("some statement"), {'x':x, 'y':y})
engine.transaction(do_something, 5, 10)
The operations inside the function are all invoked within the
context of a single :class:`.Transaction`.
Upon success, the transaction is committed. If an
exception is raised, the transaction is rolled back
before propagating the exception.
.. note::
The :meth:`.transaction` method is superseded by
the usage of the Python ``with:`` statement, which can
be used with :meth:`.Engine.begin`::
with engine.begin() as conn:
conn.execute(text("some statement"), {'x':5, 'y':10})
.. seealso::
:meth:`.Engine.begin` - engine-level transactional
context
:meth:`.Connection.transaction` - connection-level version of
:meth:`.Engine.transaction`
"""
kwargs["_sa_skip_warning"] = True
with self.connect() as conn:
return conn.transaction(callable_, *args, **kwargs)
@util.deprecated(
"1.4",
"The :meth:`.Engine.run_callable` method is deprecated and will be "
"removed in a future release. Use the :meth:`.Engine.connect` "
"context manager instead.",
)
def run_callable(self, callable_, *args, **kwargs):
r"""Given a callable object or function, execute it, passing
a :class:`.Connection` as the first argument.
The given \*args and \**kwargs are passed subsequent
to the :class:`.Connection` argument.
This function, along with :meth:`.Connection.run_callable`,
allows a function to be run with a :class:`.Connection`
or :class:`.Engine` object without the need to know
which one is being dealt with.
"""
kwargs["_sa_skip_warning"] = True
with self.connect() as conn:
return conn.run_callable(callable_, *args, **kwargs)
def _run_ddl_visitor(self, visitorcallable, element, **kwargs):
with self.connect() as conn:
conn._run_ddl_visitor(visitorcallable, element, **kwargs)
@util.deprecated_20(
":meth:`.Engine.execute`",
alternative="All statement execution in SQLAlchemy 2.0 is performed "
"by the :meth:`.Connection.execute` method of :class:`.Connection`, "
"or in the ORM by the :meth:`.Session.execute` method of "
":class:`.Session`.",
)
def execute(self, statement, *multiparams, **params):
"""Executes the given construct and returns a :class:`.ResultProxy`.
The arguments are the same as those used by
:meth:`.Connection.execute`.
Here, a :class:`.Connection` is acquired using the
:meth:`~.Engine.connect` method, and the statement executed
with that connection. The returned :class:`.ResultProxy` is flagged
such that when the :class:`.ResultProxy` is exhausted and its
underlying cursor is closed, the :class:`.Connection` created here
will also be closed, which allows its associated DBAPI connection
resource to be returned to the connection pool.
"""
connection = self.connect(close_with_result=True)
return connection.execute(statement, *multiparams, **params)
@util.deprecated_20(
":meth:`.Engine.scalar`",
alternative="All statement execution in SQLAlchemy 2.0 is performed "
"by the :meth:`.Connection.execute` method of :class:`.Connection`, "
"or in the ORM by the :meth:`.Session.execute` method of "
":class:`.Session`; the :meth:`.Result.scalar` method can then be "
"used to return a scalar result.",
)
def scalar(self, statement, *multiparams, **params):
"""Executes and returns the first column of the first row.
The underlying result/cursor is closed after execution.
"""
return self.execute(statement, *multiparams, **params).scalar()
def _execute_clauseelement(self, elem, multiparams=None, params=None):
connection = self.connect(close_with_result=True)
return connection._execute_clauseelement(elem, multiparams, params)
def _execute_compiled(self, compiled, multiparams, params):
connection = self.connect(close_with_result=True)
return connection._execute_compiled(compiled, multiparams, params)
def connect(self, close_with_result=False):
"""Return a new :class:`.Connection` object.
The :class:`.Connection` object is a facade that uses a DBAPI
connection internally in order to communicate with the database. This
connection is procured from the connection-holding :class:`.Pool`
referenced by this :class:`.Engine`. When the
:meth:`~.Connection.close` method of the :class:`.Connection` object
is called, the underlying DBAPI connection is then returned to the
connection pool, where it may be used again in a subsequent call to
:meth:`~.Engine.connect`.
"""
return self._connection_cls(self, close_with_result=close_with_result)
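    # A minimal usage sketch, assuming a hypothetical `engine` and the
    # sqlalchemy `text()` construct: connect() is normally used as a context
    # manager so the DBAPI connection is returned to the pool on exit.
    #
    #     with engine.connect() as conn:
    #         result = conn.execute(text("SELECT * FROM some_table"))
    #         for row in result:
    #             print(row)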
@util.deprecated(
"1.4",
"The :meth:`.Engine.table_names` method is deprecated and will be "
"removed in a future release. Please refer to "
":meth:`.Inspector.get_table_names`.",
)
def table_names(self, schema=None, connection=None):
"""Return a list of all table names available in the database.
:param schema: Optional, retrieve names from a non-default schema.
:param connection: Optional, use a specified connection.
"""
with self._optional_conn_ctx_manager(connection) as conn:
insp = inspection.inspect(conn)
return insp.get_table_names(schema)
@util.deprecated(
"1.4",
"The :meth:`.Engine.has_table` method is deprecated and will be "
"removed in a future release. Please refer to "
":meth:`.Inspector.has_table`.",
)
def has_table(self, table_name, schema=None):
"""Return True if the given backend has a table of the given name.
.. seealso::
:ref:`metadata_reflection_inspector` - detailed schema inspection
using the :class:`.Inspector` interface.
:class:`.quoted_name` - used to pass quoting information along
with a schema identifier.
"""
with self._optional_conn_ctx_manager(None) as conn:
insp = inspection.inspect(conn)
return insp.has_table(table_name, schema=schema)
def _wrap_pool_connect(self, fn, connection):
dialect = self.dialect
try:
return fn()
except dialect.dbapi.Error as e:
if connection is None:
Connection._handle_dbapi_exception_noconnection(
e, dialect, self
)
else:
util.raise_(
sys.exc_info()[1], with_traceback=sys.exc_info()[2]
)
def raw_connection(self, _connection=None):
"""Return a "raw" DBAPI connection from the connection pool.
The returned object is a proxied version of the DBAPI
connection object used by the underlying driver in use.
The object will have all the same behavior as the real DBAPI
connection, except that its ``close()`` method will result in the
connection being returned to the pool, rather than being closed
for real.
This method provides direct DBAPI connection access for
special situations when the API provided by :class:`.Connection`
is not needed. When a :class:`.Connection` object is already
present, the DBAPI connection is available using
the :attr:`.Connection.connection` accessor.
.. seealso::
:ref:`dbapi_connections`
"""
return self._wrap_pool_connect(self.pool.connect, _connection)
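# A minimal usage sketch for Engine.raw_connection(), assuming a hypothetical
# `engine`; the cursor calls below are standard DBAPI, and close() returns the
# connection to the pool rather than closing it for real.
#
#     raw_conn = engine.raw_connection()
#     try:
#         cursor = raw_conn.cursor()
#         cursor.execute("SELECT 1")
#         rows = cursor.fetchall()
#         cursor.close()
#     finally:
#         raw_conn.close()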
class OptionEngine(Engine):
_sa_propagate_class_events = False
def __init__(self, proxied, execution_options):
self._proxied = proxied
self.url = proxied.url
self.dialect = proxied.dialect
self.logging_name = proxied.logging_name
self.echo = proxied.echo
self.hide_parameters = proxied.hide_parameters
log.instance_logger(self, echoflag=self.echo)
# note: this will propagate events that are assigned to the parent
# engine after this OptionEngine is created. Since we share
# the events of the parent we also disallow class-level events
# to apply to the OptionEngine class directly.
#
# the other way this can work would be to transfer existing
# events only, using:
# self.dispatch._update(proxied.dispatch)
#
# that might be more appropriate however it would be a behavioral
# change for logic that assigns events to the parent engine and
# would like it to take effect for the already-created sub-engine.
self.dispatch = self.dispatch._join(proxied.dispatch)
self._execution_options = proxied._execution_options
self.update_execution_options(**execution_options)
def _get_pool(self):
return self._proxied.pool
def _set_pool(self, pool):
self._proxied.pool = pool
pool = property(_get_pool, _set_pool)
def _get_has_events(self):
return self._proxied._has_events or self.__dict__.get(
"_has_events", False
)
def _set_has_events(self, value):
self.__dict__["_has_events"] = value
_has_events = property(_get_has_events, _set_has_events)
|
the-stack_0_14456 | """Test the TcEx API Module."""
# standard library
import datetime
import os
import time
from random import randint
# third-party
import pytest
from pytest import FixtureRequest
# first-party
from tcex.api.tc.v3.tql.tql_operator import TqlOperator
from tests.api.tc.v3.v3_helpers import TestV3, V3Helper
class TestNotes(TestV3):
"""Test TcEx API Interface."""
v3 = None
def setup_method(self):
"""Configure setup before all tests."""
print('') # ensure any following print statements will be on new line
self.v3_helper = V3Helper('notes')
self.v3 = self.v3_helper.v3
self.tcex = self.v3_helper.tcex
def teardown_method(self):
"""Configure teardown before all tests."""
if os.getenv('TEARDOWN_METHOD') is None:
self.v3_helper.cleanup()
def test_note_api_options(self):
"""Test filter keywords."""
super().obj_api_options()
def test_note_filter_keywords(self):
"""Test filter keywords."""
super().obj_filter_keywords()
def test_note_object_properties(self):
"""Test properties."""
super().obj_properties()
def test_note_object_properties_extra(self):
"""Test properties."""
super().obj_properties_extra()
def _test_note_on_obj(self, request, cm_object):
common_note_data = {
'text': 'Generic Note Data. This is auto generated to ensure that adding a note '
'does not remove already existing notes.'
}
notes = self.v3.notes()
# [Pre-Requisite] - Add a note to the provided cm object to ensure that it does not get
# replaced/removed
cm_object.stage_note(common_note_data)
cm_object.update()
# [Pre-Requisite] - Add the note data to the appropriate object
note_data = {'text': f'sample note for {request.node.name} test.'}
# [Pre-Requisite] - Add the appropriate filter for the notes object
if cm_object.type_.lower() == 'artifact':
notes.filter.artifact_id(TqlOperator.EQ, cm_object.model.id)
elif cm_object.type_.lower() == 'case':
notes.filter.case_id(TqlOperator.EQ, cm_object.model.id)
elif cm_object.type_.lower() == 'task':
notes.filter.task_id(TqlOperator.EQ, cm_object.model.id)
else:
assert False, f'Invalid value {cm_object.type_} passed into _test_note_on_obj.'
# [Create Testing] create the object
note = self._stage_note(cm_object, note_data, specify_type=True)
note = self.v3.note(id=note.model.id)
note.get()
# [Retrieve Testing] validate the object returned is the same object
assert note.model.text == note_data.get('text')
# [Retrieve Testing] validate the object got added to the object
assert len(notes) == 2
for note in cm_object.notes:
if note.model.text == note_data.get('text'):
break
else:
assert False, f'No note found -> {note.model.id}'
# [Update Testing] validate the object got updated
note.model.text = 'updated note value'
note.update()
assert len(notes) == 2
for note in cm_object.notes:
if note.model.text == 'updated note value':
break
else:
assert False, f'Note on {cm_object.type_} not updated -> {note.model.id}'
note.delete()
# [Delete Testing] validate the object got deleted to the object
assert len(notes) == 1
for remaining_note in cm_object.notes:
if remaining_note.model.id == note.model.id:
assert (
False
), f'Note found on {cm_object.type_} when it should not have been present.'
# [Delete Testing] validate the object is removed
with pytest.raises(RuntimeError) as exc_info:
note.get()
# [Delete Testing] assert error message contains the correct code
# error -> "(952, 'Error during GET. API status code: 404, ..."
assert '952' in str(exc_info.value)
def test_note_on_case(self, request: FixtureRequest):
"""Test Note functions on a Case Object"""
# [Pre-Requisite] - create case
case = self.v3_helper.create_case()
self._test_note_on_obj(request, case)
def test_note_on_artifact(self, request: FixtureRequest):
"""Test Note functions on a Artifact Object"""
# [Pre-Requisite] - create case
case = self.v3_helper.create_case()
# [Pre-Requisite] - create artifact
artifact_data = {
'case_id': case.model.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
artifact = self.v3.artifact(**artifact_data)
artifact.create()
self._test_note_on_obj(request, artifact)
def test_note_on_task(self, request: FixtureRequest):
"""Test Note functions on a Task Object"""
# [Pre-Requisite] - create case
case = self.v3_helper.create_case()
# [Pre-Requisite] - create task
task_data = {
'case_id': case.model.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'workflow_phase': 0,
'workflow_step': 1,
'xid': f'{request.node.name}-{time.time()}',
}
task = self.v3.task(**task_data)
task.create()
self._test_note_on_obj(request, task)
def test_note_get_many(self):
"""Test Artifact Get Many"""
# [Pre-Requisite] - create case
case = self.v3_helper.create_case()
note_count = 10
note_ids = []
for _ in range(0, note_count):
# [Create Testing] define object data
note_data = {
'case_id': case.model.id,
'text': f'sample note randomint - {randint(100, 999)}',
}
# [Create Testing] create the object
note = self.v3.note(**note_data)
# [Create Testing] create the object to the TC API
note.create()
note_ids.append(note.model.id)
        # [Retrieve Testing] iterate over all objects looking for the needle
notes = self.v3.notes(params={'resultLimit': 5})
notes.filter.case_id(TqlOperator.EQ, case.model.id)
assert len(notes) == note_count
for note in notes:
assert note.model.id in note_ids
note_ids.remove(note.model.id)
        assert not note_ids, 'Not all notes were returned.'
def test_note_get_by_tql_filter_fail_tql(self):
"""Test Artifact Get by TQL"""
# retrieve object using TQL
notes = self.v3.notes()
notes.filter.tql = 'Invalid TQL'
# [Fail Testing] validate the object is removed
with pytest.raises(RuntimeError) as exc_info:
for _ in notes:
pass
# [Fail Testing] assert error message contains the correct code
# error -> "(950, 'Error during pagination. API status code: 400, ..."
assert '950' in str(exc_info.value)
assert notes.request.status_code == 400
def _stage_note(self, cm_object, note_data, specify_type=False):
"""Update the note object to include either the artifact/case/task/workflow_event field."""
keys = ['artifact_id', 'case_id', 'task_id', 'workflow_event_id']
for key in keys:
if key in note_data:
note_data.pop(key)
if specify_type:
note_data['text'] = note_data['text'] + f'Type -> {cm_object.type_}'
if cm_object.type_.lower() == 'artifact':
note_data['artifact_id'] = cm_object.model.id
elif cm_object.type_.lower() == 'case':
note_data['case_id'] = cm_object.model.id
elif cm_object.type_.lower() == 'task':
note_data['task_id'] = cm_object.model.id
elif cm_object.type_.lower() == 'workflow event':
note_data['workflow_event_id'] = cm_object.model.id
else:
assert False, f'Invalid value {cm_object.type_} passed into _test_note_on_obj'
note = self.v3.note(**note_data)
note.create()
return note
def test_note_all_filters(self, request: FixtureRequest):
"""Test TQL Filters for Notes"""
# [Pre-Requisite] - create case
note_data = {'text': f'sample note for {request.node.name} test.'}
case = self.v3_helper.create_case()
# [Pre-Requisite] - create workflow_event
workflow_event_data = {
'case_id': case.model.id,
'summary': 'pytest test workflow event',
}
workflow_event = self.v3.workflow_event(**workflow_event_data)
workflow_event.create()
# [Pre-Requisite] - create task
task_data = {
'case_id': case.model.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'workflow_phase': 0,
'workflow_step': 1,
'xid': f'{request.node.name}-{time.time()}',
}
task = self.v3.task(**task_data)
task.create()
# [Pre-Requisite] - create artifact
artifact_data = {
'case_id': case.model.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
artifact = self.v3.artifact(**artifact_data)
artifact.create()
note = self._stage_note(case, note_data, specify_type=True)
notes = self.v3.notes()
future = datetime.datetime.now() + datetime.timedelta(days=10)
future = future.strftime('%Y-%m-%dT%H:%M:%S')
past = datetime.datetime.now() + datetime.timedelta(days=-10)
past = past.strftime('%Y-%m-%dT%H:%M:%S')
# [Filter Testing] case_id
notes.filter.case_id(TqlOperator.EQ, case.model.id)
# [Filter Testing] author
# TODO: [PLAT-????] This fails if the user does not exist on the system
# notes.filter.author(TqlOperator.NE, 'Invalid Author')
# [Filter Testing] date_added
notes.filter.date_added(TqlOperator.GT, past)
# [Filter Testing] has_case -> using id since it's available
notes.filter.has_case.id(TqlOperator.EQ, case.model.id)
# [Filter Testing] id
notes.filter.id(TqlOperator.EQ, note.model.id)
# [Filter Testing] last_modified
notes.filter.last_modified(TqlOperator.LT, future)
# [Filter Testing] summary
notes.filter.summary(TqlOperator.NE, 'Invalid Summary')
for retrieved_note in notes:
assert retrieved_note.model.text == note.model.text
break
else:
assert False, f'No note found for tql -> {notes.tql.as_str}'
notes = self.v3.notes()
note = self._stage_note(artifact, note_data, specify_type=True)
notes.filter.artifact_id(TqlOperator.EQ, artifact.model.id)
notes.filter.has_artifact.id(TqlOperator.EQ, artifact.model.id)
for retrieved_note in notes:
assert retrieved_note.model.text == note.model.text
break
else:
assert False, f'No note found for tql -> {notes.tql.as_str}'
notes = self.v3.notes()
note = self._stage_note(task, note_data, specify_type=True)
notes.filter.task_id(TqlOperator.EQ, task.model.id)
notes.filter.has_task.id(TqlOperator.EQ, task.model.id)
        assert len(notes) == 1, f'Invalid number of notes retrieved for tql -> {notes.tql.as_str}'
for retrieved_note in notes:
assert retrieved_note.model.text == note.model.text
break
else:
assert False, f'No note found for tql -> {notes.tql.as_str}'
notes = self.v3.notes()
note = self._stage_note(workflow_event, note_data, specify_type=True)
notes.filter.workflow_event_id(TqlOperator.EQ, workflow_event.model.id)
for retrieved_note in notes:
assert retrieved_note.model.text == note.model.text
break
else:
assert False, f'No note found for tql -> {notes.tql.as_str}'
|
the-stack_0_14458 | from collections import Counter
import logging
from .kad_peerinfo import KadPeerHeap, create_kad_peerinfo
from .utils import gather_dict
log = logging.getLogger(__name__)
class SpiderCrawl:
"""Crawl the network and look for given 160-bit keys."""
def __init__(self, protocol, node, peers, ksize, alpha):
"""
        Create a new SpiderCrawl.
Args:
protocol: A :class:`~kademlia.protocol.KademliaProtocol` instance.
node: A :class:`~kademlia.node.Node` representing the key we're
looking for
peers: A list of :class:`~kademlia.node.Node` instances that
provide the entry point for the network
ksize: The value for k based on the paper
alpha: The value for alpha based on the paper
"""
self.protocol = protocol
self.ksize = ksize
self.alpha = alpha
self.node = node
self.nearest = KadPeerHeap(self.node, self.ksize)
self.last_ids_crawled = []
log.info("creating spider with peers: %s", peers)
self.nearest.push(peers)
async def _find(self, rpcmethod):
"""
Get either a value or list of nodes.
Args:
            rpcmethod: The protocol's call_find_value or call_find_node.
The process:
1. calls find_* to current ALPHA nearest not already queried nodes,
adding results to current nearest list of k nodes.
2. current nearest list needs to keep track of who has been queried
             already; sort by nearest, keep KSIZE
3. if list is same as last time, next call should be to everyone not
yet queried
          4. repeat, unless nearest list has all been queried, then you're done
"""
log.info("crawling network with nearest: %s", str(tuple(self.nearest)))
count = self.alpha
if self.nearest.get_ids() == self.last_ids_crawled:
count = len(self.nearest)
self.last_ids_crawled = self.nearest.get_ids()
dicts = {}
for peer in self.nearest.get_uncontacted()[:count]:
dicts[peer.peer_id_bytes] = rpcmethod(peer, self.node)
self.nearest.mark_contacted(peer)
found = await gather_dict(dicts)
return await self._nodes_found(found)
async def _nodes_found(self, responses):
raise NotImplementedError
class ValueSpiderCrawl(SpiderCrawl):
def __init__(self, protocol, node, peers, ksize, alpha):
SpiderCrawl.__init__(self, protocol, node, peers, ksize, alpha)
# keep track of the single nearest node without value - per
# section 2.3 so we can set the key there if found
self.nearest_without_value = KadPeerHeap(self.node, 1)
async def find(self):
"""Find either the closest nodes or the value requested."""
return await self._find(self.protocol.call_find_value)
async def _nodes_found(self, responses):
"""Handle the result of an iteration in _find."""
toremove = []
found_values = []
for peerid, response in responses.items():
response = RPCFindResponse(response)
if not response.happened():
toremove.append(peerid)
elif response.has_value():
found_values.append(response.get_value())
else:
peer = self.nearest.get_node(peerid)
self.nearest_without_value.push(peer)
self.nearest.push(response.get_node_list())
self.nearest.remove(toremove)
if found_values:
return await self._handle_found_values(found_values)
if self.nearest.have_contacted_all():
# not found!
return None
return await self.find()
async def _handle_found_values(self, values):
"""
We got some values!
Exciting. But let's make sure they're all the same or freak out
a little bit. Also, make sure we tell the nearest node that
*didn't* have the value to store it.
"""
value_counts = Counter(values)
if len(value_counts) != 1:
log.warning(
"Got multiple values for key %i: %s", self.node.xor_id, str(values)
)
value = value_counts.most_common(1)[0][0]
peer = self.nearest_without_value.popleft()
if peer:
await self.protocol.call_store(peer, self.node.peer_id_bytes, value)
return value
class NodeSpiderCrawl(SpiderCrawl):
async def find(self):
"""Find the closest nodes."""
return await self._find(self.protocol.call_find_node)
async def _nodes_found(self, responses):
"""Handle the result of an iteration in _find."""
toremove = []
for peerid, response in responses.items():
response = RPCFindResponse(response)
if not response.happened():
toremove.append(peerid)
else:
self.nearest.push(response.get_node_list())
self.nearest.remove(toremove)
if self.nearest.have_contacted_all():
return list(self.nearest)
return await self.find()
class RPCFindResponse:
def __init__(self, response):
"""
        A wrapper for the result of an RPC find.
Args:
response: This will be a tuple of (<response received>, <value>)
where <value> will be a list of tuples if not found or
a dictionary of {'value': v} where v is the value desired
"""
self.response = response
def happened(self):
"""Did the other host actually respond?"""
return self.response[0]
def has_value(self):
return isinstance(self.response[1], dict)
def get_value(self):
return self.response[1]["value"]
def get_node_list(self):
"""
Get the node list in the response.
If there's no value, this should be set.
"""
nodelist = self.response[1] or []
return [create_kad_peerinfo(*nodeple) for nodeple in nodelist]
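# A minimal usage sketch, assuming `protocol`, `target_node` and `boot_peers`
# are built elsewhere by the routing/network layer; the ksize/alpha values are
# illustrative only.
#
#     crawler = NodeSpiderCrawl(protocol, target_node, boot_peers, ksize=20, alpha=3)
#     nearest_peers = await crawler.find()
#
#     value_crawler = ValueSpiderCrawl(protocol, target_node, boot_peers, ksize=20, alpha=3)
#     value = await value_crawler.find()  # None if the key was not found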
|
the-stack_0_14459 | from __future__ import division, unicode_literals, print_function, absolute_import # Ease the transition to Python 3
import os
import labscript_utils.excepthook
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript_utils', '2.10.0', '3')
# Splash screen
from labscript_utils.splash import Splash
splash = Splash(os.path.join(os.path.dirname(__file__), 'lyse.svg'))
splash.show()
splash.update_text('importing standard library modules')
# stdlib imports
import sys
import socket
import logging
import threading
import signal
import subprocess
import time
import traceback
import pprint
import ast
# 3rd party imports:
splash.update_text('importing numpy')
import numpy as np
splash.update_text('importing h5_lock and h5py')
import labscript_utils.h5_lock
import h5py
splash.update_text('importing pandas')
import pandas
splash.update_text('importing Qt')
check_version('qtutils', '2.2.2', '3.0.0')
splash.update_text('importing labscript suite modules')
check_version('labscript_utils', '2.12.4', '3')
from labscript_utils.ls_zprocess import ZMQServer, ProcessTree
import zprocess
from labscript_utils.labconfig import LabConfig, config_prefix
from labscript_utils.setup_logging import setup_logging
from labscript_utils.qtwidgets.headerview_with_widgets import HorizontalHeaderViewWithWidgets
from labscript_utils.qtwidgets.outputbox import OutputBox
import labscript_utils.shared_drive as shared_drive
from lyse.dataframe_utilities import (concat_with_padding,
get_dataframe_from_shot,
replace_with_padding)
from qtutils.qt import QtCore, QtGui, QtWidgets
from qtutils.qt.QtCore import pyqtSignal as Signal
from qtutils import inmain_decorator, inmain, UiLoader, DisconnectContextManager
from qtutils.auto_scroll_to_end import set_auto_scroll_to_end
import qtutils.icons
from labscript_utils import PY2
if PY2:
str = unicode
import Queue as queue
else:
import queue
from lyse import LYSE_DIR
process_tree = ProcessTree.instance()
# Set a meaningful name for zlock client id:
process_tree.zlock_client.set_process_name('lyse')
def set_win_appusermodel(window_id):
from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
icon_path = os.path.join(LYSE_DIR, 'lyse.ico')
executable = sys.executable.lower()
if not executable.endswith('w.exe'):
executable = executable.replace('.exe', 'w.exe')
relaunch_command = executable + ' ' + os.path.join(LYSE_DIR, '__main__.py')
relaunch_display_name = app_descriptions['lyse']
set_appusermodel(window_id, appids['lyse'], icon_path, relaunch_command, relaunch_display_name)
@inmain_decorator()
def error_dialog(message):
QtWidgets.QMessageBox.warning(app.ui, 'lyse', message)
@inmain_decorator()
def question_dialog(message):
reply = QtWidgets.QMessageBox.question(app.ui, 'lyse', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
return (reply == QtWidgets.QMessageBox.Yes)
def scientific_notation(x, sigfigs=4, mode='eng'):
"""Returns a unicode string of the float f in scientific notation"""
times = u'\u00d7'
thinspace = u'\u2009'
hairspace = u'\u200a'
sups = {u'-': u'\u207b',
u'0': u'\u2070',
u'1': u'\xb9',
u'2': u'\xb2',
u'3': u'\xb3',
u'4': u'\u2074',
u'5': u'\u2075',
u'6': u'\u2076',
u'7': u'\u2077',
u'8': u'\u2078',
u'9': u'\u2079'}
prefixes = {
-24: u"y",
-21: u"z",
-18: u"a",
-15: u"f",
-12: u"p",
-9: u"n",
-6: u"\u03bc",
-3: u"m",
0: u"",
3: u"k",
6: u"M",
9: u"G",
12: u"T",
15: u"P",
18: u"E",
21: u"Z",
24: u"Y"
}
if not isinstance(x, float):
raise TypeError('x must be floating point number')
if np.isnan(x) or np.isinf(x):
return str(x)
if x != 0:
exponent = int(np.floor(np.log10(np.abs(x))))
# Only multiples of 10^3
exponent = int(np.floor(exponent / 3) * 3)
else:
exponent = 0
significand = x / 10 ** exponent
pre_decimal, post_decimal = divmod(significand, 1)
digits = sigfigs - len(str(int(pre_decimal)))
significand = round(significand, digits)
result = str(significand)
if exponent:
if mode == 'exponential':
superscript = ''.join(sups.get(char, char) for char in str(exponent))
result += thinspace + times + thinspace + '10' + superscript
elif mode == 'eng':
try:
# If our number has an SI prefix then use it
prefix = prefixes[exponent]
result += hairspace + prefix
except KeyError:
# Otherwise display in scientific notation
superscript = ''.join(sups.get(char, char) for char in str(exponent))
result += thinspace + times + thinspace + '10' + superscript
return result
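# A few illustrative calls (outputs shown approximately; the joining characters
# are the thin/hair spaces defined above):
#
#     scientific_notation(1500.0)     # -> '1.5' + hairspace + 'k'
#     scientific_notation(0.0000032)  # -> '3.2' + hairspace + '\u03bc'
#     scientific_notation(2.5e30)     # no SI prefix, falls back to '2.5 × 10³⁰'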
def get_screen_geometry():
"""Return the a list of the geometries of each screen: each a tuple of
left, top, width and height"""
geoms = []
desktop = qapplication.desktop()
for i in range(desktop.screenCount()):
sg = desktop.screenGeometry(i)
geoms.append((sg.left(), sg.top(), sg.width(), sg.height()))
return geoms
class WebServer(ZMQServer):
def handler(self, request_data):
logger.info('WebServer request: %s' % str(request_data))
if request_data == 'hello':
return 'hello'
elif request_data == 'get dataframe':
# infer_objects() picks fixed datatypes for columns that are compatible with
# fixed datatypes, dramatically speeding up pickling. It is called here
# rather than when updating the dataframe as calling it during updating may
# call it needlessly often, whereas it only needs to be called prior to
# sending the dataframe to a client requesting it, as we're doing now.
app.filebox.shots_model.infer_objects()
return app.filebox.shots_model.dataframe
elif isinstance(request_data, dict):
if 'filepath' in request_data:
h5_filepath = shared_drive.path_to_local(request_data['filepath'])
if isinstance(h5_filepath, bytes):
h5_filepath = h5_filepath.decode('utf8')
if not isinstance(h5_filepath, str):
raise AssertionError(str(type(h5_filepath)) + ' is not str or bytes')
app.filebox.incoming_queue.put(h5_filepath)
return 'added successfully'
elif isinstance(request_data, str):
# Just assume it's a filepath:
app.filebox.incoming_queue.put(shared_drive.path_to_local(request_data))
return "Experiment added successfully\n"
return ("error: operation not supported. Recognised requests are:\n "
"'get dataframe'\n 'hello'\n {'filepath': <some_h5_filepath>}")
class LyseMainWindow(QtWidgets.QMainWindow):
# A signal to show that the window is shown and painted.
firstPaint = Signal()
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
def __init__(self, *args, **kwargs):
QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
self._previously_painted = False
self.closing = False
def closeEvent(self, event):
if self.closing:
return QtWidgets.QMainWindow.closeEvent(self, event)
if app.on_close_event():
self.closing = True
timeout_time = time.time() + 2
self.delayedClose(timeout_time)
event.ignore()
def delayedClose(self, timeout_time):
if not all(app.workers_terminated().values()) and time.time() < timeout_time:
QtCore.QTimer.singleShot(50, lambda: self.delayedClose(timeout_time))
else:
QtCore.QTimer.singleShot(0, self.close)
def event(self, event):
result = QtWidgets.QMainWindow.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
def paintEvent(self, event):
result = QtWidgets.QMainWindow.paintEvent(self, event)
if not self._previously_painted:
self._previously_painted = True
self.firstPaint.emit()
return result
class AnalysisRoutine(object):
def __init__(self, filepath, model, output_box_port, checked=QtCore.Qt.Checked):
self.filepath = filepath
self.shortname = os.path.basename(self.filepath)
self.model = model
self.output_box_port = output_box_port
self.COL_ACTIVE = RoutineBox.COL_ACTIVE
self.COL_STATUS = RoutineBox.COL_STATUS
self.COL_NAME = RoutineBox.COL_NAME
self.ROLE_FULLPATH = RoutineBox.ROLE_FULLPATH
self.error = False
self.done = False
self.to_worker, self.from_worker, self.worker = self.start_worker()
# Make a row to put into the model:
active_item = QtGui.QStandardItem()
active_item.setCheckable(True)
active_item.setCheckState(checked)
info_item = QtGui.QStandardItem()
name_item = QtGui.QStandardItem(self.shortname)
name_item.setToolTip(self.filepath)
name_item.setData(self.filepath, self.ROLE_FULLPATH)
self.model.appendRow([active_item, info_item, name_item])
self.exiting = False
def start_worker(self):
# Start a worker process for this analysis routine:
worker_path = os.path.join(LYSE_DIR, 'analysis_subprocess.py')
child_handles = process_tree.subprocess(
worker_path,
output_redirection_port=self.output_box_port,
startup_timeout=30,
)
to_worker, from_worker, worker = child_handles
        # Tell the worker which script it will be executing:
to_worker.put(self.filepath)
return to_worker, from_worker, worker
def do_analysis(self, filepath):
self.to_worker.put(['analyse', filepath])
signal, data = self.from_worker.get()
if signal == 'error':
return False, data
elif signal == 'done':
return True, data
else:
raise ValueError('invalid signal %s'%str(signal))
@inmain_decorator()
def set_status(self, status):
index = self.get_row_index()
if index is None:
            # Welp, we've just been deleted. Nothing to do here.
return
status_item = self.model.item(index, self.COL_STATUS)
if status == 'done':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/tick'))
self.done = True
self.error = False
elif status == 'working':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/hourglass'))
self.done = False
self.error = False
elif status == 'error':
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/exclamation'))
self.error = True
self.done = False
elif status == 'clear':
status_item.setData(None, QtCore.Qt.DecorationRole)
self.done = False
self.error = False
else:
raise ValueError(status)
@inmain_decorator()
def enabled(self):
index = self.get_row_index()
if index is None:
            # Welp, we've just been deleted.
return False
enabled_item = self.model.item(index, self.COL_ACTIVE)
return (enabled_item.checkState() == QtCore.Qt.Checked)
def get_row_index(self):
"""Returns the row index for this routine's row in the model"""
for row in range(self.model.rowCount()):
name_item = self.model.item(row, self.COL_NAME)
fullpath = name_item.data(self.ROLE_FULLPATH)
if fullpath == self.filepath:
return row
def restart(self):
# TODO set status to 'restarting' or an icon or something, and gray out the item?
self.end_child(restart=True)
def remove(self):
"""End the child process and remove from the treeview"""
self.end_child()
index = self.get_row_index()
if index is None:
# Already gone
return
self.model.removeRow(index)
def end_child(self, restart=False):
self.to_worker.put(['quit', None])
timeout_time = time.time() + 2
self.exiting = True
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(self.worker, timeout_time, kill=False, restart=restart))
def check_child_exited(self, worker, timeout_time, kill=False, restart=False):
worker.poll()
if worker.returncode is None and time.time() < timeout_time:
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(worker, timeout_time, kill, restart))
return
elif worker.returncode is None:
if not kill:
worker.terminate()
app.output_box.output('%s worker not responding.\n'%self.shortname)
timeout_time = time.time() + 2
QtCore.QTimer.singleShot(50,
lambda: self.check_child_exited(worker, timeout_time, kill=True, restart=restart))
return
else:
worker.kill()
app.output_box.output('%s worker killed\n'%self.shortname, red=True)
elif kill:
app.output_box.output('%s worker terminated\n'%self.shortname, red=True)
else:
app.output_box.output('%s worker exited cleanly\n'%self.shortname)
        # If analysis was running, notify the analysis loop that analysis has failed
self.from_worker.put(('error', {}))
if restart:
self.to_worker, self.from_worker, self.worker = self.start_worker()
app.output_box.output('%s worker restarted\n'%self.shortname)
self.exiting = False
class TreeView(QtWidgets.QTreeView):
leftClicked = Signal(QtCore.QModelIndex)
doubleLeftClicked = Signal(QtCore.QModelIndex)
"""A QTreeView that emits a custom signal leftClicked(index) after a left
click on a valid index, and doubleLeftClicked(index) (in addition) on
double click."""
def __init__(self, *args):
QtWidgets.QTreeView.__init__(self, *args)
self._pressed_index = None
self._double_click = False
def mousePressEvent(self, event):
result = QtWidgets.QTreeView.mousePressEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
return result
def leaveEvent(self, event):
result = QtWidgets.QTreeView.leaveEvent(self, event)
self._pressed_index = None
self._double_click = False
return result
def mouseDoubleClickEvent(self, event):
# Ensure our left click event occurs regardless of whether it is the
# second click in a double click or not
result = QtWidgets.QTreeView.mouseDoubleClickEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
self._double_click = True
return result
def mouseReleaseEvent(self, event):
result = QtWidgets.QTreeView.mouseReleaseEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
self.leftClicked.emit(index)
if self._double_click:
self.doubleLeftClicked.emit(index)
self._pressed_index = None
self._double_click = False
return result
class RoutineBox(object):
COL_ACTIVE = 0
COL_STATUS = 1
COL_NAME = 2
ROLE_FULLPATH = QtCore.Qt.UserRole + 1
# This data (stored in the name item) does not necessarily match
# the position in the model. It will be set just
# prior to sort() being called with this role as the sort data.
# This is how we will reorder the model's rows instead of
# using remove/insert.
ROLE_SORTINDEX = QtCore.Qt.UserRole + 2
def __init__(self, container, exp_config, filebox, from_filebox, to_filebox, output_box_port, multishot=False):
self.multishot = multishot
self.filebox = filebox
self.exp_config = exp_config
self.from_filebox = from_filebox
self.to_filebox = to_filebox
self.output_box_port = output_box_port
self.logger = logging.getLogger('lyse.RoutineBox.%s'%('multishot' if multishot else 'singleshot'))
loader = UiLoader()
loader.registerCustomWidget(TreeView)
self.ui = loader.load(os.path.join(LYSE_DIR, 'routinebox.ui'))
container.addWidget(self.ui)
if multishot:
self.ui.groupBox.setTitle('Multishot routines')
else:
self.ui.groupBox.setTitle('Singleshot routines')
self.model = UneditableModel()
self.header = HorizontalHeaderViewWithWidgets(self.model)
self.ui.treeView.setHeader(self.header)
self.ui.treeView.setModel(self.model)
active_item = QtGui.QStandardItem()
active_item.setToolTip('Whether the analysis routine should run')
status_item = QtGui.QStandardItem()
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
        status_item.setToolTip('The status of this analysis routine\'s execution')
name_item = QtGui.QStandardItem('name')
name_item.setToolTip('The name of the python script for the analysis routine')
self.select_all_checkbox = QtWidgets.QCheckBox()
self.select_all_checkbox.setToolTip('whether the analysis routine should run')
self.header.setWidget(self.COL_ACTIVE, self.select_all_checkbox)
self.header.setStretchLastSection(True)
self.select_all_checkbox.setTristate(False)
self.model.setHorizontalHeaderItem(self.COL_ACTIVE, active_item)
self.model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
self.model.setHorizontalHeaderItem(self.COL_NAME, name_item)
self.model.setSortRole(self.ROLE_SORTINDEX)
self.ui.treeView.resizeColumnToContents(self.COL_ACTIVE)
self.ui.treeView.resizeColumnToContents(self.COL_STATUS)
self.ui.treeView.setColumnWidth(self.COL_NAME, 200)
self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_set_selected_active = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'set selected routines active', self.ui)
self.action_set_selected_inactive = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'set selected routines inactive', self.ui)
self.action_restart_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/arrow-circle'), 'restart worker process for selected routines', self.ui)
self.action_remove_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected routines', self.ui)
self.last_opened_routine_folder = self.exp_config.get('paths', 'analysislib')
self.routines = []
self.connect_signals()
self.analysis = threading.Thread(target = self.analysis_loop)
self.analysis.daemon = True
self.analysis.start()
def connect_signals(self):
self.ui.toolButton_add_routines.clicked.connect(self.on_add_routines_clicked)
self.ui.toolButton_remove_routines.clicked.connect(self.on_remove_selection)
self.model.itemChanged.connect(self.on_model_item_changed)
self.ui.treeView.doubleLeftClicked.connect(self.on_treeview_double_left_clicked)
# A context manager with which we can temporarily disconnect the above connection.
self.model_item_changed_disconnected = DisconnectContextManager(
self.model.itemChanged, self.on_model_item_changed)
self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
self.action_set_selected_active.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
self.action_set_selected_inactive.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
self.action_restart_selected.triggered.connect(self.on_restart_selected_triggered)
self.action_remove_selected.triggered.connect(self.on_remove_selection)
self.ui.toolButton_move_to_top.clicked.connect(self.on_move_to_top_clicked)
self.ui.toolButton_move_up.clicked.connect(self.on_move_up_clicked)
self.ui.toolButton_move_down.clicked.connect(self.on_move_down_clicked)
self.ui.toolButton_move_to_bottom.clicked.connect(self.on_move_to_bottom_clicked)
def on_add_routines_clicked(self):
routine_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
'Select analysis routines',
self.last_opened_routine_folder,
"Python scripts (*.py)")
if type(routine_files) is tuple:
routine_files, _ = routine_files
if not routine_files:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
routine_files = [os.path.abspath(routine_file) for routine_file in routine_files]
# Save the containing folder for use next time we open the dialog box:
self.last_opened_routine_folder = os.path.dirname(routine_files[0])
self.add_routines([(routine_file, QtCore.Qt.Checked) for routine_file in routine_files])
def add_routines(self, routine_files, clear_existing=False):
"""Add routines to the routine box, where routine_files is a list of
tuples containing the filepath and whether the routine is enabled or
not when it is added. if clear_existing == True, then any existing
analysis routines will be cleared before the new ones are added."""
if clear_existing:
for routine in self.routines[:]:
routine.remove()
self.routines.remove(routine)
# Queue the files to be opened:
for filepath, checked in routine_files:
if filepath in [routine.filepath for routine in self.routines]:
app.output_box.output('Warning: Ignoring duplicate analysis routine %s\n'%filepath, red=True)
continue
routine = AnalysisRoutine(filepath, self.model, self.output_box_port, checked)
self.routines.append(routine)
self.update_select_all_checkstate()
def on_treeview_double_left_clicked(self, index):
        # If double clicking on the name item, open
# the routine in the specified text editor:
if index.column() != self.COL_NAME:
return
name_item = self.model.item(index.row(), self.COL_NAME)
routine_filepath = name_item.data(self.ROLE_FULLPATH)
# get path to text editor
editor_path = self.exp_config.get('programs', 'text_editor')
editor_args = self.exp_config.get('programs', 'text_editor_arguments')
        # Check an editor is configured before trying to launch it:
if not editor_path:
error_dialog("No editor specified in the labconfig.")
if '{file}' in editor_args:
# Split the args on spaces into a list, replacing {file} with the labscript file
editor_args = [arg if arg != '{file}' else routine_filepath for arg in editor_args.split()]
else:
# Otherwise if {file} isn't already in there, append it to the other args:
editor_args = [routine_filepath] + editor_args.split()
try:
subprocess.Popen([editor_path] + editor_args)
except Exception as e:
error_dialog("Unable to launch text editor specified in %s. Error was: %s" %
(self.exp_config.config_path, str(e)))
def on_remove_selection(self):
self.remove_selection()
def remove_selection(self, confirm=True):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
if not selected_rows:
return
if confirm and not question_dialog("Remove %d routines?" % len(selected_rows)):
return
name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
for routine in self.routines[:]:
if routine.filepath in filepaths:
routine.remove()
self.routines.remove(routine)
self.update_select_all_checkstate()
def on_model_item_changed(self, item):
if item.column() == self.COL_ACTIVE:
self.update_select_all_checkstate()
def on_select_all_state_changed(self, state):
with self.select_all_checkbox_state_changed_disconnected:
# Do not allow a switch *to* a partially checked state:
self.select_all_checkbox.setTristate(False)
state = self.select_all_checkbox.checkState()
with self.model_item_changed_disconnected:
for row in range(self.model.rowCount()):
active_item = self.model.item(row, self.COL_ACTIVE)
active_item.setCheckState(state)
def on_treeView_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui.treeView)
menu.addAction(self.action_set_selected_active)
menu.addAction(self.action_set_selected_inactive)
menu.addAction(self.action_restart_selected)
menu.addAction(self.action_remove_selected)
menu.exec_(QtGui.QCursor.pos())
def on_set_selected_triggered(self, active):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
for row in selected_rows:
active_item = self.model.item(row, self.COL_ACTIVE)
active_item.setCheckState(active)
self.update_select_all_checkstate()
def on_move_to_top_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
i_selected = 0
i_unselected = len(selected_rows)
order = []
for i in range(n):
if i in selected_rows:
order.append(i_selected)
i_selected += 1
else:
order.append(i_unselected)
i_unselected += 1
self.reorder(order)
def on_move_up_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
order = []
last_unselected_index = None
for i in range(n):
if i in selected_rows:
if last_unselected_index is None:
order.append(i)
else:
order.append(i - 1)
order[last_unselected_index] += 1
else:
last_unselected_index = i
order.append(i)
self.reorder(order)
def on_move_down_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
order = []
last_unselected_index = None
for i in reversed(range(n)):
if i in selected_rows:
if last_unselected_index is None:
order.insert(0, i)
else:
order.insert(0, i + 1)
order[last_unselected_index - n] -= 1
else:
last_unselected_index = i
order.insert(0, i)
self.reorder(order)
def on_move_to_bottom_clicked(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
n = self.model.rowCount()
i_selected = n - len(selected_rows)
i_unselected = 0
order = []
for i in range(n):
if i in selected_rows:
order.append(i_selected)
i_selected += 1
else:
order.append(i_unselected)
i_unselected += 1
self.reorder(order)
def on_restart_selected_triggered(self):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
for routine in self.routines:
if routine.filepath in filepaths:
routine.restart()
self.update_select_all_checkstate()
def analysis_loop(self):
while True:
filepath = self.from_filebox.get()
if self.multishot:
assert filepath is None
# TODO: get the filepath of the output h5 file:
# filepath = self.filechooserentry.get_text()
self.logger.info('got a file to process: %s'%filepath)
self.do_analysis(filepath)
def todo(self):
"""How many analysis routines are not done?"""
return len([r for r in self.routines if r.enabled() and not r.done])
def do_analysis(self, filepath):
"""Run all analysis routines once on the given filepath,
which is a shot file if we are a singleshot routine box"""
for routine in self.routines:
routine.set_status('clear')
remaining = self.todo()
error = False
updated_data = {}
while remaining:
self.logger.debug('%d routines left to do'%remaining)
for routine in self.routines:
if routine.enabled() and not routine.done:
break
else:
routine = None
if routine is not None:
self.logger.info('running analysis routine %s'%routine.shortname)
routine.set_status('working')
success, updated_data = routine.do_analysis(filepath)
if success:
routine.set_status('done')
self.logger.debug('success')
else:
routine.set_status('error')
self.logger.debug('failure')
error = True
break
# Race conditions here, but it's only for reporting percent done
# so it doesn't matter if it's wrong briefly:
remaining = self.todo()
total = len([r for r in self.routines if r.enabled()])
done = total - remaining
try:
status_percent = 100*float(done)/(remaining + done)
except ZeroDivisionError:
# All routines got deleted mid-analysis, we're done here:
status_percent = 100.0
self.to_filebox.put(['progress', status_percent, updated_data])
if error:
self.to_filebox.put(['error', None, updated_data])
else:
self.to_filebox.put(['done', 100.0, {}])
self.logger.debug('completed analysis of %s'%filepath)
def reorder(self, order):
assert len(order) == len(set(order)), 'ordering contains non-unique elements'
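        # Note: `order` maps old row index -> new row index. For example,
        # order == [2, 0, 1] moves row 0 to row 2, row 1 to row 0 and row 2 to
        # row 1, in both the model and self.routines.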
# Apply the reordering to the liststore:
for old_index, new_index in enumerate(order):
name_item = self.model.item(old_index, self.COL_NAME)
name_item.setData(new_index, self.ROLE_SORTINDEX)
self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
# Apply new order to our list of routines too:
self.routines = [self.routines[order.index(i)] for i in range(len(order))]
def update_select_all_checkstate(self):
with self.select_all_checkbox_state_changed_disconnected:
all_states = []
for row in range(self.model.rowCount()):
active_item = self.model.item(row, self.COL_ACTIVE)
all_states.append(active_item.checkState())
if all(state == QtCore.Qt.Checked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
elif all(state == QtCore.Qt.Unchecked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
else:
self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
class EditColumnsDialog(QtWidgets.QDialog):
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
close_signal = Signal()
def __init__(self):
QtWidgets.QDialog.__init__(self, None, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint)
def event(self, event):
result = QtWidgets.QDialog.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
def closeEvent(self, event):
self.close_signal.emit()
event.ignore()
class EditColumns(object):
ROLE_SORT_DATA = QtCore.Qt.UserRole + 1
COL_VISIBLE = 0
COL_NAME = 1
def __init__(self, filebox, column_names, columns_visible):
self.filebox = filebox
self.column_names = column_names.copy()
self.columns_visible = columns_visible.copy()
self.old_columns_visible = columns_visible.copy()
loader = UiLoader()
self.ui = loader.load(os.path.join(LYSE_DIR, 'edit_columns.ui'), EditColumnsDialog())
self.model = UneditableModel()
self.header = HorizontalHeaderViewWithWidgets(self.model)
self.select_all_checkbox = QtWidgets.QCheckBox()
self.select_all_checkbox.setTristate(False)
self.ui.treeView.setHeader(self.header)
self.proxy_model = QtCore.QSortFilterProxyModel()
self.proxy_model.setSourceModel(self.model)
self.proxy_model.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.proxy_model.setFilterKeyColumn(self.COL_NAME)
self.ui.treeView.setSortingEnabled(True)
self.header.setStretchLastSection(True)
self.proxy_model.setSortRole(self.ROLE_SORT_DATA)
self.ui.treeView.setModel(self.proxy_model)
self.ui.setWindowModality(QtCore.Qt.ApplicationModal)
self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_set_selected_visible = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Show selected columns', self.ui)
self.action_set_selected_hidden = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Hide selected columns', self.ui)
self.connect_signals()
self.populate_model(column_names, self.columns_visible)
def connect_signals(self):
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
self.ui.close_signal.connect(self.close)
self.ui.lineEdit_filter.textEdited.connect(self.on_filter_text_edited)
self.ui.pushButton_make_it_so.clicked.connect(self.make_it_so)
self.ui.pushButton_cancel.clicked.connect(self.cancel)
self.model.itemChanged.connect(self.on_model_item_changed)
# A context manager with which we can temporarily disconnect the above connection.
self.model_item_changed_disconnected = DisconnectContextManager(
self.model.itemChanged, self.on_model_item_changed)
self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
self.action_set_selected_visible.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
self.action_set_selected_hidden.triggered.connect(
lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
def populate_model(self, column_names, columns_visible):
self.model.clear()
self.model.setHorizontalHeaderLabels(['', 'Name'])
self.header.setWidget(self.COL_VISIBLE, self.select_all_checkbox)
self.ui.treeView.resizeColumnToContents(self.COL_VISIBLE)
# Which indices in self.columns_visible the row numbers correspond to
self.column_indices = {}
# Remove our special columns from the dict of column names by keeping only tuples:
column_names = {i: name for i, name in column_names.items() if isinstance(name, tuple)}
# Sort the column names as comma separated values, converting to lower case:
sortkey = lambda item: ', '.join(item[1]).lower().strip(', ')
for column_index, name in sorted(column_names.items(), key=sortkey):
visible = columns_visible[column_index]
visible_item = QtGui.QStandardItem()
visible_item.setCheckable(True)
if visible:
visible_item.setCheckState(QtCore.Qt.Checked)
visible_item.setData(QtCore.Qt.Checked, self.ROLE_SORT_DATA)
else:
visible_item.setCheckState(QtCore.Qt.Unchecked)
visible_item.setData(QtCore.Qt.Unchecked, self.ROLE_SORT_DATA)
name_as_string = ', '.join(name).strip(', ')
name_item = QtGui.QStandardItem(name_as_string)
name_item.setData(sortkey((column_index, name)), self.ROLE_SORT_DATA)
self.model.appendRow([visible_item, name_item])
self.column_indices[self.model.rowCount() - 1] = column_index
self.ui.treeView.resizeColumnToContents(self.COL_NAME)
self.update_select_all_checkstate()
self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
def on_treeView_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui)
menu.addAction(self.action_set_selected_visible)
menu.addAction(self.action_set_selected_hidden)
menu.exec_(QtGui.QCursor.pos())
def on_set_selected_triggered(self, visible):
selected_indexes = self.ui.treeView.selectedIndexes()
selected_rows = set(self.proxy_model.mapToSource(index).row() for index in selected_indexes)
for row in selected_rows:
visible_item = self.model.item(row, self.COL_VISIBLE)
self.update_visible_state(visible_item, visible)
self.update_select_all_checkstate()
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def on_filter_text_edited(self, text):
self.proxy_model.setFilterWildcard(text)
def on_select_all_state_changed(self, state):
with self.select_all_checkbox_state_changed_disconnected:
# Do not allow a switch *to* a partially checked state:
self.select_all_checkbox.setTristate(False)
state = self.select_all_checkbox.checkState()
for row in range(self.model.rowCount()):
visible_item = self.model.item(row, self.COL_VISIBLE)
self.update_visible_state(visible_item, state)
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def update_visible_state(self, item, state):
assert item.column() == self.COL_VISIBLE, "unexpected column"
row = item.row()
with self.model_item_changed_disconnected:
item.setCheckState(state)
item.setData(state, self.ROLE_SORT_DATA)
if state == QtCore.Qt.Checked:
self.columns_visible[self.column_indices[row]] = True
else:
self.columns_visible[self.column_indices[row]] = False
def update_select_all_checkstate(self):
with self.select_all_checkbox_state_changed_disconnected:
all_states = []
for row in range(self.model.rowCount()):
visible_item = self.model.item(row, self.COL_VISIBLE)
all_states.append(visible_item.checkState())
if all(state == QtCore.Qt.Checked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
elif all(state == QtCore.Qt.Unchecked for state in all_states):
self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
else:
self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
def on_model_item_changed(self, item):
state = item.checkState()
self.update_visible_state(item, state)
self.update_select_all_checkstate()
self.do_sort()
self.filebox.set_columns_visible(self.columns_visible)
def do_sort(self):
header = self.ui.treeView.header()
sort_column = header.sortIndicatorSection()
sort_order = header.sortIndicatorOrder()
self.ui.treeView.sortByColumn(sort_column, sort_order)
def update_columns(self, column_names, columns_visible):
# Index/name mapping may have changed. Get a mapping by *name* of
# which columns were previously visible, so we can update our by-index
# mapping in a moment:
old_columns_visible_by_name = {}
for old_column_number, visible in self.old_columns_visible.items():
column_name = self.column_names[old_column_number]
old_columns_visible_by_name[column_name] = visible
self.columns_visible = columns_visible.copy()
self.column_names = column_names.copy()
# Update the by-index mapping of which columns were visible before editing:
self.old_columns_visible = {}
for index, name in self.column_names.items():
try:
self.old_columns_visible[index] = old_columns_visible_by_name[name]
except KeyError:
# A new column. If editing is cancelled, any new columns
# should be set to visible:
self.old_columns_visible[index] = True
self.populate_model(column_names, self.columns_visible)
def show(self):
self.old_columns_visible = self.columns_visible.copy()
self.ui.show()
def close(self):
self.columns_visible = self.old_columns_visible.copy()
self.filebox.set_columns_visible(self.columns_visible)
self.populate_model(self.column_names, self.columns_visible)
self.ui.hide()
def cancel(self):
self.ui.close()
def make_it_so(self):
self.ui.hide()
class ItemDelegate(QtWidgets.QStyledItemDelegate):
"""An item delegate with a fixed height and a progress bar in one column"""
EXTRA_ROW_HEIGHT = 2
def __init__(self, view, model, col_status, role_status_percent):
self.view = view
self.model = model
self.COL_STATUS = col_status
self.ROLE_STATUS_PERCENT = role_status_percent
QtWidgets.QStyledItemDelegate.__init__(self)
def sizeHint(self, *args):
fontmetrics = QtGui.QFontMetrics(self.view.font())
text_height = fontmetrics.height()
row_height = text_height + self.EXTRA_ROW_HEIGHT
size = QtWidgets.QStyledItemDelegate.sizeHint(self, *args)
return QtCore.QSize(size.width(), row_height)
def paint(self, painter, option, index):
if index.column() == self.COL_STATUS:
status_percent = self.model.data(index, self.ROLE_STATUS_PERCENT)
if status_percent == 100:
# Render as a normal item - this shows whatever icon is set instead of a progress bar.
return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
else:
# Method of rendering a progress bar into the view copied from
# Qt's 'network-torrent' example:
# http://qt-project.org/doc/qt-4.8/network-torrent-torrentclient-cpp.html
# Set up a QStyleOptionProgressBar to precisely mimic the
# environment of a progress bar.
progress_bar_option = QtWidgets.QStyleOptionProgressBar()
progress_bar_option.state = QtWidgets.QStyle.State_Enabled
progress_bar_option.direction = qapplication.layoutDirection()
progress_bar_option.rect = option.rect
progress_bar_option.fontMetrics = qapplication.fontMetrics()
progress_bar_option.minimum = 0
progress_bar_option.maximum = 100
progress_bar_option.textAlignment = QtCore.Qt.AlignCenter
progress_bar_option.textVisible = True
# Set the progress and text values of the style option.
progress_bar_option.progress = status_percent
progress_bar_option.text = '%d%%' % status_percent
# Draw the progress bar onto the view.
qapplication.style().drawControl(QtWidgets.QStyle.CE_ProgressBar, progress_bar_option, painter)
else:
return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
class UneditableModel(QtGui.QStandardItemModel):
def flags(self, index):
"""Return flags as normal except that the ItemIsEditable
flag is always False"""
result = QtGui.QStandardItemModel.flags(self, index)
return result & ~QtCore.Qt.ItemIsEditable
class TableView(QtWidgets.QTableView):
leftClicked = Signal(QtCore.QModelIndex)
doubleLeftClicked = Signal(QtCore.QModelIndex)
"""A QTableView that emits a custom signal leftClicked(index) after a left
click on a valid index, and doubleLeftClicked(index) (in addition) on
double click. Multiple inheritance of QObjects is not possible, so we
are forced to duplicate code instead of sharing code with the extremely
similar TreeView class in this module"""
def __init__(self, *args):
QtWidgets.QTableView.__init__(self, *args)
self._pressed_index = None
self._double_click = False
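# _pressed_index records which valid index received the left-button press; a
# release over the same index counts as a click, and _double_click flags
# whether that click was the second click of a double click.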
def mousePressEvent(self, event):
result = QtWidgets.QTableView.mousePressEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
return result
def leaveEvent(self, event):
result = QtWidgets.QTableView.leaveEvent(self, event)
self._pressed_index = None
self._double_click = False
return result
def mouseDoubleClickEvent(self, event):
# Ensure our left click event occurs regardless of whether it is the
# second click in a double click or not
result = QtWidgets.QTableView.mouseDoubleClickEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
self._double_click = True
return result
def mouseReleaseEvent(self, event):
result = QtWidgets.QTableView.mouseReleaseEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
self.leftClicked.emit(index)
if self._double_click:
self.doubleLeftClicked.emit(index)
self._pressed_index = None
self._double_click = False
return result
class DataFrameModel(QtCore.QObject):
COL_STATUS = 0
COL_FILEPATH = 1
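# Custom data roles stored on the status item: the percent completion of
# single-shot analysis, and whether the shot file has been deleted off disk.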
ROLE_STATUS_PERCENT = QtCore.Qt.UserRole + 1
ROLE_DELETED_OFF_DISK = QtCore.Qt.UserRole + 2
columns_changed = Signal()
def __init__(self, view, exp_config):
QtCore.QObject.__init__(self)
self._view = view
self.exp_config = exp_config
self._model = UneditableModel()
self.row_number_by_filepath = {}
self._previous_n_digits = 0
self._header = HorizontalHeaderViewWithWidgets(self._model)
self._vertheader = QtWidgets.QHeaderView(QtCore.Qt.Vertical)
self._vertheader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
# Smaller font for headers:
font = self._vertheader.font()
font.setPointSize(10 if sys.platform == 'darwin' else 8)
self._header.setFont(font)
font.setFamily('Ubuntu Mono')
self._vertheader.setFont(font)
self._vertheader.setHighlightSections(True)
self._vertheader.setSectionsClickable(True)
self._view.setModel(self._model)
self._view.setHorizontalHeader(self._header)
self._view.setVerticalHeader(self._vertheader)
self._delegate = ItemDelegate(self._view, self._model, self.COL_STATUS, self.ROLE_STATUS_PERCENT)
self._view.setItemDelegate(self._delegate)
self._view.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
self._view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Check if integer indexing is to be used
try:
self.integer_indexing = self.exp_config.getboolean('lyse', 'integer_indexing')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.integer_indexing = False
# This dataframe will contain all the scalar data
# from the shot files that are currently open:
index = pandas.MultiIndex.from_tuples([('filepath', '')])
self.dataframe = pandas.DataFrame({'filepath': []}, columns=index)
# How many levels the dataframe's multiindex has:
self.nlevels = self.dataframe.columns.nlevels
status_item = QtGui.QStandardItem()
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
status_item.setToolTip('status/progress of single-shot analysis')
self._model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
filepath_item = QtGui.QStandardItem('filepath')
filepath_item.setToolTip('filepath')
self._model.setHorizontalHeaderItem(self.COL_FILEPATH, filepath_item)
self._view.setColumnWidth(self.COL_STATUS, 70)
self._view.setColumnWidth(self.COL_FILEPATH, 100)
# Column indices to names and vice versa for fast lookup:
self.column_indices = {'__status': self.COL_STATUS, ('filepath', ''): self.COL_FILEPATH}
self.column_names = {self.COL_STATUS: '__status', self.COL_FILEPATH: ('filepath', '')}
self.columns_visible = {self.COL_STATUS: True, self.COL_FILEPATH: True}
# Whether or not a deleted column was visible at the time it was deleted (by name):
self.deleted_columns_visible = {}
# Make the actions for the context menu:
self.action_remove_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected shots', self._view)
self.connect_signals()
def connect_signals(self):
self._view.customContextMenuRequested.connect(self.on_view_context_menu_requested)
self.action_remove_selected.triggered.connect(self.on_remove_selection)
def on_remove_selection(self):
self.remove_selection()
def remove_selection(self, confirm=True):
selection_model = self._view.selectionModel()
selected_indexes = selection_model.selectedRows()
selected_name_items = [self._model.itemFromIndex(index) for index in selected_indexes]
if not selected_name_items:
return
if confirm and not question_dialog("Remove %d shots?" % len(selected_name_items)):
return
# Remove from DataFrame first:
self.dataframe = self.dataframe.drop(index.row() for index in selected_indexes)
self.dataframe.index = pandas.Index(range(len(self.dataframe)))
# Delete one at a time from Qt model:
for name_item in selected_name_items:
row = name_item.row()
self._model.removeRow(row)
self.renumber_rows()
def mark_selection_not_done(self):
selected_indexes = self._view.selectedIndexes()
selected_rows = set(index.row() for index in selected_indexes)
for row in selected_rows:
status_item = self._model.item(row, self.COL_STATUS)
if status_item.data(self.ROLE_DELETED_OFF_DISK):
# If the shot was previously not readable on disk, check to
# see if it's readable now. It may have been undeleted or
# perhaps it being unreadable before was due to a network
# glitch or similar.
filepath = self._model.item(row, self.COL_FILEPATH).text()
if not os.path.exists(filepath):
continue
# Shot file is accessible again:
status_item.setData(False, self.ROLE_DELETED_OFF_DISK)
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
status_item.setToolTip(None)
status_item.setData(0, self.ROLE_STATUS_PERCENT)
def on_view_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self._view)
menu.addAction(self.action_remove_selected)
menu.exec_(QtGui.QCursor.pos())
def on_double_click(self, index):
filepath_item = self._model.item(index.row(), self.COL_FILEPATH)
shot_filepath = filepath_item.text()
# get path to text editor
viewer_path = self.exp_config.get('programs', 'hdf5_viewer')
viewer_args = self.exp_config.get('programs', 'hdf5_viewer_arguments')
# Get the current labscript file:
if not viewer_path:
error_dialog("No hdf5 viewer specified in the labconfig.")
if '{file}' in viewer_args:
# Split the args on spaces into a list, replacing {file} with the labscript file
viewer_args = [arg if arg != '{file}' else shot_filepath for arg in viewer_args.split()]
else:
# Otherwise if {file} isn't already in there, append it to the other args:
viewer_args = [shot_filepath] + viewer_args.split()
try:
subprocess.Popen([viewer_path] + viewer_args)
except Exception as e:
error_dialog("Unable to launch hdf5 viewer specified in %s. Error was: %s" %
(self.exp_config.config_path, str(e)))
def set_columns_visible(self, columns_visible):
self.columns_visible = columns_visible
for column_index, visible in columns_visible.items():
self._view.setColumnHidden(column_index, not visible)
def update_column_levels(self):
"""Pads the keys and values of our lists of column names so that
they still match those in the dataframe after the number of
levels in its multiindex has increased (the number of levels never
decreases, given the current implementation of concat_with_padding())"""
extra_levels = self.dataframe.columns.nlevels - self.nlevels
if extra_levels > 0:
self.nlevels = self.dataframe.columns.nlevels
column_indices = {}
column_names = {}
for column_name in self.column_indices:
if not isinstance(column_name, tuple):
# It's one of our special columns
new_column_name = column_name
else:
new_column_name = column_name + ('',) * extra_levels
column_index = self.column_indices[column_name]
column_indices[new_column_name] = column_index
column_names[column_index] = new_column_name
self.column_indices = column_indices
self.column_names = column_names
@inmain_decorator()
def mark_as_deleted_off_disk(self, filepath):
# Confirm the shot hasn't been removed from lyse (we are in the main
# thread so there is no race condition in checking first)
if filepath not in self.dataframe['filepath'].values:
# Shot has been removed from FileBox, nothing to do here:
return
row_number = self.row_number_by_filepath[filepath]
status_item = self._model.item(row_number, self.COL_STATUS)
already_marked_as_deleted = status_item.data(self.ROLE_DELETED_OFF_DISK)
if already_marked_as_deleted:
return
# Icon only displays if percent completion is 100. This is also
# important so that the shot is not picked up as analysis
# incomplete and analysis re-attempted on it.
status_item.setData(True, self.ROLE_DELETED_OFF_DISK)
status_item.setData(100, self.ROLE_STATUS_PERCENT)
status_item.setToolTip("Shot has been deleted off disk or is unreadable")
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/drive--minus'))
app.output_box.output('Warning: Shot deleted from disk or no longer readable %s\n' % filepath, red=True)
@inmain_decorator()
def infer_objects(self):
"""Convert columns in the dataframe with dtype 'object' into compatible, more
specific types, if possible. This improves pickling performance and ensures
multishot analysis code does not encounter columns with dtype 'object' for
non-mixed numerical data, which it might choke on.
"""
self.dataframe = self.dataframe.infer_objects()
@inmain_decorator()
def update_row(self, filepath, dataframe_already_updated=False, status_percent=None, new_row_data=None, updated_row_data=None):
""""Updates a row in the dataframe and Qt model
to the data in the HDF5 file for that shot. Also sets the percent done, if specified"""
# To speed things up block signals to the model during update
self._model.blockSignals(True)
# Update the row in the dataframe first:
if (new_row_data is None) == (updated_row_data is None) and not dataframe_already_updated:
raise ValueError('Exactly one of new_row_data or updated_row_data must be provided')
try:
row_number = self.row_number_by_filepath[filepath]
except KeyError:
# Row has been deleted, nothing to do here:
return
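# Dataframe column labels are tuples padded with empty strings up to the
# current MultiIndex depth, so build the 'filepath' label at that depth: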
filepath_colname = ('filepath',) + ('',) * (self.nlevels - 1)
assert filepath == self.dataframe.at[row_number, filepath_colname]
if updated_row_data is not None and not dataframe_already_updated:
for group, name in updated_row_data:
column_name = (group, name) + ('',) * (self.nlevels - 2)
value = updated_row_data[group, name]
try:
self.dataframe.at[row_number, column_name] = value
except ValueError:
# did the column not already exist when we tried to set an iterable?
if column_name not in self.dataframe.columns:
# create it with a non-iterable and then overwrite with the iterable value:
self.dataframe.at[row_number, column_name] = None
else:
# Incompatible datatype - convert the datatype of the column to
# 'object'
self.dataframe[column_name] = self.dataframe[column_name].astype('object')
# Now that the column exists and has dtype object, we can set the value:
self.dataframe.at[row_number, column_name] = value
dataframe_already_updated = True
if not dataframe_already_updated:
if new_row_data is None:
raise ValueError("If dataframe_already_updated is False, then new_row_data, as returned "
"by dataframe_utils.get_dataframe_from_shot(filepath) must be provided.")
self.dataframe = replace_with_padding(self.dataframe, new_row_data, row_number)
self.update_column_levels()
# Check and create necessary new columns in the Qt model:
new_column_names = set(self.dataframe.columns) - set(self.column_names.values())
new_columns_start = self._model.columnCount()
self._model.insertColumns(new_columns_start, len(new_column_names))
for i, column_name in enumerate(sorted(new_column_names)):
# Set the header label of the new column:
column_number = new_columns_start + i
self.column_names[column_number] = column_name
self.column_indices[column_name] = column_number
if column_name in self.deleted_columns_visible:
# Restore the former visibility of this column if we've
# seen one with its name before:
visible = self.deleted_columns_visible[column_name]
self.columns_visible[column_number] = visible
self._view.setColumnHidden(column_number, not visible)
else:
# new columns are visible by default:
self.columns_visible[column_number] = True
column_name_as_string = '\n'.join(column_name).strip()
header_item = QtGui.QStandardItem(column_name_as_string)
header_item.setToolTip(column_name_as_string)
self._model.setHorizontalHeaderItem(column_number, header_item)
# Check and remove any no-longer-needed columns in the Qt model:
defunct_column_names = (set(self.column_names.values()) - set(self.dataframe.columns)
- {self.column_names[self.COL_STATUS], self.column_names[self.COL_FILEPATH]})
defunct_column_indices = [self.column_indices[column_name] for column_name in defunct_column_names]
for column_number in sorted(defunct_column_indices, reverse=True):
# Remove columns from the Qt model. In reverse order so that
# removals do not change the position of columns yet to be
# removed.
self._model.removeColumn(column_number)
# Save whether or not the column was visible when it was
# removed (so that if it is re-added the visibility will be retained):
self.deleted_columns_visible[self.column_names[column_number]] = self.columns_visible[column_number]
del self.column_names[column_number]
del self.columns_visible[column_number]
if defunct_column_indices:
# Renumber the keys of self.columns_visible and self.column_names to reflect deletions:
self.column_names = {newindex: name for newindex, (oldindex, name) in enumerate(sorted(self.column_names.items()))}
self.columns_visible = {newindex: visible for newindex, (oldindex, visible) in enumerate(sorted(self.columns_visible.items()))}
# Update the inverse mapping of self.column_names:
self.column_indices = {name: index for index, name in self.column_names.items()}
# Update the data in the Qt model:
dataframe_row = self.dataframe.iloc[row_number].to_dict()
for column_number, column_name in self.column_names.items():
if not isinstance(column_name, tuple):
# One of our special columns, does not correspond to a column in the dataframe:
continue
if updated_row_data is not None and column_name not in updated_row_data:
continue
value = dataframe_row[column_name]
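# Format the value for display: floats are shown in scientific notation, and
# only the first line of a multiline string appears in the cell (the full
# repr goes in the tooltip below).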
if isinstance(value, float):
value_str = scientific_notation(value)
else:
value_str = str(value)
lines = value_str.splitlines()
if len(lines) > 1:
short_value_str = lines[0] + ' ...'
else:
short_value_str = value_str
item = self._model.item(row_number, column_number)
if item is None:
# This is the first time we've written a value to this part of the model:
item = QtGui.QStandardItem(short_value_str)
item.setData(QtCore.Qt.AlignCenter, QtCore.Qt.TextAlignmentRole)
self._model.setItem(row_number, column_number, item)
else:
item.setText(short_value_str)
item.setToolTip(repr(value))
for i, column_name in enumerate(sorted(new_column_names)):
# Resize any new columns to fit contents:
column_number = new_columns_start + i
self._view.resizeColumnToContents(column_number)
if status_percent is not None:
status_item = self._model.item(row_number, self.COL_STATUS)
status_item.setData(status_percent, self.ROLE_STATUS_PERCENT)
if new_column_names or defunct_column_names:
self.columns_changed.emit()
# unblock signals to the model and tell it to update
self._model.blockSignals(False)
self._model.layoutChanged.emit()
def new_row(self, filepath, done=False):
status_item = QtGui.QStandardItem()
if done:
status_item.setData(100, self.ROLE_STATUS_PERCENT)
status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/tick'))
else:
status_item.setData(0, self.ROLE_STATUS_PERCENT)
status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
name_item = QtGui.QStandardItem(filepath)
return [status_item, name_item]
def renumber_rows(self, add_from=0):
"""Add/update row indices - the rows are numbered in simple sequential
order for easy comparison with the dataframe. add_from allows you to
only add numbers for new rows from the given index as a performance
optimisation, though if the number of digits changes, all rows will
still be renumbered. add_from should not be used if rows have been
deleted."""
n_digits = len(str(self._model.rowCount()))
if n_digits != self._previous_n_digits:
# All labels must be updated:
add_from = 0
self._previous_n_digits = n_digits
if add_from == 0:
self.row_number_by_filepath = {}
for row_number in range(add_from, self._model.rowCount()):
vertical_header_item = self._model.verticalHeaderItem(row_number)
row_number_str = str(row_number).rjust(n_digits)
vert_header_text = '{}. '.format(row_number_str)
filepath_item = self._model.item(row_number, self.COL_FILEPATH)
filepath = filepath_item.text()
self.row_number_by_filepath[filepath] = row_number
if self.integer_indexing:
header_cols = ['sequence_index', 'run number', 'run repeat']
header_strings = []
for col in header_cols:
val = self.dataframe[col].values[row_number]
if pandas.notna(val):
header_strings.append('{:04d}'.format(val))
else:
header_strings.append('----')
vert_header_text += ' | '.join(header_strings)
else:
basename = os.path.splitext(os.path.basename(filepath))[0]
vert_header_text += basename
vertical_header_item.setText(vert_header_text)
@inmain_decorator()
def add_files(self, filepaths, new_row_data, done=False):
"""Add files to the dataframe model. New_row_data should be a
dataframe containing the new rows."""
to_add = []
# Check for duplicates:
for filepath in filepaths:
if filepath in self.row_number_by_filepath or filepath in to_add:
app.output_box.output('Warning: Ignoring duplicate shot %s\n' % filepath, red=True)
if new_row_data is not None:
df_row_index = np.where(new_row_data['filepath'].values == filepath)
new_row_data = new_row_data.drop(df_row_index[0])
new_row_data.index = pandas.Index(range(len(new_row_data)))
else:
to_add.append(filepath)
assert len(new_row_data) == len(to_add)
if to_add:
# Update the dataframe:
self.dataframe = concat_with_padding(self.dataframe, new_row_data)
self.update_column_levels()
app.filebox.set_add_shots_progress(None, None, "updating filebox")
for filepath in to_add:
# Add the new rows to the Qt model:
self._model.appendRow(self.new_row(filepath, done=done))
vert_header_item = QtGui.QStandardItem('...loading...')
self._model.setVerticalHeaderItem(self._model.rowCount() - 1, vert_header_item)
self._view.resizeRowToContents(self._model.rowCount() - 1)
self.renumber_rows(add_from=self._model.rowCount()-len(to_add))
# Update the Qt model:
for filepath in to_add:
self.update_row(filepath, dataframe_already_updated=True)
@inmain_decorator()
def get_first_incomplete(self):
"""Returns the filepath of the first shot in the model that has not
been analysed"""
for row in range(self._model.rowCount()):
status_item = self._model.item(row, self.COL_STATUS)
if status_item.data(self.ROLE_STATUS_PERCENT) != 100:
filepath_item = self._model.item(row, self.COL_FILEPATH)
return filepath_item.text()
class FileBox(object):
def __init__(self, container, exp_config, to_singleshot, from_singleshot, to_multishot, from_multishot):
self.exp_config = exp_config
self.to_singleshot = to_singleshot
self.to_multishot = to_multishot
self.from_singleshot = from_singleshot
self.from_multishot = from_multishot
self.logger = logging.getLogger('lyse.FileBox')
self.logger.info('starting')
loader = UiLoader()
loader.registerCustomWidget(TableView)
self.ui = loader.load(os.path.join(LYSE_DIR, 'filebox.ui'))
self.ui.progressBar_add_shots.hide()
container.addWidget(self.ui)
self.shots_model = DataFrameModel(self.ui.tableView, self.exp_config)
set_auto_scroll_to_end(self.ui.tableView.verticalScrollBar())
self.edit_columns_dialog = EditColumns(self, self.shots_model.column_names, self.shots_model.columns_visible)
self.last_opened_shots_folder = self.exp_config.get('paths', 'experiment_shot_storage')
self.connect_signals()
self.analysis_paused = False
self.multishot_required = False
# An Event to let the analysis thread know to check for shots that
# need analysing, rather than using a time.sleep:
self.analysis_pending = threading.Event()
# The folder that the 'add shots' dialog will open to:
self.current_folder = self.exp_config.get('paths', 'experiment_shot_storage')
# A queue for storing incoming files from the ZMQ server so
# the server can keep receiving files even if analysis is slow
# or paused:
self.incoming_queue = queue.Queue()
# Start the thread to handle incoming files, and store them in
# a buffer if processing is paused:
self.incoming = threading.Thread(target=self.incoming_buffer_loop)
self.incoming.daemon = True
self.incoming.start()
self.analysis = threading.Thread(target = self.analysis_loop)
self.analysis.daemon = True
self.analysis.start()
def connect_signals(self):
self.ui.pushButton_edit_columns.clicked.connect(self.on_edit_columns_clicked)
self.shots_model.columns_changed.connect(self.on_columns_changed)
self.ui.toolButton_add_shots.clicked.connect(self.on_add_shot_files_clicked)
self.ui.toolButton_remove_shots.clicked.connect(self.shots_model.on_remove_selection)
self.ui.tableView.doubleLeftClicked.connect(self.shots_model.on_double_click)
self.ui.pushButton_analysis_running.toggled.connect(self.on_analysis_running_toggled)
self.ui.pushButton_mark_as_not_done.clicked.connect(self.on_mark_selection_not_done_clicked)
self.ui.pushButton_run_multishot_analysis.clicked.connect(self.on_run_multishot_analysis_clicked)
def on_edit_columns_clicked(self):
self.edit_columns_dialog.show()
def on_columns_changed(self):
column_names = self.shots_model.column_names
columns_visible = self.shots_model.columns_visible
self.edit_columns_dialog.update_columns(column_names, columns_visible)
def on_add_shot_files_clicked(self):
shot_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
'Select shot files',
self.last_opened_shots_folder,
"HDF5 files (*.h5)")
if type(shot_files) is tuple:
shot_files, _ = shot_files
if not shot_files:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
shot_files = [os.path.abspath(shot_file) for shot_file in shot_files]
# Save the containing folder for use next time we open the dialog box:
self.last_opened_shots_folder = os.path.dirname(shot_files[0])
# Queue the files to be opened:
for filepath in shot_files:
self.incoming_queue.put(filepath)
def on_analysis_running_toggled(self, pressed):
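# The pause button is checkable: 'pressed' (checked) means analysis is paused.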
if pressed:
self.analysis_paused = True
self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
self.ui.pushButton_analysis_running.setText('Analysis paused')
else:
self.analysis_paused = False
self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
self.ui.pushButton_analysis_running.setText('Analysis running')
self.analysis_pending.set()
def on_mark_selection_not_done_clicked(self):
self.shots_model.mark_selection_not_done()
# Let the analysis loop know to look for these shots:
self.analysis_pending.set()
def on_run_multishot_analysis_clicked(self):
self.multishot_required = True
self.analysis_pending.set()
def set_columns_visible(self, columns_visible):
self.shots_model.set_columns_visible(columns_visible)
@inmain_decorator()
def set_add_shots_progress(self, completed, total, message):
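# %v, %m and %p are QProgressBar placeholders (value, maximum, percent)
# substituted by Qt; only the message is formatted in here.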
self.ui.progressBar_add_shots.setFormat("Adding shots: [{}] %v/%m (%p%)".format(message))
if completed == total and message is None:
self.ui.progressBar_add_shots.hide()
else:
if total is not None:
self.ui.progressBar_add_shots.setMaximum(total)
if completed is not None:
self.ui.progressBar_add_shots.setValue(completed)
if self.ui.progressBar_add_shots.isHidden():
self.ui.progressBar_add_shots.show()
if completed is None and total is None and message is not None:
# Ensure a repaint when only the message changes:
self.ui.progressBar_add_shots.repaint()
def incoming_buffer_loop(self):
"""We use a queue as a buffer for incoming shots. We don't want to hang and not
respond to a client submitting shots, so we just let shots pile up here until we can get to them.
The downside to this is that we can't return errors to the client if the shot cannot be added,
but the suggested workflow is to handle errors here anyway. A client running shots shouldn't stop
the experiment on account of errors from the analysis stage, so what's the point of passing errors to it?
We'll just raise errors here and the user can decide what to do with them."""
logger = logging.getLogger('lyse.FileBox.incoming')
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
n_shots_added = 0
while True:
try:
filepaths = []
filepath = self.incoming_queue.get()
filepaths.append(filepath)
if self.incoming_queue.qsize() == 0:
# Wait momentarily in case more arrive so we can batch process them:
time.sleep(0.1)
# Batch process to decrease number of dataframe concatenations:
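# The batch size scales with how many shots are already loaded, so the cost
# of each concatenation stays roughly proportional as the dataframe grows.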
batch_size = len(self.shots_model.dataframe) // 3 + 1
while True:
try:
filepath = self.incoming_queue.get(False)
except queue.Empty:
break
else:
filepaths.append(filepath)
if len(filepaths) >= batch_size:
break
logger.info('adding:\n%s' % '\n'.join(filepaths))
if n_shots_added == 0:
total_shots = self.incoming_queue.qsize() + len(filepaths)
self.set_add_shots_progress(1, total_shots, "reading shot files")
# Remove duplicates from the list (preserving order) in case the
# client sent the same filepath multiple times:
filepaths = sorted(set(filepaths), key=filepaths.index) # Inefficient but readable
# We open the HDF5 files here outside the GUI thread so as not to hang the GUI:
dataframes = []
indices_of_files_not_found = []
for i, filepath in enumerate(filepaths):
try:
dataframe = get_dataframe_from_shot(filepath)
dataframes.append(dataframe)
except IOError:
app.output_box.output('Warning: Ignoring shot file not found or not readable %s\n' % filepath, red=True)
indices_of_files_not_found.append(i)
n_shots_added += 1
shots_remaining = self.incoming_queue.qsize()
total_shots = n_shots_added + shots_remaining + len(filepaths) - (i + 1)
self.set_add_shots_progress(n_shots_added, total_shots, "reading shot files")
self.set_add_shots_progress(n_shots_added, total_shots, "concatenating dataframes")
if dataframes:
new_row_data = concat_with_padding(*dataframes)
else:
new_row_data = None
# Do not add the shots that were not found on disk. Reverse
# loop so that removing an item doesn't change the indices of
# subsequent removals:
for i in reversed(indices_of_files_not_found):
del filepaths[i]
if filepaths:
self.shots_model.add_files(filepaths, new_row_data)
# Let the analysis loop know to look for new shots:
self.analysis_pending.set()
if shots_remaining == 0:
self.set_add_shots_progress(n_shots_added, total_shots, None)
n_shots_added = 0 # reset our counter for the next batch
except Exception:
# Keep this incoming loop running at all costs, but make the
# otherwise uncaught exception visible to the user:
zprocess.raise_exception_in_thread(sys.exc_info())
def analysis_loop(self):
logger = logging.getLogger('lyse.FileBox.analysis_loop')
# HDF5 prints lots of errors by default, for things that aren't
# actually errors. These are silenced on a per thread basis,
# and automatically silenced in the main thread when h5py is
# imported. So we'll silence them in this thread too:
h5py._errors.silence_errors()
while True:
try:
self.analysis_pending.wait()
self.analysis_pending.clear()
at_least_one_shot_analysed = False
while True:
if not self.analysis_paused:
# Find the first shot that has not finished being analysed:
filepath = self.shots_model.get_first_incomplete()
if filepath is not None:
logger.info('analysing: %s'%filepath)
self.do_singleshot_analysis(filepath)
at_least_one_shot_analysed = True
if filepath is None and at_least_one_shot_analysed:
self.multishot_required = True
if filepath is None:
break
if self.multishot_required:
logger.info('doing multishot analysis')
self.do_multishot_analysis()
else:
logger.info('analysis is paused')
break
if self.multishot_required:
logger.info('doing multishot analysis')
self.do_multishot_analysis()
except Exception:
etype, value, tb = sys.exc_info()
orig_exception = ''.join(traceback.format_exception_only(etype, value))
message = ('Analysis loop encountered unexpected exception. ' +
'This is a bug and should be reported. The analysis ' +
'loop is continuing, but lyse may be in an inconsistent state. '
'Restart lyse, or continue at your own risk. '
'Original exception was:\n\n' + orig_exception)
# Raise the exception in a thread so we can keep running
zprocess.raise_exception_in_thread((RuntimeError, RuntimeError(message), tb))
self.pause_analysis()
@inmain_decorator()
def pause_analysis(self):
# This automatically triggers the slot that sets self.analysis_paused
self.ui.pushButton_analysis_running.setChecked(True)
def do_singleshot_analysis(self, filepath):
# Check the shot file exists before sending it to the singleshot
# routinebox. This does not guarantee it won't have been deleted by
# the time the routinebox starts running analysis on it, but by
# detecting it now we can most of the time avoid the user code
# coughing exceptions due to the file not existing. Which would also
# not be a problem, but this way we avoid polluting the outputbox with
# more errors than necessary.
if not os.path.exists(filepath):
self.shots_model.mark_as_deleted_off_disk(filepath)
return
self.to_singleshot.put(filepath)
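# The singleshot routinebox replies with (signal, status_percent, updated_data)
# tuples; keep processing updates until it reports 'done' or 'error'.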
while True:
signal, status_percent, updated_data = self.from_singleshot.get()
for file in updated_data:
# Update the data for all the rows with new data:
self.shots_model.update_row(file, updated_row_data=updated_data[file])
# Update the status percent for the row on which analysis is actually running:
self.shots_model.update_row(filepath, status_percent=status_percent, dataframe_already_updated=True)
if signal == 'done':
return
if signal == 'error':
if not os.path.exists(filepath):
# Do not pause if the file has been deleted. An error is
# no surprise there:
self.shots_model.mark_as_deleted_off_disk(filepath)
else:
self.pause_analysis()
return
if signal == 'progress':
continue
raise ValueError('invalid signal %s' % str(signal))
def do_multishot_analysis(self):
self.to_multishot.put(None)
while True:
signal, _, updated_data = self.from_multishot.get()
for file in updated_data:
self.shots_model.update_row(file, updated_row_data=updated_data[file])
if signal == 'done':
self.multishot_required = False
return
elif signal == 'error':
self.pause_analysis()
return
class Lyse(object):
def __init__(self):
splash.update_text('loading graphical interface')
loader = UiLoader()
self.ui = loader.load(os.path.join(LYSE_DIR, 'main.ui'), LyseMainWindow())
self.connect_signals()
self.setup_config()
self.port = int(self.exp_config.get('ports', 'lyse'))
# The singleshot routinebox will be connected to the filebox
# by queues:
to_singleshot = queue.Queue()
from_singleshot = queue.Queue()
# So will the multishot routinebox:
to_multishot = queue.Queue()
from_multishot = queue.Queue()
self.output_box = OutputBox(self.ui.verticalLayout_output_box)
self.singleshot_routinebox = RoutineBox(self.ui.verticalLayout_singleshot_routinebox, self.exp_config,
self, to_singleshot, from_singleshot, self.output_box.port)
self.multishot_routinebox = RoutineBox(self.ui.verticalLayout_multishot_routinebox, self.exp_config,
self, to_multishot, from_multishot, self.output_box.port, multishot=True)
self.filebox = FileBox(self.ui.verticalLayout_filebox, self.exp_config,
to_singleshot, from_singleshot, to_multishot, from_multishot)
self.last_save_config_file = None
self.last_save_data = None
self.ui.actionLoad_configuration.triggered.connect(self.on_load_configuration_triggered)
self.ui.actionRevert_configuration.triggered.connect(self.on_revert_configuration_triggered)
self.ui.actionSave_configuration.triggered.connect(self.on_save_configuration_triggered)
self.ui.actionSave_configuration_as.triggered.connect(self.on_save_configuration_as_triggered)
self.ui.actionSave_dataframe_as.triggered.connect(lambda: self.on_save_dataframe_triggered(True))
self.ui.actionSave_dataframe.triggered.connect(lambda: self.on_save_dataframe_triggered(False))
self.ui.actionLoad_dataframe.triggered.connect(self.on_load_dataframe_triggered)
self.ui.resize(1600, 900)
# Set the splitters to appropriate fractions of their maximum size:
self.ui.splitter_horizontal.setSizes([1000, 600])
self.ui.splitter_vertical.setSizes([300, 600])
# autoload a config file, if labconfig is set to do so:
try:
autoload_config_file = self.exp_config.get('lyse', 'autoload_config_file')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.output_box.output('Ready.\n\n')
else:
self.ui.setEnabled(False)
self.output_box.output('Loading default config file %s...' % autoload_config_file)
def load_the_config_file():
try:
self.load_configuration(autoload_config_file, restore_window_geometry)
self.output_box.output('done.\n')
except Exception as e:
self.output_box.output('\nCould not load config file: %s: %s\n\n' %
(e.__class__.__name__, str(e)), red=True)
else:
self.output_box.output('Ready.\n\n')
finally:
self.ui.setEnabled(True)
# Load the window geometry now, but then defer the other loading until 50ms
# after the window has shown, so that the GUI pops up faster in the meantime.
try:
self.load_window_geometry_configuration(autoload_config_file)
except Exception:
# ignore error for now and let it be raised again in the call to load_configuration:
restore_window_geometry = True
else:
# Success - skip loading window geometry in load_configuration:
restore_window_geometry = False
self.ui.firstPaint.connect(lambda: QtCore.QTimer.singleShot(50, load_the_config_file))
self.ui.show()
# self.ui.showMaximized()
def terminate_all_workers(self):
for routine in self.singleshot_routinebox.routines + self.multishot_routinebox.routines:
routine.end_child()
def workers_terminated(self):
terminated = {}
for routine in self.singleshot_routinebox.routines + self.multishot_routinebox.routines:
routine.worker.poll()
terminated[routine.filepath] = routine.worker.returncode is not None
return terminated
def are_you_sure(self):
message = ('Current configuration (which scripts are loaded and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Quit lyse', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return False
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
return True
def on_close_event(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
if self.only_window_geometry_is_different(save_data, self.last_save_data):
self.save_configuration(self.last_save_config_file)
self.terminate_all_workers()
return True
elif not self.are_you_sure():
return False
self.terminate_all_workers()
return True
def on_save_configuration_triggered(self):
if self.last_save_config_file is None:
self.on_save_configuration_as_triggered()
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
else:
self.save_configuration(self.last_save_config_file)
def on_revert_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = 'Revert configuration to the last saved state in \'%s\'?' % self.last_save_config_file
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
elif reply == QtWidgets.QMessageBox.Yes:
self.load_configuration(self.last_save_config_file)
else:
error_dialog('no changes to revert')
def on_save_configuration_as_triggered(self):
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
try:
default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
except LabConfig.NoOptionError:
self.exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))
default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
if not os.path.exists(default_path):
os.makedirs(default_path)
default = os.path.join(default_path, 'lyse.ini')
save_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
'Select file to save current lyse configuration',
default,
"config files (*.ini)")
if type(save_file) is tuple:
save_file, _ = save_file
if not save_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
save_file = os.path.abspath(save_file)
self.save_configuration(save_file)
def only_window_geometry_is_different(self, current_data, old_data):
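# Compare all saved settings except the window-geometry related keys; returns
# True if only those geometry keys differ between the two snapshots.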
ui_keys = ['window_size', 'window_pos', 'splitter', 'splitter_vertical', 'splitter_horizontal']
compare = [current_data[key] == old_data[key] for key in current_data.keys() if key not in ui_keys]
return all(compare)
def get_save_data(self):
save_data = {}
box = self.singleshot_routinebox
save_data['SingleShot'] = list(zip([routine.filepath for routine in box.routines],
[box.model.item(row, box.COL_ACTIVE).checkState()
for row in range(box.model.rowCount())]))
save_data['LastSingleShotFolder'] = box.last_opened_routine_folder
box = self.multishot_routinebox
save_data['MultiShot'] = list(zip([routine.filepath for routine in box.routines],
[box.model.item(row, box.COL_ACTIVE).checkState()
for row in range(box.model.rowCount())]))
save_data['LastMultiShotFolder'] = box.last_opened_routine_folder
save_data['LastFileBoxFolder'] = self.filebox.last_opened_shots_folder
save_data['analysis_paused'] = self.filebox.analysis_paused
window_size = self.ui.size()
save_data['window_size'] = (window_size.width(), window_size.height())
window_pos = self.ui.pos()
save_data['window_pos'] = (window_pos.x(), window_pos.y())
save_data['screen_geometry'] = get_screen_geometry()
save_data['splitter'] = self.ui.splitter.sizes()
save_data['splitter_vertical'] = self.ui.splitter_vertical.sizes()
save_data['splitter_horizontal'] = self.ui.splitter_horizontal.sizes()
return save_data
def save_configuration(self, save_file):
lyse_config = LabConfig(save_file)
save_data = self.get_save_data()
self.last_save_config_file = save_file
self.last_save_data = save_data
for key, value in save_data.items():
lyse_config.set('lyse_state', key, pprint.pformat(value))
def on_load_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = ('Current configuration (which groups are active/open and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'lyse.ini')
file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select lyse configuration file to load',
default,
"config files (*.ini)")
if type(file) is tuple:
file, _ = file
if not file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
file = os.path.abspath(file)
self.load_configuration(file)
def load_configuration(self, filename, restore_window_geometry=True):
self.last_save_config_file = filename
self.ui.actionSave_configuration.setText('Save configuration %s' % filename)
lyse_config = LabConfig(filename)
try:
self.singleshot_routinebox.add_routines(ast.literal_eval(lyse_config.get('lyse_state', 'SingleShot')), clear_existing=True)
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.singleshot_routinebox.last_opened_routine_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastSingleShotFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.multishot_routinebox.add_routines(ast.literal_eval(lyse_config.get('lyse_state', 'MultiShot')), clear_existing=True)
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.multishot_routinebox.last_opened_routine_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastMultiShotFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.filebox.last_opened_shots_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastFileBoxFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
if ast.literal_eval(lyse_config.get('lyse_state', 'analysis_paused')):
self.filebox.pause_analysis()
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
if restore_window_geometry:
self.load_window_geometry_configuration(filename)
# Set as self.last_save_data:
save_data = self.get_save_data()
self.last_save_data = save_data
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
def load_window_geometry_configuration(self, filename):
"""Load only the window geometry from the config file. It's useful to have this
separate from the rest of load_configuration so that it can be called before the
window is shown."""
lyse_config = LabConfig(filename)
try:
screen_geometry = ast.literal_eval(lyse_config.get('lyse_state', 'screen_geometry'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
else:
# Only restore the window size and position, and splitter
# positions if the screen is the same size/same number of monitors
# etc. This prevents the window moving off the screen if say, the
# position was saved when 2 monitors were plugged in but there is
# only one now, and the splitters may not make sense in light of a
# different window size, so better to fall back to defaults:
current_screen_geometry = get_screen_geometry()
if current_screen_geometry == screen_geometry:
try:
self.ui.resize(*ast.literal_eval(lyse_config.get('lyse_state', 'window_size')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.move(*ast.literal_eval(lyse_config.get('lyse_state', 'window_pos')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter_vertical.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_vertical')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter_horizontal.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_horizontal')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
def setup_config(self):
required_config_params = {"DEFAULT": ["experiment_name"],
"programs": ["text_editor",
"text_editor_arguments",
"hdf5_viewer",
"hdf5_viewer_arguments"],
"paths": ["shared_drive",
"experiment_shot_storage",
"analysislib"],
"ports": ["lyse"]
}
self.exp_config = LabConfig(required_params=required_config_params)
def connect_signals(self):
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
# Keyboard shortcuts:
QtWidgets.QShortcut('Del', self.ui, lambda: self.delete_items(True))
QtWidgets.QShortcut('Shift+Del', self.ui, lambda: self.delete_items(False))
def on_save_dataframe_triggered(self, choose_folder=True):
df = self.filebox.shots_model.dataframe.copy()
if len(df) > 0:
default = self.exp_config.get('paths', 'experiment_shot_storage')
if choose_folder:
save_path = QtWidgets.QFileDialog.getExistingDirectory(self.ui, 'Select a Folder for the Dataframes', default)
if type(save_path) is tuple:
save_path, _ = save_path
if not save_path:
# User cancelled
return
sequences = df.sequence.unique()
for sequence in sequences:
sequence_df = pandas.DataFrame(df[df['sequence'] == sequence], columns=df.columns).dropna(axis=1, how='all')
labscript = sequence_df['labscript'].iloc[0]
filename = "dataframe_{}_{}.msg".format(sequence.to_pydatetime().strftime("%Y%m%dT%H%M%S"),labscript[:-3])
if not choose_folder:
save_path = os.path.dirname(sequence_df['filepath'].iloc[0])
sequence_df = sequence_df.infer_objects()  # infer_objects() returns a new dataframe, so assign it back
for col in sequence_df.columns:
if sequence_df[col].dtype == object:
sequence_df[col] = pandas.to_numeric(sequence_df[col], errors='ignore')
sequence_df.to_msgpack(os.path.join(save_path, filename))
else:
error_dialog('Dataframe is empty')
def on_load_dataframe_triggered(self):
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'dataframe.msg')
file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select dataframe file to load',
default,
"dataframe files (*.msg)")
if type(file) is tuple:
file, _ = file
if not file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
file = os.path.abspath(file)
df = pandas.read_msgpack(file).sort_values("run time").reset_index()
# Check for changes in the shot files since the dataframe was exported
def changed_since(filepath, time):
if os.path.isfile(filepath):
return os.path.getmtime(filepath) > time
else:
return False
filepaths = df["filepath"].tolist()
changetime_cache = os.path.getmtime(file)
need_updating = np.where([changed_since(x, changetime_cache) for x in filepaths])[0]  # use a list, not a lazy map, so np.where sees an elementwise boolean array
need_updating = np.sort(need_updating)[::-1] # sort in descending order to not remove the wrong items with pop
# Reload the files where changes were made since exporting
for index in need_updating:
filepath = filepaths.pop(index)
self.filebox.incoming_queue.put(filepath)
df = df.drop(need_updating)
self.filebox.shots_model.add_files(filepaths, df, done=True)
def delete_items(self, confirm):
"""Delete items from whichever box has focus, with optional confirmation
dialog"""
if self.filebox.ui.tableView.hasFocus():
self.filebox.shots_model.remove_selection(confirm)
if self.singleshot_routinebox.ui.treeView.hasFocus():
self.singleshot_routinebox.remove_selection(confirm)
if self.multishot_routinebox.ui.treeView.hasFocus():
self.multishot_routinebox.remove_selection(confirm)
if __name__ == "__main__":
logger = setup_logging('lyse')
labscript_utils.excepthook.set_logger(logger)
logger.info('\n\n===============starting===============\n')
qapplication = QtWidgets.QApplication(sys.argv)
qapplication.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus, False)
app = Lyse()
# Start the web server:
splash.update_text('starting analysis server')
server = WebServer(app.port)
splash.update_text('done')
# Let the interpreter run every 500ms so it sees Ctrl-C interrupts:
timer = QtCore.QTimer()
timer.start(500)
timer.timeout.connect(lambda: None) # Let the interpreter run each 500 ms.
# Upon seeing a ctrl-c interrupt, quit the event loop
signal.signal(signal.SIGINT, lambda *args: qapplication.exit())
splash.hide()
qapplication.exec_()
server.shutdown()
the-stack_0_14460 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------------------#
# This file is part of Pyccel which is released under MIT License. See the LICENSE file or #
# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. #
#------------------------------------------------------------------------------------------#
import os
from pyccel.codegen.printing.fcode import FCodePrinter
from pyccel.codegen.printing.ccode import CCodePrinter
from pyccel.codegen.printing.pycode import PythonCodePrinter
from pyccel.ast.core import FunctionDef, Interface, ModuleHeader
from pyccel.errors.errors import Errors
from pyccel.utilities.stage import PyccelStage
_extension_registry = {'fortran': 'f90', 'c':'c', 'python':'py'}
_header_extension_registry = {'fortran': None, 'c':'h', 'python':None}
printer_registry = {
'fortran':FCodePrinter,
'c':CCodePrinter,
'python':PythonCodePrinter
}
pyccel_stage = PyccelStage()
class Codegen(object):
"""Abstract class for code generator."""
def __init__(self, parser, name):
"""Constructor for Codegen.
parser: pyccel parser
name: str
name of the generated module or program.
"""
pyccel_stage.set_stage('codegen')
self._parser = parser
self._ast = parser.ast
self._name = name
self._printer = None
self._language = None
#TODO verify module name != function name
#it generates a compilation error
self._stmts = {}
_structs = [
'imports',
'body',
'routines',
'classes',
'modules',
'variables',
'interfaces',
]
for key in _structs:
self._stmts[key] = []
self._collect_statements()
self._is_program = self.ast.program is not None
@property
def parser(self):
return self._parser
@property
def name(self):
"""Returns the name associated to the source code"""
return self._name
@property
def imports(self):
"""Returns the imports of the source code."""
return self._stmts['imports']
@property
def variables(self):
"""Returns the variables of the source code."""
return self._stmts['variables']
@property
def body(self):
"""Returns the body of the source code, if it is a Program or Module."""
return self._stmts['body']
@property
def routines(self):
"""Returns functions/subroutines."""
return self._stmts['routines']
@property
def classes(self):
"""Returns the classes if Module."""
return self._stmts['classes']
@property
def interfaces(self):
"""Returns the interfaces."""
return self._stmts['interfaces']
@property
def modules(self):
"""Returns the modules if Program."""
return self._stmts['modules']
@property
def is_program(self):
"""Returns True if a Program."""
return self._is_program
@property
def ast(self):
"""Returns the AST."""
return self._ast
@property
def language(self):
"""Returns the used language"""
return self._language
def set_printer(self, **settings):
""" Set the current codeprinter instance"""
# Get language used (default language used is fortran)
language = settings.pop('language', 'fortran')
# Set language
if language not in ['fortran', 'c', 'python']:
raise ValueError('{} language is not available'.format(language))
self._language = language
# instantiate codePrinter
code_printer = printer_registry[language]
errors = Errors()
errors.set_parser_stage('codegen')
# set the code printer
self._printer = code_printer(self.parser.filename, **settings)
def get_printer_imports(self):
"""return the imports of the current codeprinter"""
return self._printer.get_additional_imports()
def _collect_statements(self):
"""Collects statements and split them into routines, classes, etc."""
scope = self.parser.scope
funcs = []
interfaces = []
for i in scope.functions.values():
if isinstance(i, FunctionDef) and not i.is_header:
funcs.append(i)
elif isinstance(i, Interface):
interfaces.append(i)
self._stmts['imports' ] = list(scope.imports['imports'].values())
self._stmts['variables' ] = list(self.parser.get_variables(scope))
self._stmts['routines' ] = funcs
self._stmts['classes' ] = list(scope.classes.values())
self._stmts['interfaces'] = interfaces
self._stmts['body'] = self.ast
def doprint(self, **settings):
"""Prints the code in the target language."""
if not self._printer:
self.set_printer(**settings)
return self._printer.doprint(self.ast)
def export(self, filename=None, **settings):
"""Export code in filename"""
self.set_printer(**settings)
ext = _extension_registry[self._language]
header_ext = _header_extension_registry[self._language]
if filename is None: filename = self.name
header_filename = '{name}.{ext}'.format(name=filename, ext=header_ext)
filename = '{name}.{ext}'.format(name=filename, ext=ext)
# print module header
if header_ext is not None:
code = self._printer.doprint(ModuleHeader(self.ast))
with open(header_filename, 'w') as f:
for line in code:
f.write(line)
# print module
code = self._printer.doprint(self.ast)
with open(filename, 'w') as f:
for line in code:
f.write(line)
# print program
prog_filename = None
if self.is_program and self.language != 'python':
folder = os.path.dirname(filename)
fname = os.path.basename(filename)
prog_filename = os.path.join(folder,"prog_"+fname)
code = self._printer.doprint(self.ast.program)
with open(prog_filename, 'w') as f:
for line in code:
f.write(line)
return filename, prog_filename
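# Hypothetical usage sketch (not part of pyccel itself; names are illustrative):
# assuming `parser` is a pyccel parser whose AST has reached the codegen stage,
#     codegen = Codegen(parser, 'my_module')
#     mod_file, prog_file = codegen.export('my_module', language='fortran')
# writes the generated source to disk and returns the module and program filenames.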
the-stack_0_14461 | from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **config)
from .... import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **config)
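# ----------------------------------------------------------------------------
# Illustrative usage sketch (assumes docarray and annlite are installed).
# This mixin backs DocumentArray's 'annlite' storage, so configuring it
# might look like:
#
#     from docarray import DocumentArray
#     da = DocumentArray(storage='annlite',
#                        config={'n_dim': 128,
#                                'metric': 'cosine',
#                                'data_path': '/tmp/annlite_demo'})
#
# Leaving out data_path makes the store non-persistent: _init_storage then
# falls back to a TemporaryDirectory, as implemented above.
# ----------------------------------------------------------------------------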
|
the-stack_0_14462 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Editor Widget"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
import logging
import os
import os.path as osp
import sys
import functools
import unicodedata
# Third party imports
import qstylizer
from qtpy.compat import getsavefilename
from qtpy.QtCore import (QByteArray, QFileInfo, QPoint, QSize, Qt, QTimer,
Signal, Slot)
from qtpy.QtGui import QFont
from qtpy.QtWidgets import (QAction, QApplication, QFileDialog, QHBoxLayout,
QLabel, QMainWindow, QMessageBox, QMenu,
QSplitter, QVBoxLayout, QWidget, QListWidget,
QListWidgetItem, QSizePolicy, QToolBar)
# Local imports
from spyder.api.panel import Panel
from spyder.config.base import _, running_under_pytest
from spyder.config.manager import CONF
from spyder.config.utils import (get_edit_filetypes, get_edit_filters,
get_filter, is_kde_desktop, is_anaconda)
from spyder.plugins.editor.utils.autosave import AutosaveForStack
from spyder.plugins.editor.utils.editor import get_file_language
from spyder.plugins.editor.utils.switcher import EditorSwitcherManager
from spyder.plugins.editor.widgets import codeeditor
from spyder.plugins.editor.widgets.editorstack_helpers import (
ThreadManager, FileInfo, StackHistory)
from spyder.plugins.editor.widgets.status import (CursorPositionStatus,
EncodingStatus, EOLStatus,
ReadWriteStatus, VCSStatus)
from spyder.plugins.explorer.widgets.explorer import (
show_in_external_file_explorer)
from spyder.plugins.outlineexplorer.widgets import OutlineExplorerWidget
from spyder.plugins.outlineexplorer.editor import OutlineExplorerProxyEditor
from spyder.plugins.outlineexplorer.api import cell_name
from spyder.py3compat import qbytearray_to_str, to_text_string
from spyder.utils import encoding, sourcecode, syntaxhighlighters
from spyder.utils.icon_manager import ima
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (add_actions, create_action,
create_toolbutton, MENU_SEPARATOR,
mimedata2url, set_menu_icons,
create_waitspinner)
from spyder.utils.stylesheet import (
APP_STYLESHEET, APP_TOOLBAR_STYLESHEET, PANES_TABBAR_STYLESHEET)
from spyder.widgets.findreplace import FindReplace
from spyder.widgets.tabs import BaseTabs
logger = logging.getLogger(__name__)
class TabSwitcherWidget(QListWidget):
"""Show tabs in mru order and change between them."""
def __init__(self, parent, stack_history, tabs):
QListWidget.__init__(self, parent)
self.setWindowFlags(Qt.FramelessWindowHint | Qt.Dialog)
self.editor = parent
self.stack_history = stack_history
self.tabs = tabs
self.setSelectionMode(QListWidget.SingleSelection)
self.itemActivated.connect(self.item_selected)
self.id_list = []
self.load_data()
size = CONF.get('main', 'completion/size')
self.resize(*size)
self.set_dialog_position()
self.setCurrentRow(0)
CONF.config_shortcut(lambda: self.select_row(-1), context='Editor',
name='Go to previous file', parent=self)
CONF.config_shortcut(lambda: self.select_row(1), context='Editor',
name='Go to next file', parent=self)
def load_data(self):
"""Fill ListWidget with the tabs texts.
Add elements in inverse order of stack_history.
"""
for index in reversed(self.stack_history):
text = self.tabs.tabText(index)
text = text.replace('&', '')
item = QListWidgetItem(ima.icon('TextFileIcon'), text)
self.addItem(item)
def item_selected(self, item=None):
"""Change to the selected document and hide this widget."""
if item is None:
item = self.currentItem()
# stack history is in inverse order
try:
index = self.stack_history[-(self.currentRow()+1)]
except IndexError:
pass
else:
self.editor.set_stack_index(index)
self.editor.current_changed(index)
self.hide()
def select_row(self, steps):
"""Move selected row a number of steps.
Iterates in a cyclic behaviour.
"""
row = (self.currentRow() + steps) % self.count()
self.setCurrentRow(row)
def set_dialog_position(self):
"""Positions the tab switcher in the top-center of the editor."""
left = self.editor.geometry().width()/2 - self.width()/2
top = (self.editor.tabs.tabBar().geometry().height() +
self.editor.fname_label.geometry().height())
self.move(self.editor.mapToGlobal(QPoint(left, top)))
def keyReleaseEvent(self, event):
"""Reimplement Qt method.
Handle "most recent used" tab behavior,
When ctrl is released and tab_switcher is visible, tab will be changed.
"""
if self.isVisible():
qsc = CONF.get_shortcut(context='Editor', name='Go to next file')
for key in qsc.split('+'):
key = key.lower()
if ((key == 'ctrl' and event.key() == Qt.Key_Control) or
(key == 'alt' and event.key() == Qt.Key_Alt)):
self.item_selected()
event.accept()
def keyPressEvent(self, event):
"""Reimplement Qt method to allow cyclic behavior."""
if event.key() == Qt.Key_Down:
self.select_row(1)
elif event.key() == Qt.Key_Up:
self.select_row(-1)
def focusOutEvent(self, event):
"""Reimplement Qt method to close the widget when loosing focus."""
event.ignore()
if sys.platform == "darwin":
if event.reason() != Qt.ActiveWindowFocusReason:
self.close()
else:
self.close()
class EditorStack(QWidget):
reset_statusbar = Signal()
readonly_changed = Signal(bool)
encoding_changed = Signal(str)
sig_editor_cursor_position_changed = Signal(int, int)
sig_refresh_eol_chars = Signal(str)
sig_refresh_formatting = Signal(bool)
starting_long_process = Signal(str)
ending_long_process = Signal(str)
redirect_stdio = Signal(bool)
exec_in_extconsole = Signal(str, bool)
run_cell_in_ipyclient = Signal(str, object, str, bool)
debug_cell_in_ipyclient = Signal(str, object, str, bool)
update_plugin_title = Signal()
editor_focus_changed = Signal()
zoom_in = Signal()
zoom_out = Signal()
zoom_reset = Signal()
sig_open_file = Signal(dict)
sig_close_file = Signal(str, str)
file_saved = Signal(str, str, str)
file_renamed_in_data = Signal(str, str, str)
opened_files_list_changed = Signal()
active_languages_stats = Signal(set)
todo_results_changed = Signal()
update_code_analysis_actions = Signal()
refresh_file_dependent_actions = Signal()
refresh_save_all_action = Signal()
sig_breakpoints_saved = Signal()
text_changed_at = Signal(str, int)
current_file_changed = Signal(str, int, int, int)
plugin_load = Signal((str,), ())
edit_goto = Signal(str, int, str)
sig_split_vertically = Signal()
sig_split_horizontally = Signal()
sig_new_file = Signal((str,), ())
sig_save_as = Signal()
sig_prev_edit_pos = Signal()
sig_prev_cursor = Signal()
sig_next_cursor = Signal()
sig_prev_warning = Signal()
sig_next_warning = Signal()
sig_go_to_definition = Signal(str, int, int)
sig_perform_completion_request = Signal(str, str, dict)
sig_option_changed = Signal(str, object) # config option needs changing
sig_save_bookmark = Signal(int)
sig_load_bookmark = Signal(int)
sig_save_bookmarks = Signal(str, str)
sig_help_requested = Signal(dict)
"""
This signal is emitted to request help on a given object `name`.
Parameters
----------
help_data: dict
Dictionary required by the Help pane to render a docstring.
Examples
--------
>>> help_data = {
'obj_text': str,
'name': str,
'argspec': str,
'note': str,
'docstring': str,
'force_refresh': bool,
'path': str,
}
See Also
--------
:py:meth:spyder.plugins.editor.widgets.editor.EditorStack.send_to_help
"""
def __init__(self, parent, actions):
QWidget.__init__(self, parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.threadmanager = ThreadManager(self)
self.new_window = False
self.horsplit_action = None
self.versplit_action = None
self.close_action = None
self.__get_split_actions()
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self.menu = None
self.switcher_dlg = None
self.switcher_manager = None
self.tabs = None
self.tabs_switcher = None
self.stack_history = StackHistory(self)
# External panels
self.external_panels = []
self.setup_editorstack(parent, layout)
self.find_widget = None
self.data = []
switcher_action = create_action(
self,
_("File switcher..."),
icon=ima.icon('filelist'),
triggered=self.open_switcher_dlg)
symbolfinder_action = create_action(
self,
_("Find symbols in file..."),
icon=ima.icon('symbol_find'),
triggered=self.open_symbolfinder_dlg)
copy_to_cb_action = create_action(self, _("Copy path to clipboard"),
icon=ima.icon('editcopy'),
triggered=lambda:
QApplication.clipboard().setText(self.get_current_filename()))
close_right = create_action(self, _("Close all to the right"),
triggered=self.close_all_right)
close_all_but_this = create_action(self, _("Close all but this"),
triggered=self.close_all_but_this)
sort_tabs = create_action(self, _("Sort tabs alphabetically"),
triggered=self.sort_file_tabs_alphabetically)
if sys.platform == 'darwin':
text = _("Show in Finder")
else:
text = _("Show in external file explorer")
external_fileexp_action = create_action(
self, text,
triggered=self.show_in_external_file_explorer,
shortcut=CONF.get_shortcut(context="Editor",
name="show in external file explorer"),
context=Qt.WidgetShortcut)
self.menu_actions = actions + [external_fileexp_action,
None, switcher_action,
symbolfinder_action,
copy_to_cb_action, None, close_right,
close_all_but_this, sort_tabs]
self.outlineexplorer = None
self.is_closable = False
self.new_action = None
self.open_action = None
self.save_action = None
self.revert_action = None
self.tempfile_path = None
self.title = _("Editor")
self.todolist_enabled = True
self.is_analysis_done = False
self.linenumbers_enabled = True
self.blanks_enabled = False
self.scrollpastend_enabled = False
self.edgeline_enabled = True
self.edgeline_columns = (79,)
self.close_parentheses_enabled = True
self.close_quotes_enabled = True
self.add_colons_enabled = True
self.auto_unindent_enabled = True
self.indent_chars = " "*4
self.tab_stop_width_spaces = 4
self.show_class_func_dropdown = False
self.help_enabled = False
self.default_font = None
self.wrap_enabled = False
self.tabmode_enabled = False
self.stripmode_enabled = False
self.intelligent_backspace_enabled = True
self.automatic_completions_enabled = True
self.automatic_completion_chars = 3
self.automatic_completion_ms = 300
self.completions_hint_enabled = True
self.completions_hint_after_ms = 500
self.hover_hints_enabled = True
self.format_on_save = False
self.code_snippets_enabled = True
self.code_folding_enabled = True
self.underline_errors_enabled = False
self.highlight_current_line_enabled = False
self.highlight_current_cell_enabled = False
self.occurrence_highlighting_enabled = True
self.occurrence_highlighting_timeout = 1500
self.checkeolchars_enabled = True
self.always_remove_trailing_spaces = False
self.add_newline = False
self.remove_trailing_newlines = False
self.convert_eol_on_save = False
self.convert_eol_on_save_to = 'LF'
self.focus_to_editor = True
self.run_cell_copy = False
self.create_new_file_if_empty = True
self.indent_guides = False
ccs = 'spyder/dark'
if ccs not in syntaxhighlighters.COLOR_SCHEME_NAMES:
ccs = syntaxhighlighters.COLOR_SCHEME_NAMES[0]
self.color_scheme = ccs
self.__file_status_flag = False
# Real-time code analysis
self.analysis_timer = QTimer(self)
self.analysis_timer.setSingleShot(True)
self.analysis_timer.setInterval(1000)
self.analysis_timer.timeout.connect(self.analyze_script)
# Update filename label
self.editor_focus_changed.connect(self.update_fname_label)
# Accepting drops
self.setAcceptDrops(True)
# Local shortcuts
self.shortcuts = self.create_shortcuts()
# For opening last closed tabs
self.last_closed_files = []
# Reference to save msgbox and avoid memory to be freed.
self.msgbox = None
# File types and filters used by the Save As dialog
self.edit_filetypes = None
self.edit_filters = None
# For testing
self.save_dialog_on_tests = not running_under_pytest()
        # Autosave component
self.autosave = AutosaveForStack(self)
self.last_cell_call = None
@Slot()
def show_in_external_file_explorer(self, fnames=None):
"""Show file in external file explorer"""
if fnames is None:
fnames = self.get_current_filename()
try:
show_in_external_file_explorer(fnames)
except FileNotFoundError as error:
file = str(error).split("'")[1]
if "xdg-open" in file:
msg_title = _("Warning")
msg = _("Spyder can't show this file in the external file "
"explorer because the <tt>xdg-utils</tt> package is "
"not available on your system.")
QMessageBox.information(self, msg_title, msg,
QMessageBox.Ok)
def create_shortcuts(self):
"""Create local shortcuts"""
# --- Configurable shortcuts
inspect = CONF.config_shortcut(
self.inspect_current_object,
context='Editor',
name='Inspect current object',
parent=self)
set_breakpoint = CONF.config_shortcut(
self.set_or_clear_breakpoint,
context='Editor',
name='Breakpoint',
parent=self)
set_cond_breakpoint = CONF.config_shortcut(
self.set_or_edit_conditional_breakpoint,
context='Editor',
name='Conditional breakpoint',
parent=self)
gotoline = CONF.config_shortcut(
self.go_to_line,
context='Editor',
name='Go to line',
parent=self)
tab = CONF.config_shortcut(
lambda: self.tab_navigation_mru(forward=False),
context='Editor',
name='Go to previous file',
parent=self)
tabshift = CONF.config_shortcut(
self.tab_navigation_mru,
context='Editor',
name='Go to next file',
parent=self)
prevtab = CONF.config_shortcut(
lambda: self.tabs.tab_navigate(-1),
context='Editor',
name='Cycle to previous file',
parent=self)
nexttab = CONF.config_shortcut(
lambda: self.tabs.tab_navigate(1),
context='Editor',
name='Cycle to next file',
parent=self)
run_selection = CONF.config_shortcut(
self.run_selection,
context='Editor',
name='Run selection',
parent=self)
new_file = CONF.config_shortcut(
lambda: self.sig_new_file[()].emit(),
context='Editor',
name='New file',
parent=self)
open_file = CONF.config_shortcut(
lambda: self.plugin_load[()].emit(),
context='Editor',
name='Open file',
parent=self)
save_file = CONF.config_shortcut(
self.save,
context='Editor',
name='Save file',
parent=self)
save_all = CONF.config_shortcut(
self.save_all,
context='Editor',
name='Save all',
parent=self)
save_as = CONF.config_shortcut(
lambda: self.sig_save_as.emit(),
context='Editor',
name='Save As',
parent=self)
close_all = CONF.config_shortcut(
self.close_all_files,
context='Editor',
name='Close all',
parent=self)
prev_edit_pos = CONF.config_shortcut(
lambda: self.sig_prev_edit_pos.emit(),
context="Editor",
name="Last edit location",
parent=self)
prev_cursor = CONF.config_shortcut(
lambda: self.sig_prev_cursor.emit(),
context="Editor",
name="Previous cursor position",
parent=self)
next_cursor = CONF.config_shortcut(
lambda: self.sig_next_cursor.emit(),
context="Editor",
name="Next cursor position",
parent=self)
zoom_in_1 = CONF.config_shortcut(
lambda: self.zoom_in.emit(),
context="Editor",
name="zoom in 1",
parent=self)
zoom_in_2 = CONF.config_shortcut(
lambda: self.zoom_in.emit(),
context="Editor",
name="zoom in 2",
parent=self)
zoom_out = CONF.config_shortcut(
lambda: self.zoom_out.emit(),
context="Editor",
name="zoom out",
parent=self)
zoom_reset = CONF.config_shortcut(
lambda: self.zoom_reset.emit(),
context="Editor",
name="zoom reset",
parent=self)
close_file_1 = CONF.config_shortcut(
self.close_file,
context="Editor",
name="close file 1",
parent=self)
close_file_2 = CONF.config_shortcut(
self.close_file,
context="Editor",
name="close file 2",
parent=self)
run_cell = CONF.config_shortcut(
self.run_cell,
context="Editor",
name="run cell",
parent=self)
debug_cell = CONF.config_shortcut(
self.debug_cell,
context="Editor",
name="debug cell",
parent=self)
run_cell_and_advance = CONF.config_shortcut(
self.run_cell_and_advance,
context="Editor",
name="run cell and advance",
parent=self)
go_to_next_cell = CONF.config_shortcut(
self.advance_cell,
context="Editor",
name="go to next cell",
parent=self)
go_to_previous_cell = CONF.config_shortcut(
lambda: self.advance_cell(reverse=True),
context="Editor",
name="go to previous cell",
parent=self)
re_run_last_cell = CONF.config_shortcut(
self.re_run_last_cell,
context="Editor",
name="re-run last cell",
parent=self)
prev_warning = CONF.config_shortcut(
lambda: self.sig_prev_warning.emit(),
context="Editor",
name="Previous warning",
parent=self)
next_warning = CONF.config_shortcut(
lambda: self.sig_next_warning.emit(),
context="Editor",
name="Next warning",
parent=self)
split_vertically = CONF.config_shortcut(
lambda: self.sig_split_vertically.emit(),
context="Editor",
name="split vertically",
parent=self)
split_horizontally = CONF.config_shortcut(
lambda: self.sig_split_horizontally.emit(),
context="Editor",
name="split horizontally",
parent=self)
close_split = CONF.config_shortcut(
self.close_split,
context="Editor",
name="close split panel",
parent=self)
external_fileexp = CONF.config_shortcut(
self.show_in_external_file_explorer,
context="Editor",
name="show in external file explorer",
parent=self)
# Return configurable ones
return [inspect, set_breakpoint, set_cond_breakpoint, gotoline, tab,
tabshift, run_selection, new_file, open_file, save_file,
save_all, save_as, close_all, prev_edit_pos, prev_cursor,
next_cursor, zoom_in_1, zoom_in_2, zoom_out, zoom_reset,
close_file_1, close_file_2, run_cell, debug_cell,
run_cell_and_advance,
go_to_next_cell, go_to_previous_cell, re_run_last_cell,
prev_warning, next_warning, split_vertically,
split_horizontally, close_split,
prevtab, nexttab, external_fileexp]
def get_shortcut_data(self):
"""
Returns shortcut data, a list of tuples (shortcut, text, default)
shortcut (QShortcut or QAction instance)
text (string): action/shortcut description
default (string): default key sequence
"""
return [sc.data for sc in self.shortcuts]
def setup_editorstack(self, parent, layout):
"""Setup editorstack's layout"""
layout.setSpacing(0)
# Create filename label, spinner and the toolbar that contains them
self.create_top_widgets()
# Add top toolbar
layout.addWidget(self.top_toolbar)
# Tabbar
menu_btn = create_toolbutton(self, icon=ima.icon('tooloptions'),
tip=_('Options'))
menu_btn.setStyleSheet(str(PANES_TABBAR_STYLESHEET))
self.menu = QMenu(self)
menu_btn.setMenu(self.menu)
menu_btn.setPopupMode(menu_btn.InstantPopup)
self.menu.aboutToShow.connect(self.__setup_menu)
corner_widgets = {Qt.TopRightCorner: [menu_btn]}
self.tabs = BaseTabs(self, menu=self.menu, menu_use_tooltips=True,
corner_widgets=corner_widgets)
self.tabs.set_close_function(self.close_file)
self.tabs.tabBar().tabMoved.connect(self.move_editorstack_data)
self.tabs.setMovable(True)
self.stack_history.refresh()
if hasattr(self.tabs, 'setDocumentMode') \
and not sys.platform == 'darwin':
# Don't set document mode to true on OSX because it generates
# a crash when the editor is detached from the main window
# Fixes spyder-ide/spyder#561.
self.tabs.setDocumentMode(True)
self.tabs.currentChanged.connect(self.current_changed)
tab_container = QWidget()
tab_container.setObjectName('tab-container')
tab_layout = QHBoxLayout(tab_container)
tab_layout.setContentsMargins(0, 0, 0, 0)
tab_layout.addWidget(self.tabs)
layout.addWidget(tab_container)
# Show/hide icons in plugin menus for Mac
if sys.platform == 'darwin':
self.menu.aboutToHide.connect(
lambda menu=self.menu:
set_menu_icons(menu, False))
def create_top_widgets(self):
# Filename label
self.fname_label = QLabel()
# Spacer
spacer = QWidget()
spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
# Spinner
self.spinner = create_waitspinner(size=16, parent=self.fname_label)
# Add widgets to toolbar
self.top_toolbar = QToolBar(self)
self.top_toolbar.addWidget(self.fname_label)
self.top_toolbar.addWidget(spacer)
self.top_toolbar.addWidget(self.spinner)
# Set toolbar style
css = qstylizer.style.StyleSheet()
css.QToolBar.setValues(
margin='0px',
padding='4px',
borderBottom=f'1px solid {QStylePalette.COLOR_BACKGROUND_4}'
)
self.top_toolbar.setStyleSheet(css.toString())
def hide_tooltip(self):
"""Hide any open tooltips."""
for finfo in self.data:
finfo.editor.hide_tooltip()
@Slot()
def update_fname_label(self):
"""Update file name label."""
filename = to_text_string(self.get_current_filename())
if len(filename) > 100:
shorten_filename = u'...' + filename[-100:]
else:
shorten_filename = filename
self.fname_label.setText(shorten_filename)
def add_corner_widgets_to_tabbar(self, widgets):
self.tabs.add_corner_widgets(widgets)
@Slot()
def close_split(self):
"""Closes the editorstack if it is not the last one opened."""
if self.is_closable:
self.close()
def closeEvent(self, event):
"""Overrides QWidget closeEvent()."""
self.threadmanager.close_all_threads()
self.analysis_timer.timeout.disconnect(self.analyze_script)
# Remove editor references from the outline explorer settings
if self.outlineexplorer is not None:
for finfo in self.data:
self.outlineexplorer.remove_editor(finfo.editor.oe_proxy)
for finfo in self.data:
if not finfo.editor.is_cloned:
finfo.editor.notify_close()
QWidget.closeEvent(self, event)
def clone_editor_from(self, other_finfo, set_current):
fname = other_finfo.filename
enc = other_finfo.encoding
new = other_finfo.newly_created
finfo = self.create_new_editor(fname, enc, "",
set_current=set_current, new=new,
cloned_from=other_finfo.editor)
finfo.set_todo_results(other_finfo.todo_results)
return finfo.editor
def clone_from(self, other):
"""Clone EditorStack from other instance"""
for other_finfo in other.data:
self.clone_editor_from(other_finfo, set_current=True)
self.set_stack_index(other.get_stack_index())
@Slot()
@Slot(str)
def open_switcher_dlg(self, initial_text=''):
"""Open file list management dialog box"""
if not self.tabs.count():
return
if self.switcher_dlg is not None and self.switcher_dlg.isVisible():
self.switcher_dlg.hide()
self.switcher_dlg.clear()
return
if self.switcher_dlg is None:
from spyder.widgets.switcher import Switcher
self.switcher_dlg = Switcher(self)
self.switcher_manager = EditorSwitcherManager(
self.get_plugin(),
self.switcher_dlg,
lambda: self.get_current_editor(),
lambda: self,
section=self.get_plugin_title())
self.switcher_dlg.set_search_text(initial_text)
self.switcher_dlg.setup()
self.switcher_dlg.show()
# Note: the +1 pixel on the top makes it look better
delta_top = (self.tabs.tabBar().geometry().height() +
self.fname_label.geometry().height() + 1)
self.switcher_dlg.set_position(delta_top)
@Slot()
def open_symbolfinder_dlg(self):
self.open_switcher_dlg(initial_text='@')
def get_plugin(self):
"""Get the plugin of the parent widget."""
# Needed for the editor stack to use its own switcher instance.
# See spyder-ide/spyder#10684.
return self.parent().plugin
def get_plugin_title(self):
"""Get the plugin title of the parent widget."""
# Needed for the editor stack to use its own switcher instance.
# See spyder-ide/spyder#9469.
return self.get_plugin().get_plugin_title()
def go_to_line(self, line=None):
"""Go to line dialog"""
if line is not None:
            # When this method is called from the fileswitcher, a line
# number is specified, so there is no need for the dialog.
self.get_current_editor().go_to_line(line)
else:
if self.data:
self.get_current_editor().exec_gotolinedialog()
def set_or_clear_breakpoint(self):
"""Set/clear breakpoint"""
if self.data:
editor = self.get_current_editor()
editor.debugger.toogle_breakpoint()
def set_or_edit_conditional_breakpoint(self):
"""Set conditional breakpoint"""
if self.data:
editor = self.get_current_editor()
editor.debugger.toogle_breakpoint(edit_condition=True)
def set_bookmark(self, slot_num):
"""Bookmark current position to given slot."""
if self.data:
editor = self.get_current_editor()
editor.add_bookmark(slot_num)
def inspect_current_object(self, pos=None):
"""Inspect current object in the Help plugin"""
editor = self.get_current_editor()
editor.sig_display_object_info.connect(self.display_help)
cursor = None
offset = editor.get_position('cursor')
if pos:
cursor = editor.get_last_hover_cursor()
if cursor:
offset = cursor.position()
else:
return
line, col = editor.get_cursor_line_column(cursor)
editor.request_hover(line, col, offset,
show_hint=False, clicked=bool(pos))
@Slot(str, bool)
def display_help(self, help_text, clicked):
editor = self.get_current_editor()
if clicked:
name = editor.get_last_hover_word()
else:
name = editor.get_current_word(help_req=True)
try:
editor.sig_display_object_info.disconnect(self.display_help)
except TypeError:
# Needed to prevent an error after some time in idle.
# See spyder-ide/spyder#11228
pass
self.send_to_help(name, help_text, force=True)
# ------ Editor Widget Settings
def set_closable(self, state):
"""Parent widget must handle the closable state"""
self.is_closable = state
def set_io_actions(self, new_action, open_action,
save_action, revert_action):
self.new_action = new_action
self.open_action = open_action
self.save_action = save_action
self.revert_action = revert_action
def set_find_widget(self, find_widget):
self.find_widget = find_widget
def set_outlineexplorer(self, outlineexplorer):
self.outlineexplorer = outlineexplorer
def add_outlineexplorer_button(self, editor_plugin):
oe_btn = create_toolbutton(editor_plugin)
oe_btn.setDefaultAction(self.outlineexplorer.visibility_action)
self.add_corner_widgets_to_tabbar([5, oe_btn])
def set_tempfile_path(self, path):
self.tempfile_path = path
def set_title(self, text):
self.title = text
def set_classfunc_dropdown_visible(self, state):
self.show_class_func_dropdown = state
if self.data:
for finfo in self.data:
if finfo.editor.is_python_like():
finfo.editor.classfuncdropdown.setVisible(state)
def __update_editor_margins(self, editor):
editor.linenumberarea.setup_margins(
linenumbers=self.linenumbers_enabled, markers=self.has_markers())
def has_markers(self):
"""Return True if this editorstack has a marker margin for TODOs or
code analysis"""
return self.todolist_enabled
def set_todolist_enabled(self, state, current_finfo=None):
# CONF.get(self.CONF_SECTION, 'todo_list')
self.todolist_enabled = state
if self.data:
for finfo in self.data:
self.__update_editor_margins(finfo.editor)
finfo.cleanup_todo_results()
if state and current_finfo is not None:
if current_finfo is not finfo:
finfo.run_todo_finder()
def set_linenumbers_enabled(self, state, current_finfo=None):
# CONF.get(self.CONF_SECTION, 'line_numbers')
self.linenumbers_enabled = state
if self.data:
for finfo in self.data:
self.__update_editor_margins(finfo.editor)
def set_blanks_enabled(self, state):
self.blanks_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_blanks_enabled(state)
def set_scrollpastend_enabled(self, state):
self.scrollpastend_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_scrollpastend_enabled(state)
def set_edgeline_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'edge_line')
self.edgeline_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.edge_line.set_enabled(state)
def set_edgeline_columns(self, columns):
# CONF.get(self.CONF_SECTION, 'edge_line_column')
self.edgeline_columns = columns
if self.data:
for finfo in self.data:
finfo.editor.edge_line.set_columns(columns)
def set_indent_guides(self, state):
self.indent_guides = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_identation_guides(state)
def set_close_parentheses_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'close_parentheses')
self.close_parentheses_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_close_parentheses_enabled(state)
def set_close_quotes_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'close_quotes')
self.close_quotes_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_close_quotes_enabled(state)
def set_add_colons_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'add_colons')
self.add_colons_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_add_colons_enabled(state)
def set_auto_unindent_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'auto_unindent')
self.auto_unindent_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_auto_unindent_enabled(state)
def set_indent_chars(self, indent_chars):
# CONF.get(self.CONF_SECTION, 'indent_chars')
indent_chars = indent_chars[1:-1] # removing the leading/ending '*'
self.indent_chars = indent_chars
if self.data:
for finfo in self.data:
finfo.editor.set_indent_chars(indent_chars)
def set_tab_stop_width_spaces(self, tab_stop_width_spaces):
# CONF.get(self.CONF_SECTION, 'tab_stop_width')
self.tab_stop_width_spaces = tab_stop_width_spaces
if self.data:
for finfo in self.data:
finfo.editor.tab_stop_width_spaces = tab_stop_width_spaces
finfo.editor.update_tab_stop_width_spaces()
def set_help_enabled(self, state):
self.help_enabled = state
def set_default_font(self, font, color_scheme=None):
self.default_font = font
if color_scheme is not None:
self.color_scheme = color_scheme
if self.data:
for finfo in self.data:
finfo.editor.set_font(font, color_scheme)
def set_color_scheme(self, color_scheme):
self.color_scheme = color_scheme
if self.data:
for finfo in self.data:
finfo.editor.set_color_scheme(color_scheme)
def set_wrap_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'wrap')
self.wrap_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_wrap_mode(state)
def set_tabmode_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'tab_always_indent')
self.tabmode_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_tab_mode(state)
def set_stripmode_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'strip_trailing_spaces_on_modify')
self.stripmode_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_strip_mode(state)
def set_intelligent_backspace_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'intelligent_backspace')
self.intelligent_backspace_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_intelligent_backspace(state)
def set_code_snippets_enabled(self, state):
self.code_snippets_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_code_snippets(state)
def set_code_folding_enabled(self, state):
self.code_folding_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_code_folding(state)
def set_automatic_completions_enabled(self, state):
self.automatic_completions_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_automatic_completions(state)
def set_automatic_completions_after_chars(self, chars):
self.automatic_completion_chars = chars
if self.data:
for finfo in self.data:
finfo.editor.set_automatic_completions_after_chars(chars)
def set_automatic_completions_after_ms(self, ms):
self.automatic_completion_ms = ms
if self.data:
for finfo in self.data:
finfo.editor.set_automatic_completions_after_ms(ms)
def set_completions_hint_enabled(self, state):
self.completions_hint_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_completions_hint(state)
def set_completions_hint_after_ms(self, ms):
self.completions_hint_after_ms = ms
if self.data:
for finfo in self.data:
finfo.editor.set_completions_hint_after_ms(ms)
def set_hover_hints_enabled(self, state):
self.hover_hints_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_hover_hints(state)
def set_format_on_save(self, state):
self.format_on_save = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_format_on_save(state)
def set_occurrence_highlighting_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'occurrence_highlighting')
self.occurrence_highlighting_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_occurrence_highlighting(state)
def set_occurrence_highlighting_timeout(self, timeout):
# CONF.get(self.CONF_SECTION, 'occurrence_highlighting/timeout')
self.occurrence_highlighting_timeout = timeout
if self.data:
for finfo in self.data:
finfo.editor.set_occurrence_timeout(timeout)
def set_underline_errors_enabled(self, state):
self.underline_errors_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_underline_errors_enabled(state)
def set_highlight_current_line_enabled(self, state):
self.highlight_current_line_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_highlight_current_line(state)
def set_highlight_current_cell_enabled(self, state):
self.highlight_current_cell_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_highlight_current_cell(state)
def set_checkeolchars_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'check_eol_chars')
self.checkeolchars_enabled = state
def set_always_remove_trailing_spaces(self, state):
# CONF.get(self.CONF_SECTION, 'always_remove_trailing_spaces')
self.always_remove_trailing_spaces = state
if self.data:
for finfo in self.data:
finfo.editor.set_remove_trailing_spaces(state)
def set_add_newline(self, state):
self.add_newline = state
if self.data:
for finfo in self.data:
finfo.editor.set_add_newline(state)
def set_remove_trailing_newlines(self, state):
self.remove_trailing_newlines = state
if self.data:
for finfo in self.data:
finfo.editor.set_remove_trailing_newlines(state)
def set_convert_eol_on_save(self, state):
"""If `state` is `True`, saving files will convert line endings."""
# CONF.get(self.CONF_SECTION, 'convert_eol_on_save')
self.convert_eol_on_save = state
def set_convert_eol_on_save_to(self, state):
"""`state` can be one of ('LF', 'CRLF', 'CR')"""
# CONF.get(self.CONF_SECTION, 'convert_eol_on_save_to')
self.convert_eol_on_save_to = state
def set_focus_to_editor(self, state):
self.focus_to_editor = state
def set_run_cell_copy(self, state):
"""If `state` is ``True``, code cells will be copied to the console."""
self.run_cell_copy = state
def set_current_project_path(self, root_path=None):
"""
Set the current active project root path.
Parameters
----------
root_path: str or None, optional
Path to current project root path. Default is None.
"""
for finfo in self.data:
finfo.editor.set_current_project_path(root_path)
# ------ Stacked widget management
def get_stack_index(self):
return self.tabs.currentIndex()
def get_current_finfo(self):
if self.data:
return self.data[self.get_stack_index()]
def get_current_editor(self):
return self.tabs.currentWidget()
def get_stack_count(self):
return self.tabs.count()
def set_stack_index(self, index, instance=None):
        if instance == self or instance is None:
self.tabs.setCurrentIndex(index)
def set_tabbar_visible(self, state):
self.tabs.tabBar().setVisible(state)
def remove_from_data(self, index):
self.tabs.blockSignals(True)
self.tabs.removeTab(index)
self.data.pop(index)
self.tabs.blockSignals(False)
self.update_actions()
def __modified_readonly_title(self, title, is_modified, is_readonly):
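        # Examples: ("script.py", True, False) -> "script.py*" and
        # ("script.py", True, True) -> "(script.py*)".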
if is_modified is not None and is_modified:
title += "*"
if is_readonly is not None and is_readonly:
title = "(%s)" % title
return title
def get_tab_text(self, index, is_modified=None, is_readonly=None):
"""Return tab title."""
files_path_list = [finfo.filename for finfo in self.data]
fname = self.data[index].filename
fname = sourcecode.disambiguate_fname(files_path_list, fname)
return self.__modified_readonly_title(fname,
is_modified, is_readonly)
def get_tab_tip(self, filename, is_modified=None, is_readonly=None):
"""Return tab menu title"""
text = u"%s — %s"
text = self.__modified_readonly_title(text,
is_modified, is_readonly)
if self.tempfile_path is not None\
and filename == encoding.to_unicode_from_fs(self.tempfile_path):
temp_file_str = to_text_string(_("Temporary file"))
return text % (temp_file_str, self.tempfile_path)
else:
return text % (osp.basename(filename), osp.dirname(filename))
def add_to_data(self, finfo, set_current, add_where='end'):
finfo.editor.oe_proxy = None
index = 0 if add_where == 'start' else len(self.data)
self.data.insert(index, finfo)
index = self.data.index(finfo)
editor = finfo.editor
self.tabs.insertTab(index, editor, self.get_tab_text(index))
self.set_stack_title(index, False)
if set_current:
self.set_stack_index(index)
self.current_changed(index)
self.update_actions()
def __repopulate_stack(self):
self.tabs.blockSignals(True)
self.tabs.clear()
for finfo in self.data:
if finfo.newly_created:
is_modified = True
else:
is_modified = None
index = self.data.index(finfo)
tab_text = self.get_tab_text(index, is_modified)
tab_tip = self.get_tab_tip(finfo.filename)
index = self.tabs.addTab(finfo.editor, tab_text)
self.tabs.setTabToolTip(index, tab_tip)
self.tabs.blockSignals(False)
def rename_in_data(self, original_filename, new_filename):
index = self.has_filename(original_filename)
if index is None:
return
finfo = self.data[index]
# Send close request to LSP
finfo.editor.notify_close()
# Set new filename
finfo.filename = new_filename
finfo.editor.filename = new_filename
# File type has changed!
original_ext = osp.splitext(original_filename)[1]
new_ext = osp.splitext(new_filename)[1]
if original_ext != new_ext:
# Set file language and re-run highlighter
txt = to_text_string(finfo.editor.get_text_with_eol())
language = get_file_language(new_filename, txt)
finfo.editor.set_language(language, new_filename)
finfo.editor.run_pygments_highlighter()
# If the user renamed the file to a different language, we
# need to emit sig_open_file to see if we can start a
# language server for it.
options = {
'language': language,
'filename': new_filename,
'codeeditor': finfo.editor
}
self.sig_open_file.emit(options)
# Update panels
finfo.editor.set_debug_panel(
show_debug_panel=True, language=language)
finfo.editor.cleanup_code_analysis()
finfo.editor.cleanup_folding()
else:
# If there's no language change, we simply need to request a
# document_did_open for the new file.
finfo.editor.document_did_open()
set_new_index = index == self.get_stack_index()
current_fname = self.get_current_filename()
finfo.editor.filename = new_filename
new_index = self.data.index(finfo)
self.__repopulate_stack()
if set_new_index:
self.set_stack_index(new_index)
else:
# Fixes spyder-ide/spyder#1287.
self.set_current_filename(current_fname)
if self.outlineexplorer is not None:
self.outlineexplorer.file_renamed(
finfo.editor.oe_proxy, finfo.filename)
return new_index
def set_stack_title(self, index, is_modified):
finfo = self.data[index]
fname = finfo.filename
is_modified = (is_modified or finfo.newly_created) and not finfo.default
is_readonly = finfo.editor.isReadOnly()
tab_text = self.get_tab_text(index, is_modified, is_readonly)
tab_tip = self.get_tab_tip(fname, is_modified, is_readonly)
        # Only update the tab text if it has changed; otherwise unwanted scrolling
# will happen when changing tabs. See spyder-ide/spyder#1170.
if tab_text != self.tabs.tabText(index):
self.tabs.setTabText(index, tab_text)
self.tabs.setTabToolTip(index, tab_tip)
# ------ Context menu
def __setup_menu(self):
"""Setup tab context menu before showing it"""
self.menu.clear()
if self.data:
actions = self.menu_actions
else:
actions = (self.new_action, self.open_action)
self.setFocus() # --> Editor.__get_focus_editortabwidget
add_actions(self.menu, list(actions) + self.__get_split_actions())
self.close_action.setEnabled(self.is_closable)
if sys.platform == 'darwin':
set_menu_icons(self.menu, True)
# ------ Hor/Ver splitting
def __get_split_actions(self):
if self.parent() is not None:
plugin = self.parent().plugin
else:
plugin = None
# New window
if plugin is not None:
self.new_window_action = create_action(
self, _("New window"),
icon=ima.icon('newwindow'),
tip=_("Create a new editor window"),
triggered=plugin.create_new_window)
# Splitting
self.versplit_action = create_action(
self,
_("Split vertically"),
icon=ima.icon('versplit'),
tip=_("Split vertically this editor window"),
triggered=lambda: self.sig_split_vertically.emit(),
shortcut=CONF.get_shortcut(context='Editor',
name='split vertically'),
context=Qt.WidgetShortcut)
self.horsplit_action = create_action(
self,
_("Split horizontally"),
icon=ima.icon('horsplit'),
tip=_("Split horizontally this editor window"),
triggered=lambda: self.sig_split_horizontally.emit(),
shortcut=CONF.get_shortcut(context='Editor',
name='split horizontally'),
context=Qt.WidgetShortcut)
self.close_action = create_action(
self,
_("Close this panel"),
icon=ima.icon('close_panel'),
triggered=self.close_split,
shortcut=CONF.get_shortcut(context='Editor',
name='close split panel'),
context=Qt.WidgetShortcut)
# Regular actions
actions = [MENU_SEPARATOR, self.versplit_action,
self.horsplit_action, self.close_action]
if self.new_window:
window = self.window()
close_window_action = create_action(
self, _("Close window"),
icon=ima.icon('close_pane'),
triggered=window.close)
actions += [MENU_SEPARATOR, self.new_window_action,
close_window_action]
elif plugin is not None:
if plugin._undocked_window is not None:
actions += [MENU_SEPARATOR, plugin._dock_action]
else:
actions += [MENU_SEPARATOR, self.new_window_action,
plugin._undock_action,
plugin._close_plugin_action]
return actions
def reset_orientation(self):
self.horsplit_action.setEnabled(True)
self.versplit_action.setEnabled(True)
def set_orientation(self, orientation):
self.horsplit_action.setEnabled(orientation == Qt.Horizontal)
self.versplit_action.setEnabled(orientation == Qt.Vertical)
def update_actions(self):
state = self.get_stack_count() > 0
self.horsplit_action.setEnabled(state)
self.versplit_action.setEnabled(state)
# ------ Accessors
def get_current_filename(self):
if self.data:
return self.data[self.get_stack_index()].filename
def get_current_language(self):
if self.data:
return self.data[self.get_stack_index()].editor.language
def get_filenames(self):
"""
Return a list with the names of all the files currently opened in
the editorstack.
"""
return [finfo.filename for finfo in self.data]
def has_filename(self, filename):
"""Return the self.data index position for the filename.
Args:
filename: Name of the file to search for in self.data.
Returns:
The self.data index for the filename. Returns None
if the filename is not found in self.data.
"""
fixpath = lambda path: osp.normcase(osp.realpath(path))
for index, finfo in enumerate(self.data):
if fixpath(filename) == fixpath(finfo.filename):
return index
return None
def set_current_filename(self, filename, focus=True):
"""Set current filename and return the associated editor instance."""
index = self.has_filename(filename)
if index is not None:
if focus:
self.set_stack_index(index)
editor = self.data[index].editor
if focus:
editor.setFocus()
else:
self.stack_history.remove_and_append(index)
return editor
def is_file_opened(self, filename=None):
"""Return if filename is in the editor stack.
Args:
filename: Name of the file to search for. If filename is None,
then checks if any file is open.
Returns:
True: If filename is None and a file is open.
False: If filename is None and no files are open.
None: If filename is not None and the file isn't found.
integer: Index of file name in editor stack.
"""
if filename is None:
# Is there any file opened?
return len(self.data) > 0
else:
return self.has_filename(filename)
def get_index_from_filename(self, filename):
"""
Return the position index of a file in the tab bar of the editorstack
from its name.
"""
filenames = [d.filename for d in self.data]
return filenames.index(filename)
@Slot(int, int)
def move_editorstack_data(self, start, end):
"""Reorder editorstack.data so it is synchronized with the tab bar when
tabs are moved."""
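        # Example: dragging a tab from position 0 to position 2 gives
        # direction == +1, so the loop swaps data[0] with data[1] and then
        # data[1] with data[2], keeping self.data in the same order as the tabs.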
if start < 0 or end < 0:
return
else:
steps = abs(end - start)
direction = (end-start) // steps # +1 for right, -1 for left
data = self.data
self.blockSignals(True)
for i in range(start, end, direction):
data[i], data[i+direction] = data[i+direction], data[i]
self.blockSignals(False)
self.refresh()
# ------ Close file, tabwidget...
def close_file(self, index=None, force=False):
"""Close file (index=None -> close current file)
Keep current file index unchanged (if current file
that is being closed)"""
current_index = self.get_stack_index()
count = self.get_stack_count()
if index is None:
if count > 0:
index = current_index
else:
self.find_widget.set_editor(None)
return
new_index = None
if count > 1:
if current_index == index:
new_index = self._get_previous_file_index()
else:
new_index = current_index
can_close_file = self.parent().plugin.can_close_file(
self.data[index].filename) if self.parent() else True
is_ok = (force or self.save_if_changed(cancelable=True, index=index)
and can_close_file)
if is_ok:
finfo = self.data[index]
self.threadmanager.close_threads(finfo)
# Removing editor reference from outline explorer settings:
if self.outlineexplorer is not None:
self.outlineexplorer.remove_editor(finfo.editor.oe_proxy)
filename = self.data[index].filename
self.remove_from_data(index)
finfo.editor.notify_close()
# We pass self object ID as a QString, because otherwise it would
# depend on the platform: long for 64bit, int for 32bit. Replacing
# by long all the time is not working on some 32bit platforms.
# See spyder-ide/spyder#1094 and spyder-ide/spyder#1098.
self.sig_close_file.emit(str(id(self)), filename)
self.opened_files_list_changed.emit()
self.update_code_analysis_actions.emit()
self.refresh_file_dependent_actions.emit()
self.update_plugin_title.emit()
editor = self.get_current_editor()
if editor:
editor.setFocus()
if new_index is not None:
if index < new_index:
new_index -= 1
self.set_stack_index(new_index)
self.add_last_closed_file(finfo.filename)
if finfo.filename in self.autosave.file_hashes:
del self.autosave.file_hashes[finfo.filename]
if self.get_stack_count() == 0 and self.create_new_file_if_empty:
self.sig_new_file[()].emit()
return False
self.__modify_stack_title()
return is_ok
def register_completion_capabilities(self, capabilities, language):
"""
Register completion server capabilities across all editors.
Parameters
----------
capabilities: dict
Capabilities supported by a language server.
language: str
Programming language for the language server (it has to be
in small caps).
"""
for index in range(self.get_stack_count()):
editor = self.tabs.widget(index)
if editor.language.lower() == language:
editor.register_completion_capabilities(capabilities)
def start_completion_services(self, language):
"""Notify language server availability to code editors."""
for index in range(self.get_stack_count()):
editor = self.tabs.widget(index)
if editor.language.lower() == language:
editor.start_completion_services()
def stop_completion_services(self, language):
"""Notify language server unavailability to code editors."""
for index in range(self.get_stack_count()):
editor = self.tabs.widget(index)
if editor.language.lower() == language:
editor.stop_completion_services()
def close_all_files(self):
"""Close all opened scripts"""
while self.close_file():
pass
def close_all_right(self):
""" Close all files opened to the right """
num = self.get_stack_index()
n = self.get_stack_count()
for __ in range(num, n-1):
self.close_file(num+1)
def close_all_but_this(self):
"""Close all files but the current one"""
self.close_all_right()
for __ in range(0, self.get_stack_count() - 1):
self.close_file(0)
def sort_file_tabs_alphabetically(self):
"""Sort open tabs alphabetically."""
while self.sorted() is False:
for i in range(0, self.tabs.tabBar().count()):
if(self.tabs.tabBar().tabText(i) >
self.tabs.tabBar().tabText(i + 1)):
self.tabs.tabBar().moveTab(i, i + 1)
def sorted(self):
"""Utility function for sort_file_tabs_alphabetically()."""
for i in range(0, self.tabs.tabBar().count() - 1):
if (self.tabs.tabBar().tabText(i) >
self.tabs.tabBar().tabText(i + 1)):
return False
return True
def add_last_closed_file(self, fname):
"""Add to last closed file list."""
if fname in self.last_closed_files:
self.last_closed_files.remove(fname)
self.last_closed_files.insert(0, fname)
if len(self.last_closed_files) > 10:
self.last_closed_files.pop(-1)
def get_last_closed_files(self):
return self.last_closed_files
def set_last_closed_files(self, fnames):
self.last_closed_files = fnames
# ------ Save
def save_if_changed(self, cancelable=False, index=None):
"""Ask user to save file if modified.
Args:
cancelable: Show Cancel button.
index: File to check for modification.
Returns:
False when save() fails or is cancelled.
True when save() is successful, there are no modifications,
or user selects No or NoToAll.
This function controls the message box prompt for saving
changed files. The actual save is performed in save() for
each index processed. This function also removes autosave files
corresponding to files the user chooses not to save.
"""
if index is None:
indexes = list(range(self.get_stack_count()))
else:
indexes = [index]
buttons = QMessageBox.Yes | QMessageBox.No
if cancelable:
buttons |= QMessageBox.Cancel
unsaved_nb = 0
for index in indexes:
if self.data[index].editor.document().isModified():
unsaved_nb += 1
if not unsaved_nb:
# No file to save
return True
if unsaved_nb > 1:
buttons |= int(QMessageBox.YesToAll | QMessageBox.NoToAll)
yes_all = no_all = False
for index in indexes:
self.set_stack_index(index)
finfo = self.data[index]
if finfo.filename == self.tempfile_path or yes_all:
if not self.save(index):
return False
elif no_all:
                self.autosave.remove_autosave_file(finfo.filename)
elif (finfo.editor.document().isModified() and
self.save_dialog_on_tests):
self.msgbox = QMessageBox(
QMessageBox.Question,
self.title,
_("<b>%s</b> has been modified."
"<br>Do you want to save changes?"
) % osp.basename(finfo.filename),
buttons,
parent=self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
if not self.save(index):
return False
elif answer == QMessageBox.No:
self.autosave.remove_autosave_file(finfo.filename)
elif answer == QMessageBox.YesToAll:
if not self.save(index):
return False
yes_all = True
elif answer == QMessageBox.NoToAll:
self.autosave.remove_autosave_file(finfo.filename)
no_all = True
elif answer == QMessageBox.Cancel:
return False
return True
def compute_hash(self, fileinfo):
"""Compute hash of contents of editor.
Args:
fileinfo: FileInfo object associated to editor whose hash needs
to be computed.
Returns:
int: computed hash.
"""
txt = fileinfo.editor.get_text_with_eol()
return hash(txt)
def _write_to_file(self, fileinfo, filename):
"""Low-level function for writing text of editor to file.
Args:
fileinfo: FileInfo object associated to editor to be saved
filename: str with filename to save to
This is a low-level function that only saves the text to file in the
correct encoding without doing any error handling.
"""
txt = fileinfo.editor.get_text_with_eol()
fileinfo.encoding = encoding.write(txt, filename, fileinfo.encoding)
def save(self, index=None, force=False, save_new_files=True):
"""Write text of editor to a file.
Args:
index: self.data index to save. If None, defaults to
currentIndex().
force: Force save regardless of file state.
Returns:
True upon successful save or when file doesn't need to be saved.
False if save failed.
If the text isn't modified and it's not newly created, then the save
is aborted. If the file hasn't been saved before, then save_as()
is invoked. Otherwise, the file is written using the file name
currently in self.data. This function doesn't change the file name.
"""
if index is None:
# Save the currently edited file
if not self.get_stack_count():
return
index = self.get_stack_index()
finfo = self.data[index]
if not (finfo.editor.document().isModified() or
finfo.newly_created) and not force:
return True
if not osp.isfile(finfo.filename) and not force:
# File has not been saved yet
if save_new_files:
return self.save_as(index=index)
# The file doesn't need to be saved
return True
if self.always_remove_trailing_spaces:
self.remove_trailing_spaces(index)
if self.remove_trailing_newlines:
self.trim_trailing_newlines(index)
if self.add_newline:
self.add_newline_to_file(index)
if self.convert_eol_on_save:
# hack to account for the fact that the config file saves
# CR/LF/CRLF while set_os_eol_chars wants the os.name value.
osname_lookup = {'LF': 'posix', 'CRLF': 'nt', 'CR': 'mac'}
osname = osname_lookup[self.convert_eol_on_save_to]
self.set_os_eol_chars(osname=osname)
try:
if self.format_on_save and finfo.editor.formatting_enabled:
# Autoformat document and then save
finfo.editor.sig_stop_operation_in_progress.connect(
functools.partial(self._save_file, finfo, index))
finfo.editor.format_document()
else:
self._save_file(finfo, index)
return True
except EnvironmentError as error:
self.msgbox = QMessageBox(
QMessageBox.Critical,
_("Save Error"),
_("<b>Unable to save file '%s'</b>"
"<br><br>Error message:<br>%s"
) % (osp.basename(finfo.filename),
str(error)),
parent=self)
self.msgbox.exec_()
return False
def _save_file(self, finfo, index):
self._write_to_file(finfo, finfo.filename)
file_hash = self.compute_hash(finfo)
self.autosave.file_hashes[finfo.filename] = file_hash
self.autosave.remove_autosave_file(finfo.filename)
finfo.newly_created = False
self.encoding_changed.emit(finfo.encoding)
finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
# We pass self object ID as a QString, because otherwise it would
# depend on the platform: long for 64bit, int for 32bit. Replacing
# by long all the time is not working on some 32bit platforms.
# See spyder-ide/spyder#1094 and spyder-ide/spyder#1098.
# The filename is passed instead of an index in case the tabs
# have been rearranged. See spyder-ide/spyder#5703.
self.file_saved.emit(str(id(self)),
finfo.filename, finfo.filename)
finfo.editor.document().setModified(False)
self.modification_changed(index=index)
self.analyze_script(index)
finfo.editor.notify_save()
def file_saved_in_other_editorstack(self, original_filename, filename):
"""
File was just saved in another editorstack, let's synchronize!
This avoids file being automatically reloaded.
The original filename is passed instead of an index in case the tabs
on the editor stacks were moved and are now in a different order - see
spyder-ide/spyder#5703.
Filename is passed in case file was just saved as another name.
"""
index = self.has_filename(original_filename)
if index is None:
return
finfo = self.data[index]
finfo.newly_created = False
finfo.filename = to_text_string(filename)
finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
def select_savename(self, original_filename):
"""Select a name to save a file.
Args:
original_filename: Used in the dialog to display the current file
path and name.
Returns:
Normalized path for the selected file name or None if no name was
selected.
"""
if self.edit_filetypes is None:
self.edit_filetypes = get_edit_filetypes()
if self.edit_filters is None:
self.edit_filters = get_edit_filters()
        # Don't use filters on KDE to avoid making the dialog incredibly
        # slow.
# Fixes spyder-ide/spyder#4156.
if is_kde_desktop() and not is_anaconda():
filters = ''
selectedfilter = ''
else:
filters = self.edit_filters
selectedfilter = get_filter(self.edit_filetypes,
osp.splitext(original_filename)[1])
self.redirect_stdio.emit(False)
filename, _selfilter = getsavefilename(self, _("Save file"),
original_filename,
filters=filters,
selectedfilter=selectedfilter,
options=QFileDialog.HideNameFilterDetails)
self.redirect_stdio.emit(True)
if filename:
return osp.normpath(filename)
return None
def save_as(self, index=None):
"""Save file as...
Args:
index: self.data index for the file to save.
Returns:
False if no file name was selected or if save() was unsuccessful.
            True if save() was successful.
Gets the new file name from select_savename(). If no name is chosen,
then the save_as() aborts. Otherwise, the current stack is checked
to see if the selected name already exists and, if so, then the tab
with that name is closed.
The current stack (self.data) and current tabs are updated with the
new name and other file info. The text is written with the new
name using save() and the name change is propagated to the other stacks
via the file_renamed_in_data signal.
"""
if index is None:
# Save the currently edited file
index = self.get_stack_index()
finfo = self.data[index]
original_newly_created = finfo.newly_created
        # The next line is necessary to avoid checking if the file exists
        # while running __check_file_status.
# See spyder-ide/spyder#3678 and spyder-ide/spyder#3026.
finfo.newly_created = True
original_filename = finfo.filename
filename = self.select_savename(original_filename)
if filename:
ao_index = self.has_filename(filename)
# Note: ao_index == index --> saving an untitled file
if ao_index is not None and ao_index != index:
if not self.close_file(ao_index):
return
if ao_index < index:
index -= 1
new_index = self.rename_in_data(original_filename,
new_filename=filename)
# We pass self object ID as a QString, because otherwise it would
# depend on the platform: long for 64bit, int for 32bit. Replacing
# by long all the time is not working on some 32bit platforms
# See spyder-ide/spyder#1094 and spyder-ide/spyder#1098.
self.file_renamed_in_data.emit(str(id(self)),
original_filename, filename)
ok = self.save(index=new_index, force=True)
self.refresh(new_index)
self.set_stack_index(new_index)
return ok
else:
finfo.newly_created = original_newly_created
return False
def save_copy_as(self, index=None):
"""Save copy of file as...
Args:
index: self.data index for the file to save.
Returns:
False if no file name was selected or if save() was unsuccessful.
            True if save() was successful.
Gets the new file name from select_savename(). If no name is chosen,
then the save_copy_as() aborts. Otherwise, the current stack is
checked to see if the selected name already exists and, if so, then the
tab with that name is closed.
        Unlike save_as(), this calls _write_to_file() directly instead of
        using save().
The current file and tab aren't changed at all. The copied file is
opened in a new tab.
"""
if index is None:
# Save the currently edited file
index = self.get_stack_index()
finfo = self.data[index]
original_filename = finfo.filename
filename = self.select_savename(original_filename)
if filename:
ao_index = self.has_filename(filename)
# Note: ao_index == index --> saving an untitled file
if ao_index is not None and ao_index != index:
if not self.close_file(ao_index):
return
if ao_index < index:
index -= 1
try:
self._write_to_file(finfo, filename)
# open created copy file
self.plugin_load.emit(filename)
return True
except EnvironmentError as error:
self.msgbox = QMessageBox(
QMessageBox.Critical,
_("Save Error"),
_("<b>Unable to save file '%s'</b>"
"<br><br>Error message:<br>%s"
) % (osp.basename(finfo.filename),
str(error)),
parent=self)
self.msgbox.exec_()
else:
return False
def save_all(self, save_new_files=True):
"""Save all opened files.
Iterate through self.data and call save() on any modified files.
"""
all_saved = True
for index in range(self.get_stack_count()):
if self.data[index].editor.document().isModified():
all_saved &= self.save(index, save_new_files=save_new_files)
return all_saved
#------ Update UI
def start_stop_analysis_timer(self):
self.is_analysis_done = False
self.analysis_timer.stop()
self.analysis_timer.start()
def analyze_script(self, index=None):
"""Analyze current script for TODOs."""
if self.is_analysis_done:
return
if index is None:
index = self.get_stack_index()
if self.data and len(self.data) > index:
finfo = self.data[index]
if self.todolist_enabled:
finfo.run_todo_finder()
self.is_analysis_done = True
def set_todo_results(self, filename, todo_results):
"""Synchronize todo results between editorstacks"""
index = self.has_filename(filename)
if index is None:
return
self.data[index].set_todo_results(todo_results)
def get_todo_results(self):
if self.data:
return self.data[self.get_stack_index()].todo_results
def current_changed(self, index):
"""Stack index has changed"""
editor = self.get_current_editor()
if index != -1:
editor.setFocus()
logger.debug("Set focus to: %s" % editor.filename)
else:
self.reset_statusbar.emit()
self.opened_files_list_changed.emit()
self.stack_history.refresh()
self.stack_history.remove_and_append(index)
# Needed to avoid an error generated after moving/renaming
# files outside Spyder while in debug mode.
# See spyder-ide/spyder#8749.
try:
logger.debug("Current changed: %d - %s" %
(index, self.data[index].editor.filename))
except IndexError:
pass
self.update_plugin_title.emit()
# Make sure that any replace happens in the editor on top
# See spyder-ide/spyder#9688.
self.find_widget.set_editor(editor, refresh=False)
if editor is not None:
# Needed in order to handle the close of files open in a directory
# that has been renamed. See spyder-ide/spyder#5157.
try:
line, col = editor.get_cursor_line_column()
self.current_file_changed.emit(self.data[index].filename,
editor.get_position('cursor'),
line, col)
except IndexError:
pass
def _get_previous_file_index(self):
"""Return the penultimate element of the stack history."""
try:
return self.stack_history[-2]
except IndexError:
return None
def tab_navigation_mru(self, forward=True):
"""
Tab navigation with "most recently used" behaviour.
It's fired when pressing 'go to previous file' or 'go to next file'
shortcuts.
forward:
True: move to next file
False: move to previous file
"""
self.tabs_switcher = TabSwitcherWidget(self, self.stack_history,
self.tabs)
self.tabs_switcher.show()
self.tabs_switcher.select_row(1 if forward else -1)
self.tabs_switcher.setFocus()
def focus_changed(self):
"""Editor focus has changed"""
fwidget = QApplication.focusWidget()
for finfo in self.data:
if fwidget is finfo.editor:
if finfo.editor.operation_in_progress:
self.spinner.start()
else:
self.spinner.stop()
self.refresh()
self.editor_focus_changed.emit()
def _refresh_outlineexplorer(self, index=None, update=True, clear=False):
"""Refresh outline explorer panel"""
oe = self.outlineexplorer
if oe is None:
return
if index is None:
index = self.get_stack_index()
if self.data and len(self.data) > index:
finfo = self.data[index]
oe.setEnabled(True)
oe.set_current_editor(finfo.editor.oe_proxy,
update=update, clear=clear)
if index != self.get_stack_index():
# The last file added to the outline explorer is not the
# currently focused one in the editor stack. Therefore,
# we need to force a refresh of the outline explorer to set
# the current editor to the currently focused one in the
# editor stack. See spyder-ide/spyder#8015.
self._refresh_outlineexplorer(update=False)
return
self._sync_outlineexplorer_file_order()
def _sync_outlineexplorer_file_order(self):
"""
Order the root file items of the outline explorer as in the tabbar
of the current EditorStack.
"""
if self.outlineexplorer is not None:
self.outlineexplorer.treewidget.set_editor_ids_order(
[finfo.editor.get_document_id() for finfo in self.data])
def __refresh_statusbar(self, index):
"""Refreshing statusbar widgets"""
if self.data and len(self.data) > index:
finfo = self.data[index]
self.encoding_changed.emit(finfo.encoding)
# Refresh cursor position status:
line, index = finfo.editor.get_cursor_line_column()
self.sig_editor_cursor_position_changed.emit(line, index)
def __refresh_readonly(self, index):
if self.data and len(self.data) > index:
finfo = self.data[index]
read_only = not QFileInfo(finfo.filename).isWritable()
if not osp.isfile(finfo.filename):
# This is an 'untitledX.py' file (newly created)
read_only = False
elif os.name == 'nt':
try:
# Try to open the file to see if its permissions allow
# to write on it
# Fixes spyder-ide/spyder#10657
fd = os.open(finfo.filename, os.O_RDWR)
os.close(fd)
except (IOError, OSError):
read_only = True
finfo.editor.setReadOnly(read_only)
self.readonly_changed.emit(read_only)
def __check_file_status(self, index):
"""Check if file has been changed in any way outside Spyder:
1. removed, moved or renamed outside Spyder
2. modified outside Spyder"""
if self.__file_status_flag:
# Avoid infinite loop: when the QMessageBox.question pops, it
# gets focus and then give it back to the CodeEditor instance,
# triggering a refresh cycle which calls this method
return
self.__file_status_flag = True
if len(self.data) <= index:
index = self.get_stack_index()
finfo = self.data[index]
name = osp.basename(finfo.filename)
if finfo.newly_created:
# File was just created (not yet saved): do nothing
# (do not return because of the clean-up at the end of the method)
pass
elif not osp.isfile(finfo.filename):
# File doesn't exist (removed, moved or offline):
self.msgbox = QMessageBox(
QMessageBox.Warning,
self.title,
_("<b>%s</b> is unavailable "
"(this file may have been removed, moved "
"or renamed outside Spyder)."
"<br>Do you want to close it?") % name,
QMessageBox.Yes | QMessageBox.No,
self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
self.close_file(index)
else:
finfo.newly_created = True
finfo.editor.document().setModified(True)
self.modification_changed(index=index)
else:
# Else, testing if it has been modified elsewhere:
lastm = QFileInfo(finfo.filename).lastModified()
if to_text_string(lastm.toString()) \
!= to_text_string(finfo.lastmodified.toString()):
if finfo.editor.document().isModified():
self.msgbox = QMessageBox(
QMessageBox.Question,
self.title,
_("<b>%s</b> has been modified outside Spyder."
"<br>Do you want to reload it and lose all "
"your changes?") % name,
QMessageBox.Yes | QMessageBox.No,
self)
answer = self.msgbox.exec_()
if answer == QMessageBox.Yes:
self.reload(index)
else:
finfo.lastmodified = lastm
else:
self.reload(index)
# Finally, resetting temporary flag:
self.__file_status_flag = False
def __modify_stack_title(self):
for index, finfo in enumerate(self.data):
state = finfo.editor.document().isModified()
self.set_stack_title(index, state)
def refresh(self, index=None):
"""Refresh tabwidget"""
if index is None:
index = self.get_stack_index()
# Set current editor
if self.get_stack_count():
index = self.get_stack_index()
finfo = self.data[index]
editor = finfo.editor
editor.setFocus()
self._refresh_outlineexplorer(index, update=False)
self.update_code_analysis_actions.emit()
self.__refresh_statusbar(index)
self.__refresh_readonly(index)
self.__check_file_status(index)
self.__modify_stack_title()
self.update_plugin_title.emit()
else:
editor = None
# Update the modification-state-dependent parameters
self.modification_changed()
# Update FindReplace binding
self.find_widget.set_editor(editor, refresh=False)
def modification_changed(self, state=None, index=None, editor_id=None):
"""
Current editor's modification state has changed
--> change tab title depending on new modification state
--> enable/disable save/save all actions
"""
if editor_id is not None:
for index, _finfo in enumerate(self.data):
if id(_finfo.editor) == editor_id:
break
# This must be done before refreshing save/save all actions:
# (otherwise Save/Save all actions will always be enabled)
self.opened_files_list_changed.emit()
# --
if index is None:
index = self.get_stack_index()
if index == -1:
return
finfo = self.data[index]
if state is None:
state = finfo.editor.document().isModified() or finfo.newly_created
self.set_stack_title(index, state)
# Toggle save/save all actions state
self.save_action.setEnabled(state)
self.refresh_save_all_action.emit()
# Refreshing eol mode
eol_chars = finfo.editor.get_line_separator()
self.refresh_eol_chars(eol_chars)
self.stack_history.refresh()
def refresh_eol_chars(self, eol_chars):
os_name = sourcecode.get_os_name_from_eol_chars(eol_chars)
self.sig_refresh_eol_chars.emit(os_name)
# ------ Load, reload
def reload(self, index):
"""Reload file from disk."""
finfo = self.data[index]
logger.debug("Reloading {}".format(finfo.filename))
txt, finfo.encoding = encoding.read(finfo.filename)
finfo.lastmodified = QFileInfo(finfo.filename).lastModified()
position = finfo.editor.get_position('cursor')
finfo.editor.set_text(txt)
finfo.editor.document().setModified(False)
self.autosave.file_hashes[finfo.filename] = hash(txt)
finfo.editor.set_cursor_position(position)
        #XXX CodeEditor-only: re-scan the whole text to rebuild outline
        # explorer data from scratch (could be optimized because
        # rehighlighting text means searching for all syntax coloring
        # patterns instead of only searching for class/def patterns which
        # would be sufficient for outline explorer data).
finfo.editor.rehighlight()
def revert(self):
"""Revert file from disk."""
index = self.get_stack_index()
finfo = self.data[index]
logger.debug("Reverting {}".format(finfo.filename))
filename = finfo.filename
if finfo.editor.document().isModified():
self.msgbox = QMessageBox(
QMessageBox.Warning,
self.title,
_("All changes to <b>%s</b> will be lost."
"<br>Do you want to revert file from disk?"
) % osp.basename(filename),
QMessageBox.Yes | QMessageBox.No,
self)
answer = self.msgbox.exec_()
if answer != QMessageBox.Yes:
return
self.reload(index)
def create_new_editor(self, fname, enc, txt, set_current, new=False,
cloned_from=None, add_where='end'):
"""
Create a new editor instance
Returns finfo object (instead of editor as in previous releases)
"""
editor = codeeditor.CodeEditor(self)
editor.go_to_definition.connect(
lambda fname, line, column: self.sig_go_to_definition.emit(
fname, line, column))
finfo = FileInfo(fname, enc, editor, new, self.threadmanager)
self.add_to_data(finfo, set_current, add_where)
finfo.sig_send_to_help.connect(self.send_to_help)
finfo.sig_show_object_info.connect(self.inspect_current_object)
finfo.todo_results_changed.connect(
lambda: self.todo_results_changed.emit())
finfo.edit_goto.connect(lambda fname, lineno, name:
self.edit_goto.emit(fname, lineno, name))
finfo.sig_save_bookmarks.connect(lambda s1, s2:
self.sig_save_bookmarks.emit(s1, s2))
editor.sig_run_selection.connect(self.run_selection)
editor.sig_run_cell.connect(self.run_cell)
editor.sig_debug_cell.connect(self.debug_cell)
editor.sig_run_cell_and_advance.connect(self.run_cell_and_advance)
editor.sig_re_run_last_cell.connect(self.re_run_last_cell)
editor.sig_new_file.connect(self.sig_new_file.emit)
editor.sig_breakpoints_saved.connect(self.sig_breakpoints_saved)
editor.sig_process_code_analysis.connect(
lambda: self.update_code_analysis_actions.emit())
editor.sig_refresh_formatting.connect(self.sig_refresh_formatting)
language = get_file_language(fname, txt)
editor.setup_editor(
linenumbers=self.linenumbers_enabled,
show_blanks=self.blanks_enabled,
underline_errors=self.underline_errors_enabled,
scroll_past_end=self.scrollpastend_enabled,
edge_line=self.edgeline_enabled,
edge_line_columns=self.edgeline_columns,
language=language,
markers=self.has_markers(),
font=self.default_font,
color_scheme=self.color_scheme,
wrap=self.wrap_enabled,
tab_mode=self.tabmode_enabled,
strip_mode=self.stripmode_enabled,
intelligent_backspace=self.intelligent_backspace_enabled,
automatic_completions=self.automatic_completions_enabled,
automatic_completions_after_chars=self.automatic_completion_chars,
automatic_completions_after_ms=self.automatic_completion_ms,
code_snippets=self.code_snippets_enabled,
completions_hint=self.completions_hint_enabled,
completions_hint_after_ms=self.completions_hint_after_ms,
hover_hints=self.hover_hints_enabled,
highlight_current_line=self.highlight_current_line_enabled,
highlight_current_cell=self.highlight_current_cell_enabled,
occurrence_highlighting=self.occurrence_highlighting_enabled,
occurrence_timeout=self.occurrence_highlighting_timeout,
close_parentheses=self.close_parentheses_enabled,
close_quotes=self.close_quotes_enabled,
add_colons=self.add_colons_enabled,
auto_unindent=self.auto_unindent_enabled,
indent_chars=self.indent_chars,
tab_stop_width_spaces=self.tab_stop_width_spaces,
cloned_from=cloned_from,
filename=fname,
show_class_func_dropdown=self.show_class_func_dropdown,
indent_guides=self.indent_guides,
folding=self.code_folding_enabled,
remove_trailing_spaces=self.always_remove_trailing_spaces,
remove_trailing_newlines=self.remove_trailing_newlines,
add_newline=self.add_newline,
format_on_save=self.format_on_save
)
if cloned_from is None:
editor.set_text(txt)
editor.document().setModified(False)
finfo.text_changed_at.connect(
lambda fname, position:
self.text_changed_at.emit(fname, position))
editor.sig_cursor_position_changed.connect(
self.editor_cursor_position_changed)
editor.textChanged.connect(self.start_stop_analysis_timer)
# Register external panels
for panel_class, args, kwargs, position in self.external_panels:
self.register_panel(
panel_class, *args, position=position, **kwargs)
def perform_completion_request(lang, method, params):
self.sig_perform_completion_request.emit(lang, method, params)
editor.sig_perform_completion_request.connect(
perform_completion_request)
editor.sig_start_operation_in_progress.connect(self.spinner.start)
editor.sig_stop_operation_in_progress.connect(self.spinner.stop)
editor.modificationChanged.connect(
lambda state: self.modification_changed(
state, editor_id=id(editor)))
editor.focus_in.connect(self.focus_changed)
editor.zoom_in.connect(lambda: self.zoom_in.emit())
editor.zoom_out.connect(lambda: self.zoom_out.emit())
editor.zoom_reset.connect(lambda: self.zoom_reset.emit())
editor.sig_eol_chars_changed.connect(
lambda eol_chars: self.refresh_eol_chars(eol_chars))
self.find_widget.set_editor(editor)
self.refresh_file_dependent_actions.emit()
self.modification_changed(index=self.data.index(finfo))
# To update the outline explorer.
editor.oe_proxy = OutlineExplorerProxyEditor(editor, editor.filename)
if self.outlineexplorer is not None:
self.outlineexplorer.register_editor(editor.oe_proxy)
# Needs to reset the highlighting on startup in case the PygmentsSH
# is in use
editor.run_pygments_highlighter()
options = {
'language': editor.language,
'filename': editor.filename,
'codeeditor': editor
}
self.sig_open_file.emit(options)
if self.get_stack_index() == 0:
self.current_changed(0)
return finfo
def editor_cursor_position_changed(self, line, index):
"""Cursor position of one of the editor in the stack has changed"""
self.sig_editor_cursor_position_changed.emit(line, index)
@Slot(str, str, bool)
def send_to_help(self, name, signature, force=False):
"""qstr1: obj_text, qstr2: argpspec, qstr3: note, qstr4: doc_text"""
if not force and not self.help_enabled:
return
editor = self.get_current_editor()
language = editor.language.lower()
signature = to_text_string(signature)
signature = unicodedata.normalize("NFKD", signature)
parts = signature.split('\n\n')
definition = parts[0]
documentation = '\n\n'.join(parts[1:])
args = ''
if '(' in definition and language == 'python':
args = definition[definition.find('('):]
else:
documentation = signature
doc = {
'obj_text': '',
'name': name,
'argspec': args,
'note': '',
'docstring': documentation,
'force_refresh': force,
'path': editor.filename
}
self.sig_help_requested.emit(doc)
def new(self, filename, encoding, text, default_content=False,
empty=False):
"""
Create new filename with *encoding* and *text*
"""
finfo = self.create_new_editor(filename, encoding, text,
set_current=False, new=True)
finfo.editor.set_cursor_position('eof')
if not empty:
finfo.editor.insert_text(os.linesep)
if default_content:
finfo.default = True
finfo.editor.document().setModified(False)
return finfo
def load(self, filename, set_current=True, add_where='end',
processevents=True):
"""
Load filename, create an editor instance and return it
This also sets the hash of the loaded file in the autosave component.
        *Warning* This loads the file and creates the editor, but it does not
        run the source code analysis -- the analysis must be done by the
        editor plugin (in case multiple editorstack instances are handled).
"""
filename = osp.abspath(to_text_string(filename))
if processevents:
self.starting_long_process.emit(_("Loading %s...") % filename)
text, enc = encoding.read(filename)
self.autosave.file_hashes[filename] = hash(text)
finfo = self.create_new_editor(filename, enc, text, set_current,
add_where=add_where)
index = self.data.index(finfo)
if processevents:
self.ending_long_process.emit("")
if self.isVisible() and self.checkeolchars_enabled \
and sourcecode.has_mixed_eol_chars(text):
name = osp.basename(filename)
self.msgbox = QMessageBox(
QMessageBox.Warning,
self.title,
_("<b>%s</b> contains mixed end-of-line "
"characters.<br>Spyder will fix this "
"automatically.") % name,
QMessageBox.Ok,
self)
self.msgbox.exec_()
self.set_os_eol_chars(index)
self.is_analysis_done = False
self.analyze_script(index)
return finfo
def set_os_eol_chars(self, index=None, osname=None):
"""Sets the EOL character(s) based on the operating system.
If `osname` is None, then the default line endings for the current
operating system (`os.name` value) will be used.
`osname` can be one of:
('posix', 'nt', 'java')
"""
if osname is None:
osname = os.name
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
eol_chars = sourcecode.get_eol_chars_from_os_name(osname)
finfo.editor.set_eol_chars(eol_chars)
finfo.editor.document().setModified(True)
def remove_trailing_spaces(self, index=None):
"""Remove trailing spaces"""
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.trim_trailing_spaces()
def trim_trailing_newlines(self, index=None):
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.trim_trailing_newlines()
def add_newline_to_file(self, index=None):
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.add_newline_to_file()
def fix_indentation(self, index=None):
"""Replace tab characters by spaces"""
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.fix_indentation()
def format_document_or_selection(self, index=None):
if index is None:
index = self.get_stack_index()
finfo = self.data[index]
finfo.editor.format_document_or_range()
# ------ Run
def run_selection(self):
"""
Run selected text or current line in console.
If some text is selected, then execute that text in console.
If no text is selected, then execute current line, unless current line
is empty. Then, advance cursor to next line. If cursor is on last line
and that line is not empty, then add a new blank line and move the
cursor there. If cursor is on last line and that line is empty, then do
not move cursor.
"""
text = self.get_current_editor().get_selection_as_executable_code()
if text:
self.exec_in_extconsole.emit(text.rstrip(), self.focus_to_editor)
return
editor = self.get_current_editor()
line = editor.get_current_line()
text = line.lstrip()
if text:
self.exec_in_extconsole.emit(text, self.focus_to_editor)
if editor.is_cursor_on_last_line() and text:
editor.append(editor.get_line_separator())
editor.move_cursor_to_next('line', 'down')
def run_cell(self, debug=False):
"""Run current cell."""
text, block = self.get_current_editor().get_cell_as_executable_code()
finfo = self.get_current_finfo()
editor = self.get_current_editor()
name = cell_name(block)
filename = finfo.filename
self._run_cell_text(text, editor, (filename, name), debug)
def debug_cell(self):
"""Debug current cell."""
self.run_cell(debug=True)
def run_cell_and_advance(self):
"""Run current cell and advance to the next one"""
self.run_cell()
self.advance_cell()
def advance_cell(self, reverse=False):
"""Advance to the next cell.
reverse = True --> go to previous cell.
"""
if not reverse:
move_func = self.get_current_editor().go_to_next_cell
else:
move_func = self.get_current_editor().go_to_previous_cell
if self.focus_to_editor:
move_func()
else:
term = QApplication.focusWidget()
move_func()
term.setFocus()
def re_run_last_cell(self):
"""Run the previous cell again."""
if self.last_cell_call is None:
return
filename, cell_name = self.last_cell_call
index = self.has_filename(filename)
if index is None:
return
editor = self.data[index].editor
try:
text = editor.get_cell_code(cell_name)
except RuntimeError:
return
self._run_cell_text(text, editor, (filename, cell_name))
def _run_cell_text(self, text, editor, cell_id, debug=False):
"""Run cell code in the console.
Cell code is run in the console by copying it to the console if
`self.run_cell_copy` is ``True`` otherwise by using the `run_cell`
function.
Parameters
----------
text : str
The code in the cell as a string.
        editor : CodeEditor
            The editor widget the cell belongs to.
        cell_id : tuple
            A (filename, cell_name) pair identifying the cell.
        debug : bool, optional
            Whether to debug the cell instead of simply running it.
"""
(filename, cell_name) = cell_id
if editor.is_python_or_ipython():
args = (text, cell_name, filename, self.run_cell_copy)
if debug:
self.debug_cell_in_ipyclient.emit(*args)
else:
self.run_cell_in_ipyclient.emit(*args)
if self.focus_to_editor:
editor.setFocus()
else:
console = QApplication.focusWidget()
console.setFocus()
# ------ Drag and drop
def dragEnterEvent(self, event):
"""
Reimplemented Qt method.
Inform Qt about the types of data that the widget accepts.
"""
logger.debug("dragEnterEvent was received")
source = event.mimeData()
# The second check is necessary on Windows, where source.hasUrls()
# can return True but source.urls() is []
# The third check is needed since a file could be dropped from
# compressed files. In Windows mimedata2url(source) returns None
# Fixes spyder-ide/spyder#5218.
has_urls = source.hasUrls()
has_text = source.hasText()
urls = source.urls()
all_urls = mimedata2url(source)
logger.debug("Drag event source has_urls: {}".format(has_urls))
logger.debug("Drag event source urls: {}".format(urls))
logger.debug("Drag event source all_urls: {}".format(all_urls))
logger.debug("Drag event source has_text: {}".format(has_text))
if has_urls and urls and all_urls:
text = [encoding.is_text_file(url) for url in all_urls]
logger.debug("Accept proposed action?: {}".format(any(text)))
if any(text):
event.acceptProposedAction()
else:
event.ignore()
elif source.hasText():
event.acceptProposedAction()
elif os.name == 'nt':
# This covers cases like dragging from compressed files,
# which can be opened by the Editor if they are plain
# text, but doesn't come with url info.
# Fixes spyder-ide/spyder#2032.
logger.debug("Accept proposed action on Windows")
event.acceptProposedAction()
else:
logger.debug("Ignore drag event")
event.ignore()
def dropEvent(self, event):
"""
Reimplement Qt method.
Unpack dropped data and handle it.
"""
logger.debug("dropEvent was received")
source = event.mimeData()
# The second check is necessary when mimedata2url(source)
# returns None.
# Fixes spyder-ide/spyder#7742.
if source.hasUrls() and mimedata2url(source):
files = mimedata2url(source)
files = [f for f in files if encoding.is_text_file(f)]
files = set(files or [])
for fname in files:
self.plugin_load.emit(fname)
elif source.hasText():
editor = self.get_current_editor()
if editor is not None:
editor.insert_text(source.text())
else:
event.ignore()
event.acceptProposedAction()
def register_panel(self, panel_class, *args,
position=Panel.Position.LEFT, **kwargs):
"""Register a panel in all codeeditors."""
if (panel_class, args, kwargs, position) not in self.external_panels:
self.external_panels.append((panel_class, args, kwargs, position))
for finfo in self.data:
cur_panel = finfo.editor.panels.register(
panel_class(*args, **kwargs), position=position)
if not cur_panel.isVisible():
cur_panel.setVisible(True)
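# A minimal sketch of how a plugin could use register_panel() above to mount
# an extra panel on every editor of a stack. ``panel_class`` stands for any
# Panel subclass the plugin provides; it is hypothetical and not part of this
# module, and the function itself is illustrative only.
def _example_mount_panel(editorstack, panel_class):
    """Illustration only: mount ``panel_class`` on the right of each editor."""
    editorstack.register_panel(panel_class, position=Panel.Position.RIGHT)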
class EditorSplitter(QSplitter):
"""QSplitter for editor windows."""
def __init__(self, parent, plugin, menu_actions, first=False,
register_editorstack_cb=None, unregister_editorstack_cb=None):
"""Create a splitter for dividing an editor window into panels.
Adds a new EditorStack instance to this splitter. If it's not
the first splitter, clones the current EditorStack from the plugin.
Args:
parent: Parent widget.
plugin: Plugin this widget belongs to.
menu_actions: QActions to include from the parent.
first: Boolean if this is the first splitter in the editor.
register_editorstack_cb: Callback to register the EditorStack.
Defaults to plugin.register_editorstack() to
register the EditorStack with the Editor plugin.
unregister_editorstack_cb: Callback to unregister the EditorStack.
Defaults to plugin.unregister_editorstack() to
unregister the EditorStack with the Editor plugin.
"""
QSplitter.__init__(self, parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.setChildrenCollapsible(False)
self.toolbar_list = None
self.menu_list = None
self.plugin = plugin
if register_editorstack_cb is None:
register_editorstack_cb = self.plugin.register_editorstack
self.register_editorstack_cb = register_editorstack_cb
if unregister_editorstack_cb is None:
unregister_editorstack_cb = self.plugin.unregister_editorstack
self.unregister_editorstack_cb = unregister_editorstack_cb
self.menu_actions = menu_actions
self.editorstack = EditorStack(self, menu_actions)
self.register_editorstack_cb(self.editorstack)
if not first:
self.plugin.clone_editorstack(editorstack=self.editorstack)
self.editorstack.destroyed.connect(lambda: self.editorstack_closed())
self.editorstack.sig_split_vertically.connect(
lambda: self.split(orientation=Qt.Vertical))
self.editorstack.sig_split_horizontally.connect(
lambda: self.split(orientation=Qt.Horizontal))
self.addWidget(self.editorstack)
if not running_under_pytest():
self.editorstack.set_color_scheme(plugin.get_color_scheme())
self.setStyleSheet(self._stylesheet)
def closeEvent(self, event):
"""Override QWidget closeEvent().
This event handler is called with the given event when Qt
receives a window close request from a top-level widget.
"""
QSplitter.closeEvent(self, event)
def __give_focus_to_remaining_editor(self):
focus_widget = self.plugin.get_focus_widget()
if focus_widget is not None:
focus_widget.setFocus()
def editorstack_closed(self):
logger.debug("method 'editorstack_closed':")
logger.debug(" self : %r" % self)
try:
self.unregister_editorstack_cb(self.editorstack)
self.editorstack = None
close_splitter = self.count() == 1
except (RuntimeError, AttributeError):
# editorsplitter has been destroyed (happens when closing a
# EditorMainWindow instance)
return
if close_splitter:
# editorstack just closed was the last widget in this QSplitter
self.close()
return
self.__give_focus_to_remaining_editor()
def editorsplitter_closed(self):
logger.debug("method 'editorsplitter_closed':")
logger.debug(" self : %r" % self)
try:
close_splitter = self.count() == 1 and self.editorstack is None
except RuntimeError:
# editorsplitter has been destroyed (happens when closing a
# EditorMainWindow instance)
return
if close_splitter:
# editorsplitter just closed was the last widget in this QSplitter
self.close()
return
elif self.count() == 2 and self.editorstack:
# back to the initial state: a single editorstack instance,
# as a single widget in this QSplitter: orientation may be changed
self.editorstack.reset_orientation()
self.__give_focus_to_remaining_editor()
def split(self, orientation=Qt.Vertical):
"""Create and attach a new EditorSplitter to the current EditorSplitter.
The new EditorSplitter widget will contain an EditorStack that
is a clone of the current EditorStack.
A single EditorSplitter instance can be split multiple times, but the
orientation will be the same for all the direct splits. If one of
the child splits is split, then that split can have a different
orientation.
"""
self.setOrientation(orientation)
self.editorstack.set_orientation(orientation)
editorsplitter = EditorSplitter(self.parent(), self.plugin,
self.menu_actions,
register_editorstack_cb=self.register_editorstack_cb,
unregister_editorstack_cb=self.unregister_editorstack_cb)
self.addWidget(editorsplitter)
editorsplitter.destroyed.connect(self.editorsplitter_closed)
current_editor = editorsplitter.editorstack.get_current_editor()
if current_editor is not None:
current_editor.setFocus()
def iter_editorstacks(self):
"""Return the editor stacks for this splitter and every first child.
Note: If a splitter contains more than one splitter as a direct
child, only the first child's editor stack is included.
Returns:
List of tuples containing (EditorStack instance, orientation).
"""
editorstacks = [(self.widget(0), self.orientation())]
if self.count() > 1:
editorsplitter = self.widget(1)
editorstacks += editorsplitter.iter_editorstacks()
return editorstacks
def get_layout_settings(self):
"""Return the layout state for this splitter and its children.
Record the current state, including file names and current line
numbers, of the splitter panels.
Returns:
A dictionary containing keys {hexstate, sizes, splitsettings}.
hexstate: String of saveState() for self.
                sizes: List of sizes() for self.
splitsettings: List of tuples of the form
(orientation, cfname, clines) for each EditorSplitter
and its EditorStack.
orientation: orientation() for the editor
splitter (which may be a child of self).
cfname: EditorStack current file name.
clines: Current line number for each file in the
EditorStack.
"""
splitsettings = []
for editorstack, orientation in self.iter_editorstacks():
clines = []
cfname = ''
# XXX - this overrides value from the loop to always be False?
orientation = False
if hasattr(editorstack, 'data'):
clines = [finfo.editor.get_cursor_line_number()
for finfo in editorstack.data]
cfname = editorstack.get_current_filename()
splitsettings.append((orientation == Qt.Vertical, cfname, clines))
return dict(hexstate=qbytearray_to_str(self.saveState()),
sizes=self.sizes(), splitsettings=splitsettings)
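    # For reference, the dictionary built above looks roughly like this
    # (a sketch only; the paths, sizes and line numbers are made up):
    #
    #     {'hexstate': '000000ff00000001...',
    #      'sizes': [480, 480],
    #      'splitsettings': [(False, '/tmp/a.py', [10, 1]),
    #                        (False, '/tmp/b.py', [42])]}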
def set_layout_settings(self, settings, dont_goto=None):
"""Restore layout state for the splitter panels.
Apply the settings to restore a saved layout within the editor. If
the splitsettings key doesn't exist, then return without restoring
any settings.
The current EditorSplitter (self) calls split() for each element
in split_settings, thus recreating the splitter panels from the saved
state. split() also clones the editorstack, which is then
iterated over to restore the saved line numbers on each file.
The size and positioning of each splitter panel is restored from
hexstate.
Args:
            settings: A dictionary with keys {hexstate, sizes, splitsettings}
                    that define the layout for the EditorSplitter panels.
dont_goto: Defaults to None, which positions the cursor to the
end of the editor. If there's a value, positions the
cursor on the saved line number for each editor.
"""
splitsettings = settings.get('splitsettings')
if splitsettings is None:
return
splitter = self
editor = None
for i, (is_vertical, cfname, clines) in enumerate(splitsettings):
if i > 0:
splitter.split(Qt.Vertical if is_vertical else Qt.Horizontal)
splitter = splitter.widget(1)
editorstack = splitter.widget(0)
for j, finfo in enumerate(editorstack.data):
editor = finfo.editor
# TODO: go_to_line is not working properly (the line it jumps
# to is not the corresponding to that file). This will be fixed
# in a future PR (which will fix spyder-ide/spyder#3857).
if dont_goto is not None:
                    # Skip go to line for first file because it is already
                    # there.
pass
else:
try:
editor.go_to_line(clines[j])
except IndexError:
pass
hexstate = settings.get('hexstate')
if hexstate is not None:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
sizes = settings.get('sizes')
if sizes is not None:
self.setSizes(sizes)
if editor is not None:
editor.clearFocus()
editor.setFocus()
@property
def _stylesheet(self):
css = qstylizer.style.StyleSheet()
css.QSplitter.setValues(
background=QStylePalette.COLOR_BACKGROUND_1
)
return css.toString()
class EditorWidget(QSplitter):
CONF_SECTION = 'editor'
def __init__(self, parent, plugin, menu_actions):
QSplitter.__init__(self, parent)
self.setAttribute(Qt.WA_DeleteOnClose)
statusbar = parent.statusBar() # Create a status bar
self.vcs_status = VCSStatus(self)
self.cursorpos_status = CursorPositionStatus(self)
self.encoding_status = EncodingStatus(self)
self.eol_status = EOLStatus(self)
self.readwrite_status = ReadWriteStatus(self)
statusbar.insertPermanentWidget(0, self.readwrite_status)
statusbar.insertPermanentWidget(0, self.eol_status)
statusbar.insertPermanentWidget(0, self.encoding_status)
statusbar.insertPermanentWidget(0, self.cursorpos_status)
statusbar.insertPermanentWidget(0, self.vcs_status)
self.editorstacks = []
self.plugin = plugin
self.find_widget = FindReplace(self, enable_replace=True)
self.plugin.register_widget_shortcuts(self.find_widget)
self.find_widget.hide()
# TODO: Check this initialization once the editor is migrated to the
# new API
self.outlineexplorer = OutlineExplorerWidget(
'outline_explorer',
plugin,
self,
context=f'editor_window_{str(id(self))}'
)
self.outlineexplorer.edit_goto.connect(
lambda filenames, goto, word:
plugin.load(filenames=filenames, goto=goto, word=word,
editorwindow=self.parent()))
editor_widgets = QWidget(self)
editor_layout = QVBoxLayout()
editor_layout.setContentsMargins(0, 0, 0, 0)
editor_widgets.setLayout(editor_layout)
editorsplitter = EditorSplitter(self, plugin, menu_actions,
register_editorstack_cb=self.register_editorstack,
unregister_editorstack_cb=self.unregister_editorstack)
self.editorsplitter = editorsplitter
editor_layout.addWidget(editorsplitter)
editor_layout.addWidget(self.find_widget)
splitter = QSplitter(self)
splitter.setContentsMargins(0, 0, 0, 0)
splitter.addWidget(editor_widgets)
splitter.addWidget(self.outlineexplorer)
splitter.setStretchFactor(0, 5)
splitter.setStretchFactor(1, 1)
def register_editorstack(self, editorstack):
self.editorstacks.append(editorstack)
logger.debug("EditorWidget.register_editorstack: %r" % editorstack)
self.__print_editorstacks()
self.plugin.last_focused_editorstack[self.parent()] = editorstack
editorstack.set_closable(len(self.editorstacks) > 1)
editorstack.set_outlineexplorer(self.outlineexplorer)
editorstack.set_find_widget(self.find_widget)
editorstack.reset_statusbar.connect(self.readwrite_status.hide)
editorstack.reset_statusbar.connect(self.encoding_status.hide)
editorstack.reset_statusbar.connect(self.cursorpos_status.hide)
editorstack.readonly_changed.connect(
self.readwrite_status.update_readonly)
editorstack.encoding_changed.connect(
self.encoding_status.update_encoding)
editorstack.sig_editor_cursor_position_changed.connect(
self.cursorpos_status.update_cursor_position)
editorstack.sig_refresh_eol_chars.connect(self.eol_status.update_eol)
self.plugin.register_editorstack(editorstack)
def __print_editorstacks(self):
logger.debug("%d editorstack(s) in editorwidget:" %
len(self.editorstacks))
for edst in self.editorstacks:
logger.debug(" %r" % edst)
def unregister_editorstack(self, editorstack):
logger.debug("EditorWidget.unregister_editorstack: %r" % editorstack)
self.plugin.unregister_editorstack(editorstack)
self.editorstacks.pop(self.editorstacks.index(editorstack))
self.__print_editorstacks()
class EditorMainWindow(QMainWindow):
def __init__(self, plugin, menu_actions, toolbar_list, menu_list):
QMainWindow.__init__(self)
self.setAttribute(Qt.WA_DeleteOnClose)
self.plugin = plugin
self.window_size = None
self.editorwidget = EditorWidget(self, plugin, menu_actions)
self.setCentralWidget(self.editorwidget)
# Setting interface theme
self.setStyleSheet(str(APP_STYLESHEET))
# Give focus to current editor to update/show all status bar widgets
editorstack = self.editorwidget.editorsplitter.editorstack
editor = editorstack.get_current_editor()
if editor is not None:
editor.setFocus()
self.setWindowTitle("Spyder - %s" % plugin.windowTitle())
self.setWindowIcon(plugin.windowIcon())
if toolbar_list:
self.toolbars = []
for title, object_name, actions in toolbar_list:
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setStyleSheet(str(APP_TOOLBAR_STYLESHEET))
toolbar.setMovable(False)
add_actions(toolbar, actions)
self.toolbars.append(toolbar)
if menu_list:
quit_action = create_action(self, _("Close window"),
icon=ima.icon("close_pane"),
tip=_("Close this window"),
triggered=self.close)
self.menus = []
for index, (title, actions) in enumerate(menu_list):
menu = self.menuBar().addMenu(title)
if index == 0:
# File menu
add_actions(menu, actions+[None, quit_action])
else:
add_actions(menu, actions)
self.menus.append(menu)
def get_toolbars(self):
"""Get the toolbars."""
return self.toolbars
def add_toolbars_to_menu(self, menu_title, actions):
"""Add toolbars to a menu."""
# Six is the position of the view menu in menus list
# that you can find in plugins/editor.py setup_other_windows.
view_menu = self.menus[6]
view_menu.setObjectName('checkbox-padding')
if actions == self.toolbars and view_menu:
toolbars = []
for toolbar in self.toolbars:
action = toolbar.toggleViewAction()
toolbars.append(action)
add_actions(view_menu, toolbars)
def load_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbars:
dic[toolbar.objectName()] = toolbar
toolbar.toggleViewAction().setChecked(False)
toolbar.setVisible(False)
for name in toolbars_names:
if name in dic:
dic[name].toggleViewAction().setChecked(True)
dic[name].setVisible(True)
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.isFullScreen():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
def closeEvent(self, event):
"""Reimplement Qt method"""
if self.plugin._undocked_window is not None:
self.plugin.dockwidget.setWidget(self.plugin)
self.plugin.dockwidget.setVisible(True)
self.plugin.switch_to_plugin()
QMainWindow.closeEvent(self, event)
if self.plugin._undocked_window is not None:
self.plugin._undocked_window = None
def get_layout_settings(self):
"""Return layout state"""
splitsettings = self.editorwidget.editorsplitter.get_layout_settings()
return dict(size=(self.window_size.width(), self.window_size.height()),
pos=(self.pos().x(), self.pos().y()),
is_maximized=self.isMaximized(),
is_fullscreen=self.isFullScreen(),
hexstate=qbytearray_to_str(self.saveState()),
splitsettings=splitsettings)
def set_layout_settings(self, settings):
"""Restore layout state"""
size = settings.get('size')
if size is not None:
self.resize( QSize(*size) )
self.window_size = self.size()
pos = settings.get('pos')
if pos is not None:
self.move( QPoint(*pos) )
hexstate = settings.get('hexstate')
if hexstate is not None:
self.restoreState( QByteArray().fromHex(
str(hexstate).encode('utf-8')) )
if settings.get('is_maximized'):
self.setWindowState(Qt.WindowMaximized)
if settings.get('is_fullscreen'):
self.setWindowState(Qt.WindowFullScreen)
splitsettings = settings.get('splitsettings')
if splitsettings is not None:
self.editorwidget.editorsplitter.set_layout_settings(splitsettings)
class EditorPluginExample(QSplitter):
def __init__(self):
QSplitter.__init__(self)
self._dock_action = None
self._undock_action = None
self._close_plugin_action = None
self._undocked_window = None
menu_actions = []
self.editorstacks = []
self.editorwindows = []
self.last_focused_editorstack = {} # fake
self.find_widget = FindReplace(self, enable_replace=True)
self.outlineexplorer = OutlineExplorerWidget(None, self, self)
self.outlineexplorer.edit_goto.connect(self.go_to_file)
self.editor_splitter = EditorSplitter(self, self, menu_actions,
first=True)
editor_widgets = QWidget(self)
editor_layout = QVBoxLayout()
editor_layout.setContentsMargins(0, 0, 0, 0)
editor_widgets.setLayout(editor_layout)
editor_layout.addWidget(self.editor_splitter)
editor_layout.addWidget(self.find_widget)
self.setContentsMargins(0, 0, 0, 0)
self.addWidget(editor_widgets)
self.addWidget(self.outlineexplorer)
self.setStretchFactor(0, 5)
self.setStretchFactor(1, 1)
self.menu_actions = menu_actions
self.toolbar_list = None
self.menu_list = None
self.setup_window([], [])
def go_to_file(self, fname, lineno, text='', start_column=None):
editorstack = self.editorstacks[0]
editorstack.set_current_filename(to_text_string(fname))
editor = editorstack.get_current_editor()
editor.go_to_line(lineno, word=text, start_column=start_column)
def closeEvent(self, event):
for win in self.editorwindows[:]:
win.close()
logger.debug("%d: %r" % (len(self.editorwindows), self.editorwindows))
logger.debug("%d: %r" % (len(self.editorstacks), self.editorstacks))
event.accept()
def load(self, fname):
QApplication.processEvents()
editorstack = self.editorstacks[0]
editorstack.load(fname)
editorstack.analyze_script()
def register_editorstack(self, editorstack):
logger.debug("FakePlugin.register_editorstack: %r" % editorstack)
self.editorstacks.append(editorstack)
if self.isAncestorOf(editorstack):
# editorstack is a child of the Editor plugin
editorstack.set_closable(len(self.editorstacks) > 1)
editorstack.set_outlineexplorer(self.outlineexplorer)
editorstack.set_find_widget(self.find_widget)
oe_btn = create_toolbutton(self)
editorstack.add_corner_widgets_to_tabbar([5, oe_btn])
action = QAction(self)
editorstack.set_io_actions(action, action, action, action)
font = QFont("Courier New")
font.setPointSize(10)
editorstack.set_default_font(font, color_scheme='Spyder')
editorstack.sig_close_file.connect(self.close_file_in_all_editorstacks)
editorstack.file_saved.connect(self.file_saved_in_editorstack)
editorstack.file_renamed_in_data.connect(
self.file_renamed_in_data_in_editorstack)
editorstack.plugin_load.connect(self.load)
def unregister_editorstack(self, editorstack):
logger.debug("FakePlugin.unregister_editorstack: %r" % editorstack)
self.editorstacks.pop(self.editorstacks.index(editorstack))
def clone_editorstack(self, editorstack):
editorstack.clone_from(self.editorstacks[0])
def setup_window(self, toolbar_list, menu_list):
self.toolbar_list = toolbar_list
self.menu_list = menu_list
def create_new_window(self):
        window = EditorMainWindow(self, self.menu_actions,
                                  self.toolbar_list, self.menu_list)
window.resize(self.size())
window.show()
self.register_editorwindow(window)
window.destroyed.connect(lambda: self.unregister_editorwindow(window))
def register_editorwindow(self, window):
logger.debug("register_editorwindowQObject*: %r" % window)
self.editorwindows.append(window)
def unregister_editorwindow(self, window):
logger.debug("unregister_editorwindow: %r" % window)
self.editorwindows.pop(self.editorwindows.index(window))
def get_focus_widget(self):
pass
@Slot(str, str)
def close_file_in_all_editorstacks(self, editorstack_id_str, filename):
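        # Signals are blocked on the other stacks while the file is closed so
        # they do not re-emit sig_close_file and bounce the close back here.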
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.blockSignals(True)
index = editorstack.get_index_from_filename(filename)
editorstack.close_file(index, force=True)
editorstack.blockSignals(False)
# This method is never called in this plugin example. It's here only
# to show how to use the file_saved signal (see above).
@Slot(str, str, str)
def file_saved_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was saved in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.file_saved_in_other_editorstack(original_filename,
filename)
    # This method is never called in this plugin example. It's here only
    # to show how to use the file_renamed_in_data signal (see above).
@Slot(str, str, str)
def file_renamed_in_data_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was renamed in data in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.rename_in_data(original_filename, filename)
def register_widget_shortcuts(self, widget):
"""Fake!"""
pass
def get_color_scheme(self):
pass
def test():
from spyder.utils.qthelpers import qapplication
from spyder.config.base import get_module_path
spyder_dir = get_module_path('spyder')
app = qapplication(test_time=8)
test = EditorPluginExample()
test.resize(900, 700)
test.show()
import time
t0 = time.time()
test.load(osp.join(spyder_dir, "widgets", "collectionseditor.py"))
test.load(osp.join(spyder_dir, "plugins", "editor", "widgets",
"editor.py"))
test.load(osp.join(spyder_dir, "plugins", "explorer", "widgets",
'explorer.py'))
test.load(osp.join(spyder_dir, "plugins", "editor", "widgets",
"codeeditor.py"))
print("Elapsed time: %.3f s" % (time.time()-t0)) # spyder: test-skip
sys.exit(app.exec_())
if __name__ == "__main__":
test()
|
the-stack_0_14464 | import collections
import cv2
import face_recognition.detect_face as detect_face
import face_recognition.facenet as facenet
import math
import numpy as np
import os
import pickle
import sys
import tensorflow as tf
import time
import urllib.request as ur
from datetime import datetime
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from timeit import default_timer as timer
from sklearn.svm import SVC
# path to the user setting file
SETTING_PATH = 'setting.txt'
# read user settings from the setting text file
setting_file = open(SETTING_PATH, 'r')
# define the IP webcam to be used as the input
setting_file.readline()
# strip the trailing newline so urlopen receives a clean URL
URL = setting_file.readline().rstrip('\n')
# program starting time
start_time = datetime.now()
# path to the object detection log file, making sure there's no invalid characters in the file name
OBJECT_DETECTION_LOG_PATH = 'object_detection/object-' + str(start_time.date()) + '-' + str(start_time.time()).replace(':', '-') + '.txt'
# path to the face recognition log file, making sure there's no invalid characters in the file name
FACE_RECOGNITION_LOG_PATH = 'face_recognition/face-' + str(start_time.date()) + '-' + str(start_time.time()).replace(':', '-') + '.txt'
# variables for calculating fps
fps_count_started = False
fps_count_start_time = 0.0
fps_count_end_time = 0.0
fps_count_num_of_frames = 0
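# The variables above implement a simple FPS estimate: frames processed
# divided by the elapsed time between the two presses of the 'p' key.
# The same bookkeeping as a small helper (a sketch; the loop below keeps
# the counters inline):
def estimate_fps(num_frames, start_seconds, end_seconds):
    """Return the average frames per second over the measured interval."""
    elapsed = end_seconds - start_seconds
    return num_frames / elapsed if elapsed > 0 else 0.0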
"""
Object detection.
Variables.
"""
object_detection_initialised = False
object_detection_on = False
# path to the user setting file for object detection
OBJECT_DETECTION_SETTING_PATH = 'object_detection/object_detection_setting.txt'
# path to object detection models
OBJECT_DETECTION_MODEL_PATH = 'models/object_detection/'
# user setting
# read user settings from the setting text file
object_detection_setting_file = open(OBJECT_DETECTION_SETTING_PATH, 'r')
# define the object detection model to be used
object_detection_setting_file.readline()
object_detection_model_name = object_detection_setting_file.readline()
# get rid of the line break at the end of the line just read
object_detection_model_name = object_detection_model_name.rstrip('\n')
# path to the frozen detection graph, which is the actual model used to perform object detection
OBJECT_DETECTION_CKPT_PATH = OBJECT_DETECTION_MODEL_PATH + object_detection_model_name + '/frozen_inference_graph.pb'
# path to the label map consisting of labels to be added to corresponding detection boxes
OBJECT_DETECTION_LABELS_PATH = OBJECT_DETECTION_MODEL_PATH + object_detection_model_name + '/oid_v5_label_map_customised.pbtxt'
# define the max number of classes of objects to be detected
object_detection_setting_file.readline()
max_num_classes_object = int(object_detection_setting_file.readline())
# define which classes of objects to be detected
selected_classes_object = []
object_detection_setting_file.readline()
for i in range(max_num_classes_object):
object_detection_setting_file.readline()
class_setting = int(object_detection_setting_file.readline())
if class_setting == 1:
selected_classes_object.append(i+1)
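# The setting files read above follow a simple two-line layout: a label line
# followed by a value line. A minimal reader for that layout (illustrative
# only; the script keeps the readline() calls inline):
def read_labelled_value(setting_file, cast=str):
    """Skip the label line and return the next line converted with ``cast``."""
    setting_file.readline()  # label / description line
    return cast(setting_file.readline().rstrip('\n'))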
label_map_object = label_map_util.load_labelmap(OBJECT_DETECTION_LABELS_PATH)
categories_object = label_map_util.convert_label_map_to_categories(label_map_object, max_num_classes=max_num_classes_object, use_display_name=True)
category_index_object = label_map_util.create_category_index(categories_object)
# load the object detection model into memory
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(OBJECT_DETECTION_CKPT_PATH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess_object = tf.Session(graph=detection_graph)
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
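# A sketch of pushing one decoded frame through the detection graph loaded
# above; the main loop below performs exactly these steps inline. The outputs
# are normalised [ymin, xmin, ymax, xmax] boxes, confidence scores, class ids
# matching the label map, and the number of detections.
def detect_objects(frame):
    """Run the object detection graph on a single BGR frame."""
    frame_expanded = np.expand_dims(frame, axis=0)
    return sess_object.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: frame_expanded})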
"""
Face recognition.
Variables.
"""
face_recognition_on = False
# path to the user setting file for face recognition
FACE_RECOGNITION_SETTING_PATH = 'face_recognition/face_recognition_setting.txt'
# path to face recognition models.
FACE_RECOGNITION_MODEL_PATH = 'models/face_recognition/'
# path to the model used to perform face detection.
FACE_RECOGNITION_CKPT_PATH = FACE_RECOGNITION_MODEL_PATH + '20180402-114759.pb'
# path to the model used to perform face recognition.
FACE_RECOGNITION_CLASSIFIER_PATH = FACE_RECOGNITION_MODEL_PATH + 'my_classifier.pkl'
# path to the label map consisting of labels to be added to corresponding detection boxes.
FACE_RECOGNITION_LABELS_PATH = FACE_RECOGNITION_MODEL_PATH + 'facenet_label_map.pbtxt'
# user setting
# read user settings from the setting text file
face_recognition_setting_file = open(FACE_RECOGNITION_SETTING_PATH, 'r')
# define the max number of classes of faces to be detected
face_recognition_setting_file.readline()
max_num_classes_face = int(face_recognition_setting_file.readline())
# define the size of the input to be resized to
face_recognition_setting_file.readline()
input_image_size_face = int(face_recognition_setting_file.readline())
# define the minimum face size to be detected
face_recognition_setting_file.readline()
minsize_face = int(face_recognition_setting_file.readline())
# define the three steps face detection threshold
threshold_detection_face = [0.0, 0.0, 0.0]
face_recognition_setting_file.readline()
for i in range(3):
threshold_detection_face[i] = float(face_recognition_setting_file.readline())
# define the factor used to create a scaling pyramid of face sizes to detect in the image
face_recognition_setting_file.readline()
factor_face = float(face_recognition_setting_file.readline())
# define the face recognition threshold
face_recognition_setting_file.readline()
threshold_recognition_face = float(face_recognition_setting_file.readline())
label_map_face = label_map_util.load_labelmap(FACE_RECOGNITION_LABELS_PATH)
categories_face = label_map_util.convert_label_map_to_categories(label_map_face, max_num_classes=max_num_classes_face, use_display_name=True)
category_index_face = label_map_util.create_category_index(categories_face)
# load The Custom Classifier
with open(FACE_RECOGNITION_CLASSIFIER_PATH, 'rb') as file:
model, class_names = pickle.load(file)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
sess_face = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
# load the model
facenet.load_model(FACE_RECOGNITION_CKPT_PATH)
# get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
embedding_size = embeddings.get_shape()[1]
pnet, rnet, onet = detect_face.create_mtcnn(sess_face, "./face_recognition")
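# The main loop below grabs individual JPEG snapshots from the IP webcam URL
# and decodes them with OpenCV. The same steps as a small helper (a sketch;
# the loop inlines these calls):
def fetch_frame(url):
    """Download one snapshot from the IP webcam and decode it to a BGR image."""
    snapshot = ur.urlopen(url)
    image_array = np.array(bytearray(snapshot.read()), dtype=np.uint8)
    return cv2.imdecode(image_array, -1)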
while True:
image = ur.urlopen(URL)
image_array = np.array(bytearray(image.read()),dtype=np.uint8)
frame = cv2.imdecode(image_array,-1)
# dimension of the input image
frame_shape = frame.shape
"""
Object detection.
Runtime.
"""
if object_detection_initialised == False or object_detection_on == True:
frame_expanded = np.expand_dims(frame, axis=0)
(boxes_object, scores_object, classes_object, num_object) = sess_object.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
if object_detection_initialised == True:
num_of_objects_detected = int(num_object[0])
for i in range(0, num_of_objects_detected):
# only draw objects of selected classes
if int(classes_object[0][i]) not in selected_classes_object:
boxes_object[0][i] = [float(0), float(0), float(0), float(0)]
scores_object[0][i] = float(0)
classes_object[0][i] = float(1)
num_object[0] = num_object[0] - 1
# report objects of selected classes once detected
else:
with open(OBJECT_DETECTION_LOG_PATH, 'a') as log_file:
log_file.write('Time: ' + str(datetime.now()) + '\tCategory: ' + str(int(classes_object[0][i])) + '\tScore: ' + str(scores_object[0][i]) + '\n')
# visualise the detection results.
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes_object),
np.squeeze(classes_object).astype(np.int32),
np.squeeze(scores_object),
category_index_object,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.60)
# only run initialisation once.
if object_detection_initialised == False:
object_detection_initialised = True
"""
Face recognition.
Runtime.
"""
if face_recognition_on == True:
bounding_boxes_face, _ = detect_face.detect_face(frame, minsize_face, pnet, rnet, onet, threshold_detection_face, factor_face)
faces_found = bounding_boxes_face.shape[0]
boxes_face = [[[float(0),float(0),float(0),float(0)]] * (faces_found+1)]
scores_face = [[float(0)] * (faces_found+1)]
classes_face = [[float(0)] * (faces_found+1)]
try:
if faces_found > 0:
det_face = bounding_boxes_face[:, 0:4]
bb_face = np.zeros((faces_found, 4), dtype=np.int32)
for i in range(faces_found):
bb_face[i][0] = det_face[i][0]
bb_face[i][1] = det_face[i][1]
bb_face[i][2] = det_face[i][2]
bb_face[i][3] = det_face[i][3]
cropped_face = frame[bb_face[i][1]:bb_face[i][3], bb_face[i][0]:bb_face[i][2], :]
scaled_face = cv2.resize(cropped_face, (input_image_size_face, input_image_size_face), interpolation=cv2.INTER_CUBIC)
scaled_face = facenet.prewhiten(scaled_face)
reshaped_face = scaled_face.reshape(-1, input_image_size_face, input_image_size_face, 3)
embed_array_face = sess_face.run(embeddings, feed_dict={images_placeholder: reshaped_face, phase_train_placeholder: False})
predictions_face = model.predict_proba(embed_array_face)
best_class_indices_face = np.argmax(predictions_face, axis=1)
best_class_score_face = predictions_face[np.arange(len(best_class_indices_face)), best_class_indices_face]
best_name_face = class_names[best_class_indices_face[0]]
# get relative coordinates of detection boxes
boxes_face[0][i] = [float(bb_face[i][1])/frame_shape[0], float(bb_face[i][0])/frame_shape[1], float(bb_face[i][3])/frame_shape[0], float(bb_face[i][2])/frame_shape[1]]
# the confidence score of a face is the one of its best match
scores_face[0][i] = float(best_class_score_face)
# a face is considered being recognised as someone when the best match has a score higher than the threshold
if best_class_score_face > threshold_recognition_face:
classes_face[0][i] = float(best_class_indices_face[0] + 2)
# otherwise the face detected is considered unknown
else:
classes_face[0][i] = float(1)
# report unknown faces once detected
with open(FACE_RECOGNITION_LOG_PATH, 'a') as log_file:
log_file.write('Time: ' + str(datetime.now()) + '\tScore: ' + str(scores_face[0][i]) + '\n')
# visualise the detection and recognition results.
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes_face),
np.squeeze(classes_face).astype(np.int32),
np.squeeze(scores_face),
category_index_face,
use_normalized_coordinates=True,
line_thickness=8)
except:
pass
# display the result image
cv2.imshow('Smart Surveillance Camera', frame)
# increment number of frames being processed by one for calculating FPS
fps_count_num_of_frames = fps_count_num_of_frames + 1
# handle user input
key = cv2.waitKey(1)
# press 'q' to exit
if key == ord('q'):
break
# press 'o' to switch object detection on and off
elif key == ord('o'):
object_detection_on = not object_detection_on
# press 'f' to switch face recognition on and off
elif key == ord('f'):
face_recognition_on = not face_recognition_on
# press 'p' to switch fps calculation on and off
elif key == ord('p'):
# initialise and start the fps calculation if it's not already started
if fps_count_started == False:
fps_count_num_of_frames = 0
fps_count_start_time = timer()
fps_count_started = True
# stop, calculate and display the fps if it's already started
else:
fps_count_started = False
fps_count_end_time = timer()
fps = fps_count_num_of_frames / (fps_count_end_time - fps_count_start_time)
print('FPS:' + str(fps))
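            # illustrative arithmetic with hypothetical numbers: 300 frames counted
            # over 12.5 seconds between the two 'p' presses gives 300 / 12.5 = 24 FPS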
cv2.destroyAllWindows() |
the-stack_0_14466 | from .utils import methods
DEBUG_MODE = False
class Router():
def __init__(self):
self.routes = {}
self.num_middleware = 0
for method in methods:
self._generate_add_route_method(method)
# helper method for adding routes.
# if middleware is provided, mount the middleware at the path.
# if not, return a decorator that mounts the function as middleware at path.
def _add_route(self, path, middleware=None, method='*'):
if path not in self.routes:
self.routes[path] = {}
method = method.upper()
if method not in self.routes[path]:
self.routes[path][method] = []
# the actual method that mounts the middleware to the route and method.
# may be returned as a decorator.
def add_route(f):
f.index = self.num_middleware
self.num_middleware += 1
self.routes[path][method].append(f)
if DEBUG_MODE:
print('mounting middleware %s at path %s' % (middleware, path))
if middleware:
for m in middleware:
add_route(m)
else:
return add_route
# generates an add_route method for EVERY HTTP method.
# this is for app.get, app.post, app.trace, app.mkactivity, etc.
def _generate_add_route_method(self, method):
add_route = self._add_route
if DEBUG_MODE:
print('registering app.%s' % (method))
def add_route_method(path='*', *middleware):
if not isinstance(path, str):
middleware = [path] + [m for m in middleware]
path = '*'
return add_route(path, middleware, method)
        # TODO: temporary solution; revisit how these generated verb helpers are attached
setattr(self, method, add_route_method)
return getattr(self, method)
# mount the middleware for all requests to the path.
def use(self, path='*', *middleware):
# path can be a piece of middleware
# if so, append it to middleware
if not isinstance(path, str):
middleware = [path] + [m for m in middleware]
path = '*'
return self._add_route(path, middleware)
def route(self, path='*'):
pass
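# Illustrative usage sketch (handler and middleware names are hypothetical):
#
#     router = Router()
#
#     @router.get('/users')        # verb helpers are generated in _generate_add_route_method
#     def list_users(req, res):
#         ...
#
#     router.use(log_requests)     # mounted under the wildcard path '*' and method '*'
#
# Every mounted function is appended to routes[path][METHOD] and tagged with an
# increasing `index`, so the registration order can be recovered later.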
|
the-stack_0_14467 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lstm_object_detection.tf_sequence_example_decoder."""
import numpy as np
import tensorflow as tf
from lstm_object_detection.inputs import tf_sequence_example_decoder
from object_detection.core import standard_fields as fields
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
class TFSequenceExampleDecoderTest(tf.test.TestCase):
"""Tests for sequence example decoder."""
def _EncodeImage(self, image_tensor, encoding_type='jpeg'):
with self.test_session():
if encoding_type == 'jpeg':
image_encoded = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_encoded
def _DecodeImage(self, image_encoded, encoding_type='jpeg'):
with self.test_session():
if encoding_type == 'jpeg':
image_decoded = tf.image.decode_jpeg(tf.constant(image_encoded)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_decoded
def testDecodeJpegImageAndBoundingBox(self):
"""Test if the decoder can correctly decode the image and bounding box.
A set of random images (represented as an image tensor) is first decoded as
the groundtrue image. Meanwhile, the image tensor will be encoded and pass
through the sequence example, and then decoded as images. The groundtruth
image and the decoded image are expected to be equal. Similar tests are
also applied to labels such as bounding box.
"""
image_tensor = np.random.randint(256, size=(256, 256, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
decoded_jpeg = self._DecodeImage(encoded_jpeg)
sequence_example = example_pb2.SequenceExample(
feature_lists=feature_pb2.FeatureLists(
feature_list={
'image/encoded':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[encoded_jpeg])),
]),
'bbox/xmin':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[0.0])),
]),
'bbox/xmax':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[1.0]))
]),
'bbox/ymin':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[0.0])),
]),
'bbox/ymax':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[1.0]))
]),
})).SerializeToString()
example_decoder = tf_sequence_example_decoder.TFSequenceExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(sequence_example))
# Test tensor dict image dimension.
self.assertAllEqual(
(tensor_dict[fields.InputDataFields.image].get_shape().as_list()),
[None, None, None, 3])
with self.test_session() as sess:
tensor_dict[fields.InputDataFields.image] = tf.squeeze(
tensor_dict[fields.InputDataFields.image])
tensor_dict[fields.InputDataFields.groundtruth_boxes] = tf.squeeze(
tensor_dict[fields.InputDataFields.groundtruth_boxes])
tensor_dict = sess.run(tensor_dict)
# Test decoded image.
self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image])
# Test decoded bounding box.
self.assertAllEqual([0.0, 0.0, 1.0, 1.0],
tensor_dict[fields.InputDataFields.groundtruth_boxes])
if __name__ == '__main__':
tf.test.main()
|
the-stack_0_14468 | """Module for intervention access strategy functions
Determining whether or not to provide access to a given intervention
for a user is occasionally tricky business. By way of the access_strategies
property on all interventions, one can add additional criteria by defining a
function here (or elsewhere) and adding it to the desired intervention.
function signature: takes named parameters (intervention, user) and returns
a boolean - True grants access (and short circuits further access tests),
False does not.
NB - several functions are closures returning access_strategy functions with
the parameters given to the closures.
"""
from datetime import datetime
import json
import sys
from flask import current_app, url_for
from flask_babel import gettext as _
from sqlalchemy import UniqueConstraint, and_
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from ..database import db
from ..date_tools import localize_datetime
from ..system_uri import DECISION_SUPPORT_GROUP, TRUENTH_CLINICAL_CODE_SYSTEM
from .clinical_constants import CC
from .codeable_concept import CodeableConcept
from .coding import Coding
from .identifier import Identifier
from .intervention import INTERVENTION, Intervention, UserIntervention
from .organization import Organization, OrganizationIdentifier, OrgTree
from .overall_status import OverallStatus
from .procedure_codes import known_treatment_started
from .role import Role
# ##
# # functions implementing the 'access_strategy' API
# ##
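# A minimal sketch of the access_strategy contract described in the module
# docstring (illustrative only, not one of the production strategies below):
#
#     def example_strategy(some_config):
#         def check(intervention, user):
#             # returning True grants access and short-circuits further checks
#             return bool(some_config) and user is not None
#         return check
#
# The closures below follow the same shape: configuration is captured up front
# and a callable taking (intervention, user) and returning a boolean comes back.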
__log_strats = None
def _log(**kwargs):
"""Wrapper to log all the access lookup results within"""
# get config value if haven't yet
global __log_strats
if __log_strats is None:
__log_strats = current_app.config.get("LOG_DEBUG_STRATS", False)
if __log_strats:
msg = kwargs.get('message', '') # optional
current_app.logger.debug(
"{func_name} returning {result} for {user} on intervention "
"{intervention}".format(**kwargs) + msg)
def limit_by_clinic_w_id(
identifier_value, identifier_system=DECISION_SUPPORT_GROUP,
combinator='any', include_children=True):
"""Requires user is associated with {any,all} clinics with identifier
:param identifier_value: value string for identifer associated with org(s)
:param identifier_system: system string for identifier, defaults to
DECISION_SUPPORT_GROUP
:param combinator: determines if the user must be in 'any' (default) or
'all' of the clinics in the given list. NB combining 'all' with
include_children=True would mean all orgs in the list AND all chidren of
all orgs in list must be associated with the user for a true result.
:param include_children: include children in the organization tree if
set (default), otherwise, only include the organizations in the list
"""
try:
identifier = Identifier.query.filter_by(
_value=identifier_value, system=identifier_system).one()
except NoResultFound:
raise ValueError(
"strategy names non-existing Identifier({}, {})".format(
identifier_value, identifier_system))
orgs = Organization.query.join(OrganizationIdentifier).filter(and_(
Organization.id == OrganizationIdentifier.organization_id,
OrganizationIdentifier.identifier_id == identifier.id)).all()
if include_children:
ot = OrgTree()
required = {o for og in orgs for o in ot.here_and_below_id(og.id)}
else:
required = set((o.id for o in orgs))
if combinator not in ('any', 'all'):
raise ValueError("unknown value {} for combinator, must be any or all")
def user_registered_with_all_clinics(intervention, user):
has = set((o.id for o in user.organizations))
if required.intersection(has) == required:
_log(result=True, func_name='limit_by_clinic_list', user=user,
intervention=intervention.name)
return True
def user_registered_with_any_clinics(intervention, user):
has = set((o.id for o in user.organizations))
if not required.isdisjoint(has):
_log(result=True, func_name='limit_by_clinic_list', user=user,
intervention=intervention.name)
return True
return (
user_registered_with_all_clinics if combinator == 'all'
else user_registered_with_any_clinics)
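# Example function_details payload that AccessStrategy.instantiate would use to
# build the strategy above (the identifier value is hypothetical):
#
#     {"function": "limit_by_clinic_w_id",
#      "kwargs": [{"name": "identifier_value", "value": "example_study_group"},
#                 {"name": "combinator", "value": "any"}]}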
def not_in_clinic_w_id(
identifier_value, identifier_system=DECISION_SUPPORT_GROUP,
include_children=True):
"""Requires user isn't associated with any clinic in the list
:param identifier_value: value string for identifer associated with org(s)
:param identifier_system: system string for identifier, defaults to
DECISION_SUPPORT_GROUP
:param include_children: include children in the organization tree if
set (default), otherwise, only include the organizations directly
associated with the identifier
"""
try:
identifier = Identifier.query.filter_by(
_value=identifier_value, system=identifier_system).one()
except NoResultFound:
raise ValueError(
"strategy names non-existing Identifier({}, {})".format(
identifier_value, identifier_system))
orgs = Organization.query.join(OrganizationIdentifier).filter(and_(
Organization.id == OrganizationIdentifier.organization_id,
OrganizationIdentifier.identifier_id == identifier.id)).all()
if include_children:
ot = OrgTree()
dont_want = {o for og in orgs for o in ot.here_and_below_id(og.id)}
else:
dont_want = set((o.id for o in orgs))
def user_not_registered_with_clinics(intervention, user):
has = set((o.id for o in user.organizations))
if has.isdisjoint(dont_want):
_log(result=True, func_name='not_in_clinic_list', user=user,
intervention=intervention.name)
return True
return user_not_registered_with_clinics
def in_role_list(role_list):
"""Requires user is associated with any role in the list"""
roles = []
for role in role_list:
try:
role = Role.query.filter_by(
name=role).one()
roles.append(role)
except NoResultFound:
raise ValueError("role '{}' not found".format(role))
except MultipleResultsFound:
raise ValueError("more than one role named '{}'"
"found".format(role))
required = set(roles)
def user_has_given_role(intervention, user):
has = set(user.roles)
if has.intersection(required):
_log(result=True, func_name='in_role_list', user=user,
intervention=intervention.name)
return True
return user_has_given_role
def not_in_role_list(role_list):
"""Requires user isn't associated with any role in the list"""
roles = []
for role in role_list:
try:
role = Role.query.filter_by(
name=role).one()
roles.append(role)
except NoResultFound:
raise ValueError("role '{}' not found".format(role))
except MultipleResultsFound:
raise ValueError("more than one role named '{}'"
"found".format(role))
dont_want = set(roles)
def user_not_given_role(intervention, user):
has = set(user.roles)
if has.isdisjoint(dont_want):
_log(result=True, func_name='not_in_role_list', user=user,
intervention=intervention.name)
return True
return user_not_given_role
def allow_if_not_in_intervention(intervention_name):
"""Strategy API checks user does not belong to named intervention"""
exclusive_intervention = getattr(INTERVENTION, intervention_name)
def user_not_in_intervention(intervention, user):
if not exclusive_intervention.quick_access_check(user):
_log(result=True, func_name='user_not_in_intervention', user=user,
intervention=intervention.name)
return True
return user_not_in_intervention
def tx_begun(boolean_value):
"""Returns strategy function testing if user is known to have started Tx
:param boolean_value: true for known treatment started (i.e. procedure
indicating tx has begun), false to confirm a user doesn't have
a procedure indicating tx has begun
"""
if boolean_value == 'true':
check_func = known_treatment_started
elif boolean_value == 'false':
def check_func(u):
return not known_treatment_started(u)
else:
raise ValueError("expected 'true' or 'false' for boolean_value")
def user_has_desired_tx(intervention, user):
return check_func(user)
return user_has_desired_tx
def observation_check(display, boolean_value, invert_logic=False):
"""Returns strategy function for a particular observation and logic value
:param display: observation coding.display from
TRUENTH_CLINICAL_CODE_SYSTEM
:param boolean_value: ValueQuantity boolean true or false expected
:param invert_logic: Effective binary ``not`` to apply to test. If set,
will return True only if given observation with boolean_value is NOT
defined for user
NB a history of observations is maintained, with the most recent taking
precedence.
"""
try:
coding = Coding.query.filter_by(
system=TRUENTH_CLINICAL_CODE_SYSTEM, display=display).one()
except NoResultFound:
raise ValueError("coding.display '{}' not found".format(display))
try:
cc = CodeableConcept.query.filter(
CodeableConcept.codings.contains(coding)).one()
except NoResultFound:
raise ValueError("codeable_concept'{}' not found".format(coding))
if boolean_value == 'true':
vq = CC.TRUE_VALUE
elif boolean_value == 'false':
vq = CC.FALSE_VALUE
else:
raise ValueError("boolean_value must be 'true' or 'false'")
def user_has_matching_observation(intervention, user):
value, status = user.fetch_value_status_for_concept(
codeable_concept=cc)
if value == vq:
_log(result=True, func_name='observation_check', user=user,
intervention=intervention.name,
message='{}:{}'.format(coding.display, vq.value))
return True if not invert_logic else False
return False if not invert_logic else True
return user_has_matching_observation
def combine_strategies(**kwargs):
"""Make multiple strategies into a single statement
The nature of the access lookup returns True for the first
success in the list of strategies for an intervention. Use
this method to chain multiple strategies together into a logical **and**
fashion rather than the built in locical **or**.
NB - kwargs must have keys such as 'strategy_n', 'strategy_n_kwargs'
for every 'n' strategies being combined, starting at 1. Set arbitrary
limit of 6 strategies for time being.
Nested strategies may actually want a logical 'OR'. Optional kwarg
`combinator` takes values {'any', 'all'} - default 'all' means all
strategies must evaluate true. 'any' means just one must eval true for a
positive result.
"""
strats = []
arbitrary_limit = 7
if 'strategy_{}'.format(arbitrary_limit) in kwargs:
raise ValueError(
"only supporting %d combined strategies", arbitrary_limit - 1)
for i in range(1, arbitrary_limit):
if 'strategy_{}'.format(i) not in kwargs:
break
func_name = kwargs['strategy_{}'.format(i)]
func_kwargs = {}
for argset in kwargs['strategy_{}_kwargs'.format(i)]:
func_kwargs[argset['name']] = argset['value']
func = getattr(sys.modules[__name__], func_name)
strats.append(func(**func_kwargs))
def call_all_combined(intervention, user):
"""Returns True if ALL of the combined strategies return True"""
for strategy in strats:
if not strategy(intervention, user):
_log(
result=False, func_name='combine_strategies', user=user,
intervention=intervention.name)
return
# still here? effective AND passed as all returned true
_log(
result=True, func_name='combine_strategies', user=user,
intervention=intervention.name)
return True
def call_any_combined(intervention, user):
"""Returns True if ANY of the combined strategies return True"""
for strategy in strats:
if strategy(intervention, user):
_log(
result=True, func_name='combine_strategies', user=user,
intervention=intervention.name)
return True
# still here? effective ANY failed as none returned true
_log(
result=False, func_name='combine_strategies', user=user,
intervention=intervention.name)
return
combinator = kwargs.get('combinator', 'all')
if combinator == 'any':
return call_any_combined
elif combinator == 'all':
return call_all_combined
else:
raise ValueError("unrecognized value {} for `combinator`, "
"limited to {'any', 'all'}").format(combinator)
class AccessStrategy(db.Model):
"""ORM to persist access strategies on an intervention
The function_details field contains JSON defining which strategy to
use and how it should be instantiated by one of the closures implementing
the access_strategy interface. Said closures must be defined in this
module (a security measure to keep unsanitized code out).
"""
__tablename__ = 'access_strategies'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, nullable=False)
description = db.Column(db.Text)
intervention_id = db.Column(
db.ForeignKey('interventions.id'), nullable=False)
rank = db.Column(db.Integer)
function_details = db.Column(JSONB, nullable=False)
__table_args__ = (UniqueConstraint('intervention_id', 'rank',
name='rank_per_intervention'),)
def __str__(self):
"""Log friendly string format"""
return (
"AccessStrategy: {0.name} {0.description} {0.rank}"
"{0.function_details}").format(self)
@classmethod
def from_json(cls, data):
strat = cls()
return strat.update_from_json(data)
def update_from_json(self, data):
try:
self.name = data['name']
if 'id' in data:
self.id = data['id']
if 'intervention_name' in data:
intervention = Intervention.query.filter_by(
name=data['intervention_name']).first()
if not intervention:
raise ValueError(
'Intervention not found {}. (NB: new interventions '
'require `seed -i` to import)'.format(
data['intervention_name']))
self.intervention_id = intervention.id
if 'description' in data:
self.description = data['description']
if 'rank' in data:
self.rank = data['rank']
self.function_details = json.dumps(data['function_details'])
# validate the given details by attempting to instantiate
self.instantiate()
except Exception as e:
raise ValueError("AccessStrategy instantiation error: {}".format(
e))
return self
def as_json(self):
"""Return self in JSON friendly dictionary"""
d = {
"name": self.name,
"resourceType": 'AccessStrategy'
}
d["function_details"] = (
json.loads(self.function_details) if self.function_details
else None)
d['intervention_name'] = (
Intervention.query.get(self.intervention_id).name
if self.intervention_id else None)
if self.id:
d['id'] = self.id
if self.rank:
d['rank'] = self.rank
if self.description:
d['description'] = self.description
return d
def instantiate(self):
"""Bring the serialized access strategy function to life
Using the JSON in self.function_details, instantiate the
function and return it ready to use.
"""
details = json.loads(self.function_details)
if 'function' not in details:
raise ValueError("'function' not found in function_details")
if 'kwargs' not in details:
raise ValueError("'kwargs' not found in function_details")
func_name = details['function']
# limit to this module
func = getattr(sys.modules[__name__], func_name)
kwargs = {}
for argset in details['kwargs']:
kwargs[argset['name']] = argset['value']
return func(**kwargs)
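# For reference, a complete AccessStrategy resource as accepted by from_json();
# every value below is illustrative:
#
#     {"name": "example strategy",
#      "intervention_name": "self_management",
#      "rank": 1,
#      "function_details": {
#          "function": "observation_check",
#          "kwargs": [{"name": "display", "value": "biopsy"},
#                     {"name": "boolean_value", "value": "true"}]}}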
|
the-stack_0_14469 | # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import hashlib
import os
from pex.interpreter import PythonInterpreter
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.targets.python_target import PythonTarget
from pants.base.fingerprint_strategy import DefaultFingerprintHashingMixin, FingerprintStrategy
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.task.task import Task
from pants.util.dirutil import safe_mkdir_for
class PythonInterpreterFingerprintStrategy(DefaultFingerprintHashingMixin, FingerprintStrategy):
def __init__(self, python_setup):
self.python_setup = python_setup
def compute_fingerprint(self, python_target):
# Consider the target's compatibility requirements, and if those are missing then fall back
# to the global interpreter constraints. Only these two values can affect the selected interpreter.
hash_elements_for_target = sorted(
self.python_setup.compatibility_or_constraints(python_target.compatibility)
)
if not hash_elements_for_target:
return None
hasher = hashlib.sha1()
for element in hash_elements_for_target:
hasher.update(element.encode())
return hasher.hexdigest()
class SelectInterpreter(Task):
"""Select an Python interpreter that matches the constraints of all targets in the working
set."""
@classmethod
def implementation_version(cls):
# TODO(John Sirois): Fixup this task to use VTS results_dirs. Right now version bumps aren't
# effective in dealing with workdir data format changes.
return super().implementation_version() + [("SelectInterpreter", 4)]
@classmethod
def subsystem_dependencies(cls):
return super().subsystem_dependencies() + (PythonInterpreterCache,)
@classmethod
def product_types(cls):
return [PythonInterpreter]
@property
def _interpreter_cache(self):
return PythonInterpreterCache.global_instance()
def execute(self):
# NB: Downstream product consumers may need the selected interpreter for use with
# any type of importable Python target, including `PythonRequirementLibrary` targets
# (for use with the `repl` goal, for instance). For interpreter selection,
# we only care about targets with compatibility constraints.
python_tgts_and_reqs = self.context.targets(
lambda tgt: isinstance(tgt, (PythonTarget, PythonRequirementLibrary))
)
if not python_tgts_and_reqs:
return
python_tgts = [tgt for tgt in python_tgts_and_reqs if isinstance(tgt, PythonTarget)]
fs = PythonInterpreterFingerprintStrategy(python_setup=self._interpreter_cache.python_setup)
with self.invalidated(python_tgts, fingerprint_strategy=fs) as invalidation_check:
# If there are no constraints, meaning no global constraints nor compatibility requirements on
# the targets, we still go through the motions of selecting an interpreter, to prevent
# downstream tasks from having to check for this special case.
target_set_id = (
"no_constraints"
if not invalidation_check.all_vts
else VersionedTargetSet.from_versioned_targets(
invalidation_check.all_vts
).cache_key.hash
)
interpreter_path_file = self._interpreter_path_file(target_set_id)
interpreter = self._get_interpreter(interpreter_path_file, python_tgts)
self.context.products.register_data(PythonInterpreter, interpreter)
def _select_interpreter(self, interpreter_path_file, targets):
interpreter = self._interpreter_cache.select_interpreter_for_targets(targets)
safe_mkdir_for(interpreter_path_file)
with open(interpreter_path_file, "w") as outfile:
outfile.write(f"{interpreter.binary}\n")
return interpreter
def _interpreter_path_file(self, target_set_id):
# NB: The file name must be changed when its format changes. See the TODO in
# `implementation_version` above for more.
#
# The historical names to avoid:
# - interpreter.path
# - interpreter.info
return os.path.join(self.workdir, target_set_id, "interpreter.binary")
def _get_interpreter(self, interpreter_path_file, targets):
if os.path.exists(interpreter_path_file):
with open(interpreter_path_file, "r") as infile:
binary = infile.read().strip()
try:
return PythonInterpreter.from_binary(binary)
except PythonInterpreter.Error:
self.context.log.info(
"Stale interpreter reference detected: {}, removing reference and "
"selecting a new interpreter.".format(binary)
)
os.remove(interpreter_path_file)
return self._select_interpreter(interpreter_path_file, targets)
|
the-stack_0_14473 | from pathlib import Path
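# collect the unique lines found across every .txt file in the filesystem root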
new_keys = set()
for file in Path('/').glob('*.txt'):
with file.open('r') as f:
for line in f.readlines():
new_keys.add(line.strip('\n'))
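# write the de-duplicated keys back out to a single file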
with open('condenced.txt', 'w') as f:
for key in new_keys:
f.write(key + '\n')
|
the-stack_0_14474 | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
SLHAFileForPythia8 = cms.string('Configuration/Generator/data/GMSB/GMSB_Lambda100TeV_CTau1200cm.slha'),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:tau0Max = 10000000',
'SUSY:all on',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
the-stack_0_14475 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Sarielsaz Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test a node with the -disablewallet option.
- Test that validateaddress RPC works when running with -disablewallet
- Test that it is not possible to mine to an invalid address.
"""
from test_framework.test_framework import SarielsazTestFramework
from test_framework.util import *
class DisableWalletTest (SarielsazTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-disablewallet"]]
def run_test (self):
# Make sure wallet is really disabled
assert_raises_rpc_error(-32601, 'Method not found', self.nodes[0].getwalletinfo)
x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
assert(x['isvalid'] == False)
x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
assert(x['isvalid'] == True)
# Checking mining to an address without a wallet. Generating to a valid address should succeed
# but generating to an invalid address will fail.
self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].generatetoaddress, 1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
if __name__ == '__main__':
DisableWalletTest ().main ()
|
the-stack_0_14476 |
from cachelib import SimpleCache
from slickqaweb.api.project import get_project, get_release, get_build
cache = SimpleCache()
def get_project_release_build_ids(project_name, release_name, build_name):
retval = []
project = None
release = None
build = None
if project_name is None:
retval.append(None)
else:
cache_key = "project-{}".format(project_name)
if cache.has(cache_key):
retval.append(cache.get(cache_key))
else:
project = get_project(project_name)
if project is not None:
retval.append(project.id)
cache.set(cache_key, project.id)
else:
retval.append(None)
    if release_name is None:
        retval.append(None)
    else:
        cache_key = "release-{}".format(release_name)
        if cache.has(cache_key):
            retval.append(cache.get(cache_key))
        else:
            # when the project id came from the cache there is no project object
            # to query with, so fall back to None instead of skipping the append
            release = get_release(project, release_name) if project is not None else None
            if release is not None:
                retval.append(release.id)
                cache.set(cache_key, release.id)
            else:
                retval.append(None)
    if build_name is None:
        retval.append(None)
    else:
        cache_key = "build-{}".format(build_name)
        if cache.has(cache_key):
            retval.append(cache.get(cache_key))
        else:
            build = get_build(release, build_name) if release is not None else None
            if build is not None:
                retval.append(build.id)
                cache.set(cache_key, build.id)
            else:
                retval.append(None)
return retval
|
the-stack_0_14484 | import os
from tqdm import tqdm
import numpy as np
import pandas as pd
import cv2
import time
import re
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import torch.utils.data as data
from torchvision import transforms
import torch
import pdb
import argparse
from src import Networks
from sklearn.metrics import confusion_matrix
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
backends = ['opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface']
from deepface import DeepFace
from deepface.extendedmodels import Age
from deepface.commons import functions, realtime, distance as dst
from deepface.detectors import OpenCvWrapper
input_shape = (224, 224);
input_shape_x = input_shape[0];
input_shape_y = input_shape[1]
text_color = (255, 255, 255)
frame_threshold = 1
time_threshold = 0.1
tic = time.time()
data_transforms_test = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
#emotion_model = DeepFace.build_model('Emotion')
emotion_model = Networks.ResNet18_ARM___RAF()
print("Loading pretrained weights...models/RAF-DB/epoch59_acc0.9205.pth")
checkpoint = torch.load('./models/RAF-DB/epoch59_acc0.9205.pth')
emotion_model.load_state_dict(checkpoint["model_state_dict"], strict=False)
emotion_model = emotion_model.cuda()
print("Emotion model loaded")
toc = time.time()
print("Facial attibute analysis models loaded in ", toc - tic, " seconds")
pivot_img_size = 112 # face recognition result image
# -----------------------
opencv_path = OpenCvWrapper.get_opencv_path()
face_detector_path = opencv_path + "haarcascade_frontalface_default.xml"
face_cascade = cv2.CascadeClassifier(face_detector_path)
# -----------------------
freeze = False
face_detected = False
face_included_frames = 0 # freeze screen if face detected sequantially 5 frames
freezed_frame = 0
tic = time.time()
cap = cv2.VideoCapture(0) # webcam
_cnt_frame = 0
emotion_model.eval()
while True:
_start = time.time()
ret, img = cap.read()
_cnt_frame += 1
if img is None:
break
raw_img = img.copy()
resolution = img.shape
resolution_x = img.shape[1];
resolution_y = img.shape[0]
if freeze == False:
faces = face_cascade.detectMultiScale(img, 1.3, 5)
fc_img, faces = DeepFace.detectFace(img, detector_backend = backends[1])
if len(faces) == 0:
face_included_frames = 0
else:
faces = []
detected_faces = []
face_index = 0
if len(faces)==0:
faces = faces
else:
faces = [faces]
for (x, y, w, h) in faces:
if w > 130: # discard small detected faces
face_detected = True
if face_index == 0:
face_included_frames = face_included_frames + 1 # increase frame for a single face
cv2.rectangle(img, (x, y), (x + w, y + h), (67, 67, 67), 1) # draw rectangle to main image
cv2.putText(img, str(frame_threshold - face_included_frames), (int(x + w / 4), int(y + h / 1.5)),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 2)
detected_face = img[int(y):int(y + h), int(x):int(x + w)] # crop detected face
# -------------------------------------
detected_faces.append((x, y, w, h))
face_index = face_index + 1
# -------------------------------------
if face_detected == True and face_included_frames == frame_threshold and freeze == False:
freeze = True
# base_img = img.copy()
base_img = raw_img.copy()
detected_faces_final = detected_faces.copy()
tic = time.time()
if freeze == True:
toc = time.time()
if (toc - tic) < time_threshold:
#
# if freezed_frame == 0:
freeze_img = base_img.copy()
# freeze_img = np.zeros(resolution, np.uint8) #here, np.uint8 handles showing white area issue
emotion_predictions = np.zeros((7), dtype=float)
for detected_face in detected_faces_final:
x = detected_face[0];
y = detected_face[1]
w = detected_face[2];
h = detected_face[3]
cv2.rectangle(freeze_img, (x, y), (x + w, y + h), (67, 67, 67), 1) # draw rectangle to main image
# -------------------------------
# apply deep learning for custom_face
custom_face = base_img[y:y + h, x:x + w]
# -------------------------------
# facial attribute analysis
gray_img = torch.unsqueeze(data_transforms_test(custom_face),0)
# emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral'] #Original
emotion_labels = ['Surprise', 'Fear', 'Disgust', 'Happy', 'Sad','Angry','Neutral']
outputs, _ = emotion_model(gray_img.cuda())
_emotion_predictions = torch.softmax(outputs,1)
# _emotion_predictions = emotion_model.predict(gray_img)[0, :]
emotion_predictions = torch.squeeze(_emotion_predictions).detach().cpu().numpy()
sum_of_predictions = emotion_predictions.sum()
mood_items = []
print('===================================================================================')
print('%d of frames' % (_cnt_frame))
for i in range(0, len(emotion_labels)):
mood_item = []
emotion_label = emotion_labels[i]
emotion_prediction = 100 * emotion_predictions[i] / sum_of_predictions
mood_item.append(emotion_label)
mood_item.append(emotion_prediction)
mood_items.append(mood_item)
print('Emotion: %s - Confidence: %f' % (emotion_labels[i], emotion_prediction))
print('===================================================================================')
emotion_df = pd.DataFrame(mood_items, columns=["emotion", "score"]) # pd Dataset emotion dataset.
emotion_df = emotion_df.sort_values(by=["score"], ascending=False).reset_index(
drop=True) # pd Dataset emotion dataset.
'''
'emotion_df' contains emotion labels and the scores of each emotion class.
'''
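                    # illustrative contents after the sort (scores are hypothetical
                    # percentages, one row per emotion class, highest first):
                    #        emotion  score
                    #     0    Happy   71.3
                    #     1  Neutral   19.8
                    #     2      Sad    4.1   ... and so on for the remaining classes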
overlay = freeze_img.copy()
opacity = 0.4
if x + w + pivot_img_size < resolution_x:
# right
cv2.rectangle(freeze_img
# , (x+w,y+20)
, (x + w, y)
, (x + w + pivot_img_size, y + h)
, (64, 64, 64), cv2.FILLED)
cv2.addWeighted(overlay, opacity, freeze_img, 1 - opacity, 0, freeze_img)
elif x - pivot_img_size > 0:
# left
cv2.rectangle(freeze_img
# , (x-pivot_img_size,y+20)
, (x - pivot_img_size, y)
, (x, y + h)
, (64, 64, 64), cv2.FILLED)
cv2.addWeighted(overlay, opacity, freeze_img, 1 - opacity, 0, freeze_img)
assert isinstance(emotion_df.iterrows, object)
for index, instance in emotion_df.iterrows():
emotion_label = "%s " % (instance['emotion'])
emotion_score = instance['score'] / 100
bar_x = 35 # this is the size if an emotion is 100%
bar_x = int(bar_x * emotion_score)
if x + w + pivot_img_size < resolution_x:
text_location_y = y + 20 + (index + 1) * 20
text_location_x = x + w
if text_location_y < y + h:
cv2.putText(freeze_img, emotion_label, (text_location_x, text_location_y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.rectangle(freeze_img
, (x + w + 70, y + 13 + (index + 1) * 20)
, (x + w + 70 + bar_x, y + 13 + (index + 1) * 20 + 5)
, (255, 255, 255), cv2.FILLED)
elif x - pivot_img_size > 0:
text_location_y = y + 20 + (index + 1) * 20
text_location_x = x - pivot_img_size
if text_location_y <= y + h:
cv2.putText(freeze_img, emotion_label, (text_location_x, text_location_y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
cv2.rectangle(freeze_img
, (x - pivot_img_size + 70, y + 13 + (index + 1) * 20)
, (x - pivot_img_size + 70 + bar_x, y + 13 + (index + 1) * 20 + 5)
, (255, 255, 255), cv2.FILLED)
# -------------------------------
# face_224 = functions.preprocess_face(img = custom_face, target_size = (224, 224), grayscale = False, enforce_detection = False)
tic = time.time() # in this way, freezed image can show 5 seconds
cv2.imshow('img', freeze_img)
freezed_frame = freezed_frame + 1
face_detected = False
face_included_frames = 0
freeze = False
freezed_frame = 0
else:
cv2.imshow('img', img)
print('Execution speed: %f sec' % (time.time() - _start))
if cv2.waitKey(1) & 0xFF == ord('q'): # press q to quit
break
# kill open cv things
cap.release()
cv2.destroyAllWindows()
|
the-stack_0_14485 |
from itertools import product
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from numpy.testing import assert_array_equal, assert_array_almost_equal
from seaborn._core.moves import Dodge, Jitter, Shift, Stack
from seaborn._core.rules import categorical_order
from seaborn._core.groupby import GroupBy
import pytest
class MoveFixtures:
@pytest.fixture
def df(self, rng):
n = 50
data = {
"x": rng.choice([0., 1., 2., 3.], n),
"y": rng.normal(0, 1, n),
"grp2": rng.choice(["a", "b"], n),
"grp3": rng.choice(["x", "y", "z"], n),
"width": 0.8,
"baseline": 0,
}
return pd.DataFrame(data)
@pytest.fixture
def toy_df(self):
data = {
"x": [0, 0, 1],
"y": [1, 2, 3],
"grp": ["a", "b", "b"],
"width": .8,
"baseline": 0,
}
return pd.DataFrame(data)
@pytest.fixture
def toy_df_widths(self, toy_df):
toy_df["width"] = [.8, .2, .4]
return toy_df
@pytest.fixture
def toy_df_facets(self):
data = {
"x": [0, 0, 1, 0, 1, 2],
"y": [1, 2, 3, 1, 2, 3],
"grp": ["a", "b", "a", "b", "a", "b"],
"col": ["x", "x", "x", "y", "y", "y"],
"width": .8,
"baseline": 0,
}
return pd.DataFrame(data)
class TestJitter(MoveFixtures):
def get_groupby(self, data, orient):
other = {"x": "y", "y": "x"}[orient]
variables = [v for v in data if v not in [other, "width"]]
return GroupBy(variables)
def check_same(self, res, df, *cols):
for col in cols:
assert_series_equal(res[col], df[col])
def check_pos(self, res, df, var, limit):
assert (res[var] != df[var]).all()
assert (res[var] < df[var] + limit / 2).all()
assert (res[var] > df[var] - limit / 2).all()
def test_width(self, df):
width = .4
orient = "x"
groupby = self.get_groupby(df, orient)
res = Jitter(width=width)(df, groupby, orient)
self.check_same(res, df, "y", "grp2", "width")
self.check_pos(res, df, "x", width * df["width"])
def test_x(self, df):
val = .2
orient = "x"
groupby = self.get_groupby(df, orient)
res = Jitter(x=val)(df, groupby, orient)
self.check_same(res, df, "y", "grp2", "width")
self.check_pos(res, df, "x", val)
def test_y(self, df):
val = .2
orient = "x"
groupby = self.get_groupby(df, orient)
res = Jitter(y=val)(df, groupby, orient)
self.check_same(res, df, "x", "grp2", "width")
self.check_pos(res, df, "y", val)
def test_seed(self, df):
kws = dict(width=.2, y=.1, seed=0)
orient = "x"
groupby = self.get_groupby(df, orient)
res1 = Jitter(**kws)(df, groupby, orient)
res2 = Jitter(**kws)(df, groupby, orient)
for var in "xy":
assert_series_equal(res1[var], res2[var])
class TestDodge(MoveFixtures):
# First some very simple toy examples
def test_default(self, toy_df):
groupby = GroupBy(["x", "grp"])
res = Dodge()(toy_df, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3]),
assert_array_almost_equal(res["x"], [-.2, .2, 1.2])
assert_array_almost_equal(res["width"], [.4, .4, .4])
def test_fill(self, toy_df):
groupby = GroupBy(["x", "grp"])
res = Dodge(empty="fill")(toy_df, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3]),
assert_array_almost_equal(res["x"], [-.2, .2, 1])
assert_array_almost_equal(res["width"], [.4, .4, .8])
def test_drop(self, toy_df):
groupby = GroupBy(["x", "grp"])
res = Dodge("drop")(toy_df, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3])
assert_array_almost_equal(res["x"], [-.2, .2, 1])
assert_array_almost_equal(res["width"], [.4, .4, .4])
def test_gap(self, toy_df):
groupby = GroupBy(["x", "grp"])
res = Dodge(gap=.25)(toy_df, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3])
assert_array_almost_equal(res["x"], [-.2, .2, 1.2])
assert_array_almost_equal(res["width"], [.3, .3, .3])
def test_widths_default(self, toy_df_widths):
groupby = GroupBy(["x", "grp"])
res = Dodge()(toy_df_widths, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3])
assert_array_almost_equal(res["x"], [-.08, .32, 1.1])
assert_array_almost_equal(res["width"], [.64, .16, .2])
def test_widths_fill(self, toy_df_widths):
groupby = GroupBy(["x", "grp"])
res = Dodge(empty="fill")(toy_df_widths, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3])
assert_array_almost_equal(res["x"], [-.08, .32, 1])
assert_array_almost_equal(res["width"], [.64, .16, .4])
def test_widths_drop(self, toy_df_widths):
groupby = GroupBy(["x", "grp"])
res = Dodge(empty="drop")(toy_df_widths, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3])
assert_array_almost_equal(res["x"], [-.08, .32, 1])
assert_array_almost_equal(res["width"], [.64, .16, .2])
def test_faceted_default(self, toy_df_facets):
groupby = GroupBy(["x", "grp", "col"])
res = Dodge()(toy_df_facets, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3, 1, 2, 3])
assert_array_almost_equal(res["x"], [-.2, .2, .8, .2, .8, 2.2])
assert_array_almost_equal(res["width"], [.4] * 6)
def test_faceted_fill(self, toy_df_facets):
groupby = GroupBy(["x", "grp", "col"])
res = Dodge(empty="fill")(toy_df_facets, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3, 1, 2, 3])
assert_array_almost_equal(res["x"], [-.2, .2, 1, 0, 1, 2])
assert_array_almost_equal(res["width"], [.4, .4, .8, .8, .8, .8])
def test_faceted_drop(self, toy_df_facets):
groupby = GroupBy(["x", "grp", "col"])
res = Dodge(empty="drop")(toy_df_facets, groupby, "x")
assert_array_equal(res["y"], [1, 2, 3, 1, 2, 3])
assert_array_almost_equal(res["x"], [-.2, .2, 1, 0, 1, 2])
assert_array_almost_equal(res["width"], [.4] * 6)
def test_orient(self, toy_df):
df = toy_df.assign(x=toy_df["y"], y=toy_df["x"])
groupby = GroupBy(["y", "grp"])
res = Dodge("drop")(df, groupby, "y")
assert_array_equal(res["x"], [1, 2, 3])
assert_array_almost_equal(res["y"], [-.2, .2, 1])
assert_array_almost_equal(res["width"], [.4, .4, .4])
# Now tests with slightly more complicated data
@pytest.mark.parametrize("grp", ["grp2", "grp3"])
def test_single_semantic(self, df, grp):
groupby = GroupBy(["x", grp])
res = Dodge()(df, groupby, "x")
levels = categorical_order(df[grp])
w, n = 0.8, len(levels)
shifts = np.linspace(0, w - w / n, n)
shifts -= shifts.mean()
assert_series_equal(res["y"], df["y"])
assert_series_equal(res["width"], df["width"] / n)
for val, shift in zip(levels, shifts):
rows = df[grp] == val
assert_series_equal(res.loc[rows, "x"], df.loc[rows, "x"] + shift)
def test_two_semantics(self, df):
groupby = GroupBy(["x", "grp2", "grp3"])
res = Dodge()(df, groupby, "x")
levels = categorical_order(df["grp2"]), categorical_order(df["grp3"])
w, n = 0.8, len(levels[0]) * len(levels[1])
shifts = np.linspace(0, w - w / n, n)
shifts -= shifts.mean()
assert_series_equal(res["y"], df["y"])
assert_series_equal(res["width"], df["width"] / n)
for (v2, v3), shift in zip(product(*levels), shifts):
rows = (df["grp2"] == v2) & (df["grp3"] == v3)
assert_series_equal(res.loc[rows, "x"], df.loc[rows, "x"] + shift)
class TestStack(MoveFixtures):
def test_basic(self, toy_df):
groupby = GroupBy(["color", "group"])
res = Stack()(toy_df, groupby, "x")
assert_array_equal(res["x"], [0, 0, 1])
assert_array_equal(res["y"], [1, 3, 3])
assert_array_equal(res["baseline"], [0, 1, 0])
def test_faceted(self, toy_df_facets):
groupby = GroupBy(["color", "group"])
res = Stack()(toy_df_facets, groupby, "x")
assert_array_equal(res["x"], [0, 0, 1, 0, 1, 2])
assert_array_equal(res["y"], [1, 3, 3, 1, 2, 3])
assert_array_equal(res["baseline"], [0, 1, 0, 0, 0, 0])
def test_misssing_data(self, toy_df):
df = pd.DataFrame({
"x": [0, 0, 0],
"y": [2, np.nan, 1],
"baseline": [0, 0, 0],
})
res = Stack()(df, None, "x")
assert_array_equal(res["y"], [2, np.nan, 3])
assert_array_equal(res["baseline"], [0, np.nan, 2])
def test_baseline_homogeneity_check(self, toy_df):
toy_df["baseline"] = [0, 1, 2]
groupby = GroupBy(["color", "group"])
move = Stack()
err = "Stack move cannot be used when baselines"
with pytest.raises(RuntimeError, match=err):
move(toy_df, groupby, "x")
class TestShift(MoveFixtures):
def test_default(self, toy_df):
gb = GroupBy(["color", "group"])
res = Shift()(toy_df, gb, "x")
for col in toy_df:
assert_series_equal(toy_df[col], res[col])
@pytest.mark.parametrize("x,y", [(.3, 0), (0, .2), (.1, .3)])
def test_moves(self, toy_df, x, y):
gb = GroupBy(["color", "group"])
res = Shift(x=x, y=y)(toy_df, gb, "x")
assert_array_equal(res["x"], toy_df["x"] + x)
assert_array_equal(res["y"], toy_df["y"] + y)
|
the-stack_0_14487 | """
Plotting model residuals
========================
"""
import numpy as np
import seaborn as sns
sns.set(style="whitegrid")
# Make an example dataset with y ~ x
rs = np.random.RandomState(7)
x = rs.normal(2, 1, 75)
y = 2 + 1.5 * x + rs.normal(0, 2, 75)
# Plot the residuals after fitting a linear model
sns.residplot(x, y, lowess=True, color="g")
|
the-stack_0_14488 | import logging
from moneywagon import (
get_unspent_outputs, CurrentPrice, get_optimal_fee, PushTx,
get_onchain_exchange_rates,
get_current_price)
from moneywagon.core import get_optimal_services, get_magic_bytes
from bitcoin import mktx, sign, pubtoaddr, privtopub
from .crypto_data import crypto_data
from .currency_support import CurrencySupport
class Transaction(object):
def __init__(self, crypto, hex=None, verbose=False):
c = CurrencySupport()
if crypto not in c.supported_currencies('moneywagon', 'transaction'):
form = crypto_data[crypto]['transaction_form']
raise NotImplementedError("%s not yet supported (tx form: %s)" % (
crypto.upper(), form
))
self.change_address = None
self.crypto = crypto
self.fee_satoshi = None
self.outs = []
self.ins = []
self.onchain_rate = None
self.verbose = verbose
if hex:
self.hex = hex
def from_unit_to_satoshi(self, value, unit='satoshi'):
"""
Convert a value to satoshis. units can be any fiat currency.
By default the unit is satoshi.
"""
logging.info("from_unit_to_satoshi : Value: {}, unit: {}".format(value, unit))
if not unit or unit == 'satoshi':
return value
if unit == 'bitcoin' or unit == 'btc':
return value * 1e8
# assume fiat currency that we can convert
convert = get_current_price(self.crypto, unit)
if isinstance(convert, tuple):
convert = convert[0]
logging.info("from_unit_to_satoshi : Convert: {}".format(convert))
return int(value / convert * 1e8)
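    # Worked examples with hypothetical numbers: 0.5 with unit 'btc' gives
    # 50,000,000 satoshis; 10 with unit 'usd' at a price of $20,000/BTC gives
    # int(10 / 20000 * 1e8) = 50,000 satoshis.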
def add_raw_inputs(self, inputs, private_key=None):
"""
Add a set of utxo's to this transaction. This method is better to use if you
want more fine control of which inputs get added to a transaction.
`inputs` is a list of "unspent outputs" (they were 'outputs' to previous transactions,
and 'inputs' to subsiquent transactions).
`private_key` - All inputs will be signed by the passed in private key.
"""
for i in inputs:
self.ins.append(dict(input=i, private_key=private_key))
self.change_address = i['address']
def _get_utxos(self, address, services, **modes):
"""
Using the service fallback engine, get utxos from remote service.
"""
return get_unspent_outputs(
self.crypto, address, services=services,
**modes
)
def private_key_to_address(self, pk):
"""
Convert a private key (in hex format) into an address.
"""
pub = privtopub(pk)
pub_byte, priv_byte = get_magic_bytes(self.crypto)
if priv_byte >= 128:
priv_byte -= 128 #pybitcointools bug
return pubtoaddr(pub, pub_byte)
def add_inputs(self, private_key=None, address=None, amount='all', max_ins=None, password=None, services=None, **modes):
"""
Make call to external service to get inputs from an address and/or private_key.
`amount` is the amount of [currency] worth of inputs (in satoshis) to add from
this address. Pass in 'all' (the default) to use *all* inputs found for this address.
Returned is the number of units (in satoshis) that were added as inputs to this tx.
"""
if private_key:
if private_key.startswith('6P'):
if not password:
raise Exception("Password required for BIP38 encoded private keys")
from .bip38 import Bip38EncryptedPrivateKey
private_key = Bip38EncryptedPrivateKey(self.crypto, private_key).decrypt(password)
address_from_priv = self.private_key_to_address(private_key)
if address and address != address_from_priv:
raise Exception("Invalid Private key")
address = address_from_priv
self.change_address = address
if not services:
services = get_optimal_services(self.crypto, 'unspent_outputs')
total_added_satoshi = 0
ins = 0
for utxo in self._get_utxos(address, services, **modes):
if max_ins and ins >= max_ins:
break
if (amount == 'all' or total_added_satoshi < amount):
ins += 1
self.ins.append(
dict(input=utxo, private_key=private_key)
)
total_added_satoshi += utxo['amount']
return total_added_satoshi, ins
def total_input_satoshis(self):
"""
Add up all the satoshis coming from all input tx's.
"""
just_inputs = [x['input'] for x in self.ins]
return sum([x['amount'] for x in just_inputs])
def select_inputs(self, amount):
'''Maximize transaction priority. Select the oldest inputs,
that are sufficient to cover the spent amount. Then,
remove any unneeded inputs, starting with
the smallest in value.
Returns sum of amounts of inputs selected'''
sorted_txin = sorted(self.ins, key=lambda x:-x['input']['confirmations'])
total_amount = 0
for (idx, tx_in) in enumerate(sorted_txin):
total_amount += tx_in['input']['amount']
if (total_amount >= amount):
break
sorted_txin = sorted(sorted_txin[:idx+1], key=lambda x:x['input']['amount'])
for (idx, tx_in) in enumerate(sorted_txin):
value = tx_in['input']['amount']
if (total_amount - value < amount):
break
else:
total_amount -= value
self.ins = sorted_txin[idx:]
return total_amount
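    # Illustrative walk-through with hypothetical amounts: spending 5 with inputs
    # of 4, 2 and 3 (oldest first), the first pass keeps [4, 2] once the running
    # total reaches 6 >= 5; the second pass then tries to drop the smallest kept
    # input (2), but 6 - 2 < 5, so both inputs stay and 6 is returned.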
def add_output(self, address, value, unit='satoshi'):
"""
Add an output (a person who will receive funds via this tx).
If no unit is specified, satoshi is implied.
"""
value_satoshi = self.from_unit_to_satoshi(value, unit)
if self.verbose:
print("Adding output of: %s satoshi (%.8f)" % (
value_satoshi, (value_satoshi / 1e8)
))
self.outs.append({
'address': address,
'value': value_satoshi
})
def onchain_exchange(self, withdraw_crypto, withdraw_address, value, unit='satoshi'):
"""
This method is like `add_output` but it sends to another
"""
self.onchain_rate = get_onchain_exchange_rates(
self.crypto, withdraw_crypto, best=True, verbose=self.verbose
)
exchange_rate = float(self.onchain_rate['rate'])
result = self.onchain_rate['service'].get_onchain_exchange_address(
self.crypto, withdraw_crypto, withdraw_address
)
address = result['deposit']
value_satoshi = self.from_unit_to_satoshi(value, unit)
if self.verbose:
print("Adding output of: %s satoshi (%.8f) via onchain exchange, converting to %s %s" % (
value_satoshi, (value_satoshi / 1e8),
exchange_rate * value_satoshi / 1e8, withdraw_crypto.upper()
))
self.outs.append({
'address': address,
'value': value_satoshi
})
def fee(self, value=None, unit='satoshi'):
"""
Set the miner fee, if unit is not set, assumes value is satoshi.
If using 'optimal', make sure you have already added all outputs.
"""
convert = None
if not value:
# no fee was specified, use $0.02 as default.
# convert = get_current_price(self.crypto, "usd")
# self.fee_satoshi = int(0.02 / convert * 1e8)
self.fee_satoshi = int(2000)
verbose = "Using default fee of:"
elif value == 'optimal':
# self.fee_satoshi = get_optimal_fee(
# self.crypto, self.estimate_size(), verbose=self.verbose
# )
self.fee_satoshi = int(2000)
verbose = "Using optimal fee of:"
else:
self.fee_satoshi = self.from_unit_to_satoshi(value, unit)
verbose = "Using manually set fee of:"
if self.verbose:
if not convert:
convert = get_current_price(self.crypto, "usd")
fee_dollar = convert * self.fee_satoshi / 1e8
print(verbose + " %s satoshis ($%.2f)" % (self.fee_satoshi, fee_dollar))
def estimate_size(self):
"""
Estimate how many bytes this transaction will be by countng inputs
and outputs.
Formula taken from: http://bitcoin.stackexchange.com/a/3011/18150
"""
# if there are no outs use 1 (because the change will be an out)
outs = len(self.outs) or 1
return outs * 34 + 148 * len(self.ins) + 10
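    # e.g. a transaction with 2 inputs and 2 outputs is estimated at
    # 2 * 34 + 148 * 2 + 10 = 374 bytes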
def get_hex(self, signed=True):
"""
Given all the data the user has given so far, make the hex using pybitcointools
"""
logging.info('2')
total_ins_satoshi = self.total_input_satoshis()
logging.info('3')
if total_ins_satoshi == 0:
raise ValueError("Can't make transaction, there are zero inputs")
logging.info('4')
# Note: there can be zero outs (sweep or coalesc transactions)
total_outs_satoshi = sum([x['value'] for x in self.outs])
logging.info('5')
if not self.fee_satoshi:
self.fee() # use default of $0.02
logging.info('6')
change_satoshi = total_ins_satoshi - (total_outs_satoshi + self.fee_satoshi)
logging.info('7')
if change_satoshi < 0:
raise ValueError(
"Input amount (%s) must be more than all output amounts (%s) plus fees (%s). You need more %s."
% (total_ins_satoshi, total_outs_satoshi, self.fee_satoshi, self.crypto.upper())
)
logging.info('8')
ins = [x['input'] for x in self.ins]
logging.info('9')
if change_satoshi > 0:
if self.verbose:
print("Adding change address of %s satoshis to %s" % (change_satoshi, self.change_address))
change = [{'value': change_satoshi, 'address': self.change_address}]
logging.info('10')
else:
change = [] # no change ?!
if self.verbose: print("Inputs == Outputs, no change address needed.")
logging.info('11')
tx = mktx(ins, self.outs + change)
logging.info('12')
if signed:
for i, input_data in enumerate(self.ins):
logging.info('13')
if not input_data['private_key']:
raise Exception("Can't sign transaction, missing private key for input %s" % i)
tx = sign(tx, i, input_data['private_key'])
logging.info('14')
return tx
def push(self, services=None, redundancy=1):
if not services:
services = get_optimal_services(self.crypto, "push_tx")
self.pushers = []
pusher = PushTx(services=services, verbose=self.verbose)
results = [pusher.action(self.crypto, self.get_hex())]
try:
for service in services[1:redundancy-1]:
pusher = PushTx(services=[service], verbose=self.verbose)
                results.append(pusher.action(self.crypto, self.get_hex()))
self.pushers.append(pusher)
except:
raise Exception("Partial push. Some services returned success, some failed.")
return results
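# A hedged usage sketch of this class; the constructor name and the input-adding
# helper are assumed from context rather than taken from the library docs, and
# the address is a placeholder:
#
#     tx = Transaction('btc', verbose=True)            # assumed constructor
#     tx.add_inputs(private_key='...')                 # assumed input helper
#     tx.add_output('1ExampleAddressXXXXXXXXXXXXXXXXXX', 0.001, unit='btc')
#     tx.fee('optimal')
#     print(tx.get_hex())                              # signed raw transaction
#     tx.push(redundancy=2)                            # broadcast via 2 services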
|
the-stack_0_14489 | # Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import hashlib
import os.path
import platform
import re
import shutil
from flask_babel import lazy_gettext as _
def get_tree_size(start_path):
"""
return size (in bytes) of filesystem tree
"""
if not os.path.exists(start_path):
raise ValueError(_("Incorrect path: %(start_path)s", start_path=start_path))
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
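# Example (the path is illustrative only):
#
#     >>> get_tree_size('/data/jobs/my_dataset')
#     734003200    # total size of the tree, in bytes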
def get_python_file_dst(dirname, basename):
basename = os.path.basename(basename)
(root, ext) = os.path.splitext(basename)
if ext != '.py' and ext != '.pyc':
        raise ValueError(_('Python file, %(basename)s, needs .py or .pyc extension.', basename=basename))
filename = os.path.join(dirname, 'digits_python_layers' + ext)
if os.path.isfile(filename):
        raise ValueError(_('Python file, %(filename)s, already exists.', filename=filename))
return filename
def copy_python_layer_file(from_client, job_dir, client_file, server_file):
if from_client and client_file:
filename = get_python_file_dst(job_dir, client_file.filename)
client_file.save(filename)
elif server_file and len(server_file) > 0:
filename = get_python_file_dst(job_dir, server_file)
shutil.copy(server_file, filename)
def tail(file, n=40):
"""
Returns last n lines of text file (or all lines if the file has fewer lines)
Arguments:
file -- full path of that file, calling side must ensure its existence
n -- the number of tailing lines to return
"""
if platform.system() in ['Linux', 'Darwin']:
import subprocess
output = subprocess.check_output(['tail', '-n{}'.format(n), file])
else:
from collections import deque
tailing_lines = deque()
with open(file) as f:
for line in f:
tailing_lines.append(line)
if len(tailing_lines) > n:
tailing_lines.popleft()
output = ''.join(tailing_lines)
return output
def dir_hash(dir_name):
"""
Return a hash for the files in a directory tree, excluding hidden
    files and directories. If any files are renamed, added, removed, or
    modified, the hash will change.
"""
if not os.path.isdir(dir_name):
raise TypeError(_('%(dir_name)s is not a directory.', dir_name=dir_name))
md5 = hashlib.md5()
for root, dirs, files in os.walk(dir_name, topdown=True):
# Skip if the root has a hidden directory in its path
if not re.search(r'/\.', root):
for f in files:
# Skip if the file is hidden
if not f.startswith('.') and not re.search(r'/\.', f):
# Change the hash if the file name changes
file_name = os.path.join(root, f).encode('utf-8')
md5.update(hashlib.md5(file_name).hexdigest().encode('utf-8'))
# Change the hash if the file content changes
data = open(file_name, 'rb').read()
md5.update(hashlib.md5(data).hexdigest().encode('utf-8'))
return md5.hexdigest()
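# A short illustrative sketch combining the helpers above; the job directory
# and log file name are hypothetical:
#
#     job_dir = '/jobs/20170101-120000-abcd'
#     print(get_tree_size(job_dir), 'bytes on disk')
#     print(tail(os.path.join(job_dir, 'train.log'), n=20))
#     print(dir_hash(job_dir))   # changes whenever a non-hidden file changes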
|
the-stack_0_14490 | #!/usr/bin/env python3
# Write a Shannon entropy calculator: H = -sum(pi * log(pi))
# The values should come from the command line
# E.g. python3 entropy.py 0.4 0.3 0.2 0.1
# Put the probabilities into a new list
# Don't forget to convert them to numbers
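# Worked example for the probabilities above (0.4 0.3 0.2 0.1):
#   H = -(0.4*log2(0.4) + 0.3*log2(0.3) + 0.2*log2(0.2) + 0.1*log2(0.1))
#     ≈ -(-0.529 - 0.521 - 0.464 - 0.332)
#     ≈ 1.846 bits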
import math
import sys
numbers = []
for item in sys.argv[1:]:
numbers.append(float(item))
print(numbers)
H = 0
H += numbers[0] * math.log2(numbers[0])
H += numbers[1] * math.log2(numbers[1])
H += numbers[2] * math.log2(numbers[2])
H += numbers[3] * math.log2(numbers[3])
print(-H)
H = 0
for i in range(len(numbers)):
H += numbers[i] * math.log2(numbers[i])
print(-H)
H = 0
for p in numbers:
H += p * math.log2(p)
print(-H)
"""
python3 31entropy.py 0.1 0.2 0.3 0.4
1.846
"""
|