repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
sumpfgottheit/arps | arps_old/restserver/views/taskresult.py | 1 | 3216 | # -*- coding: utf-8 -*-
__author__ = 'saf'
import logging
from flask import render_template, url_for, request, jsonify
from flask.views import View
from arps_old.models import CeleryResult
from arps_old.restserver import app, redis_conn
log = logging.getLogger(__name__)
class TaskResultView(View):
methods = ['GET', ]
endpoint = 'endpoint_taskresult_detail'
endpoint_list = 'endpoint_taskresult_list'
endpoint_ajax_results = 'endpoint_taskresults_ajax'
template = 'taskresult/taskresult_detail.html'
template_list = 'taskresult/taskresult_list.html'
def dispatch_request(self, *args, **kwargs):
_id = kwargs.get('id', None)
if request.endpoint == self.endpoint_ajax_results:
return self.ajax_results()
if request.endpoint == self.endpoint_list:
return self.list(_id)
elif _id is not None:
return self.show_object(_id)
self.return_404(_id)
def ajax_results(self):
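        # NOTE: TaskUpdateRepoMetadataMetaStore, release_id and repository_id are
        # not defined or imported in this module, so this handler cannot run as
        # written; they presumably come from elsewhere in the project.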
T = TaskUpdateRepoMetadataMetaStore
results_for_repo = T.query.join(T.result).filter(T.release_id == release_id, T.repository_id == repository_id).order_by(CeleryResult.start.desc()).all()
results_for_repo = [r.result for r in results_for_repo]
results = []
for result in results_for_repo:
results.append(result.json)
results[-1]['detail_url'] = url_for(TaskResultView.endpoint, id=result.id)
return jsonify({'data': results})
def list(self, task):
results = CeleryResult.query.filter_by(task=task).order_by(CeleryResult.submitted.desc()).limit(20).all()
return render_template(self.template_list, results=results, task=task)
def show_object(self, _id):
result = CeleryResult.query.get(_id)
if redis_conn.llen(_id) > app.config['MAX_LINES_FOR_STDOUT_ERR']:
a = redis_conn.lrange(_id, 0, app.config['MAX_LINES_FOR_STDOUT_ERR'] // 2)
b = redis_conn.lrange(_id, redis_conn.llen(_id) - app.config['MAX_LINES_FOR_STDOUT_ERR'] // 2, -1)
n = redis_conn.llen(_id) - app.config['MAX_LINES_FOR_STDOUT_ERR']
a = [(int(line[0]), line[1:]) for line in [line.decode('utf-8') for line in a]]
b = [(int(line[0]), line[1:]) for line in [line.decode('utf-8') for line in b]]
c = [(3, '========================================================' + "=" * len(str(n))),
(3, '============ TOO MUCH DATA - SKIPPED {} LINES ============'.format(n)),
(3, '========================================================' + "=" * len(str(n)))]
lines = a + c + b
else:
lines = redis_conn.lrange(_id, 0, -1)
lines = [(int(line[0]), line[1:]) for line in [line.decode('utf-8') for line in lines]]
return render_template(self.template, result=result, lines=reversed(lines))
taskresult_view = TaskResultView.as_view(TaskResultView.endpoint)
app.add_url_rule('/tasks/detail/<id>', view_func=taskresult_view)
app.add_url_rule('/tasks', view_func=taskresult_view, endpoint=TaskResultView.endpoint_list)
app.add_url_rule('/tasks/ajax', view_func=taskresult_view, endpoint=TaskResultView.endpoint_ajax_results)
| mit | 5,123,974,780,708,314,000 | 43.054795 | 160 | 0.598881 | false |
twneale/rexlex | rexlex/log_config.py | 1 | 7091 | '''
Establish custom log levels for rexlexer's verbose output.
'''
import logging
from rexlex.config import LOG_MSG_MAXWIDTH
# ---------------------------------------------------------------------------
# Establish custom log levels.
# ---------------------------------------------------------------------------
# Used to report tokens getting yielded.
REXLEX_TRACE_RESULT = 9
# Used to report starting, stopping, etc.
REXLEX_TRACE_META = 8
# Used to report changes to lexer state.
REXLEX_TRACE_STATE = 7
# Used to report on specific rules.
REXLEX_TRACE_RULE = 6
# Used to dump as much info as possible.
REXLEX_TRACE = 5
REXLEX_LOG_LEVELS = (
(REXLEX_TRACE_RESULT, 'REXLEX_TRACE_RESULT', 'rexlex_trace_result'),
(REXLEX_TRACE_META, 'REXLEX_TRACE_META', 'rexlex_trace_meta'),
(REXLEX_TRACE_STATE, 'REXLEX_TRACE_STATE', 'rexlex_trace_state'),
(REXLEX_TRACE_RULE, 'REXLEX_TRACE_RULE', 'rexlex_trace_rule'),
(REXLEX_TRACE, 'REXLEX_TRACE', 'rexlex_trace'),
)
for loglevel, loglevel_name, method_name in REXLEX_LOG_LEVELS:
logging.addLevelName(loglevel, loglevel_name)
def rexlex_trace_result(self, message, *args, **kws):
if self.isEnabledFor(REXLEX_TRACE_RESULT):
self._log(REXLEX_TRACE_RESULT, message, args, **kws)
setattr(logging.Logger, 'rexlex_trace_result', rexlex_trace_result)
def rexlex_trace_meta(self, message, *args, **kws):
if self.isEnabledFor(REXLEX_TRACE_META):
self._log(REXLEX_TRACE_META, message, args, **kws)
setattr(logging.Logger, 'rexlex_trace_meta', rexlex_trace_meta)
def rexlex_trace_state(self, message, *args, **kws):
if self.isEnabledFor(REXLEX_TRACE_STATE):
self._log(REXLEX_TRACE_STATE, message, args, **kws)
setattr(logging.Logger, 'rexlex_trace_state', rexlex_trace_state)
def rexlex_trace_rule(self, message, *args, **kws):
if self.isEnabledFor(REXLEX_TRACE_RULE):
self._log(REXLEX_TRACE_RULE, message, args, **kws)
setattr(logging.Logger, 'rexlex_trace_rule', rexlex_trace_rule)
def rexlex_trace(self, message, *args, **kws):
if self.isEnabledFor(REXLEX_TRACE):
self._log(REXLEX_TRACE, message, args, **kws)
setattr(logging.Logger, 'rexlex_trace', rexlex_trace)
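# Example usage (illustrative; the logger name and message below are
# placeholders, not part of rexlex itself): once this module is imported,
# every Logger instance gains the custom trace methods, e.g.
#
#   log = logging.getLogger('rexlex')
#   log.setLevel(REXLEX_TRACE)
#   log.rexlex_trace_state('lexer entered state %r', 'root')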
# ---------------------------------------------------------------------------
# Colorize them.
# ---------------------------------------------------------------------------
#
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved.
# Licensed under the new BSD license.
#
import ctypes
import logging
import os
class ColorizingStreamHandler(logging.StreamHandler):
# color names to indices
color_map = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
}
#levels to (background, foreground, bold/intense)
if os.name == 'nt':
level_map = {
REXLEX_TRACE: (None, 'blue', True),
REXLEX_TRACE_RULE: (None, 'white', False),
REXLEX_TRACE_STATE: (None, 'yellow', True),
REXLEX_TRACE_META: (None, 'red', True),
REXLEX_TRACE_RESULT: ('red', 'white', True),
}
else:
level_map = {
REXLEX_TRACE: (None, 'blue', False),
REXLEX_TRACE_RULE: (None, 'white', False),
REXLEX_TRACE_STATE: (None, 'yellow', False),
REXLEX_TRACE_META: (None, 'red', False),
REXLEX_TRACE_RESULT: ('red', 'white', True),
}
csi = '\x1b['
reset = '\x1b[0m'
@property
def is_tty(self):
# bluff for Jenkins
if os.environ.get('JENKINS_URL'):
return True
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def emit(self, record):
try:
message = self.format(record)
stream = self.stream
if not self.is_tty:
stream.write(message)
else:
self.output_colorized(message)
stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
if os.name != 'nt':
def output_colorized(self, message): # NOQA
self.stream.write(message)
else:
import re
ansi_esc = re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m')
nt_color_map = {
0: 0x00, # black
1: 0x04, # red
2: 0x02, # green
3: 0x06, # yellow
4: 0x01, # blue
5: 0x05, # magenta
6: 0x03, # cyan
7: 0x07, # white
}
def output_colorized(self, message): # NOQA
parts = self.ansi_esc.split(message)
write = self.stream.write
h = None
fd = getattr(self.stream, 'fileno', None)
if fd is not None:
fd = fd()
if fd in (1, 2): # stdout or stderr
h = ctypes.windll.kernel32.GetStdHandle(-10 - fd)
while parts:
text = parts.pop(0)
if text:
write(text)
if parts:
params = parts.pop(0)
if h is not None:
params = [int(p) for p in params.split(';')]
color = 0
for p in params:
if 40 <= p <= 47:
color |= self.nt_color_map[p - 40] << 4
elif 30 <= p <= 37:
color |= self.nt_color_map[p - 30]
elif p == 1:
color |= 0x08 # foreground intensity on
elif p == 0: # reset to default color
color = 0x07
else:
pass # error condition ignored
ctypes.windll.kernel32.SetConsoleTextAttribute(h,
color)
def colorize(self, message, record):
if record.levelno in self.level_map:
bg, fg, bold = self.level_map[record.levelno]
params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params:
message = ''.join((self.csi, ';'.join(params),
'm', message, self.reset))
return message
def format(self, record):
message = logging.StreamHandler.format(self, record)
if self.is_tty:
# Don't colorize any traceback
parts = message.split('\n', 1)
parts[0] = self.colorize(parts[0], record)
message = '\n'.join(parts)
return message
| bsd-3-clause | -7,196,659,918,402,147,000 | 33.42233 | 77 | 0.502468 | false |
mganeva/mantid | Framework/PythonInterface/test/python/mantid/geometry/IPeakTest.py | 1 | 7192 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.kernel import V3D
from mantid.simpleapi import CreateSimulationWorkspace, CreatePeaksWorkspace
import numpy as np
import numpy.testing as npt
class IPeakTest(unittest.TestCase):
def setUp(self):
# IPeak cannot currently be instatiated so this is a quick way
# getting a handle to a peak object
ws = CreateSimulationWorkspace("SXD", BinParams="1,1,10")
peaks = CreatePeaksWorkspace(ws, 1)
self._peak = peaks.getPeak(0)
# tolerance for differences in q vectors that a recomputed
# on every call.
self._tolerance = 1e-2
def test_set_detector_id_with_valid_id(self):
det_id = 101
self._peak.setDetectorID(det_id)
self.assertEqual(self._peak.getDetectorID(), det_id)
def test_set_detector_id_with_invalid_id(self):
det_id = -1
self.assertRaises(RuntimeError, self._peak.setDetectorID, det_id)
def test_set_run_number(self):
run_number = 101
self._peak.setRunNumber(run_number)
self.assertEqual(self._peak.getRunNumber(), run_number)
def test_set_monitor_count(self):
mon_count = 3
self._peak.setMonitorCount(mon_count)
self.assertEqual(self._peak.getMonitorCount(), mon_count)
def test_set_hkl_all_at_once(self):
H, K, L = 1, 2, 3
self._peak.setHKL(H, K, L)
self.assertEqual(self._peak.getH(), H)
self.assertEqual(self._peak.getK(), K)
self.assertEqual(self._peak.getL(), L)
def test_set_hkl_individually(self):
H, K, L = 1, 2, 3
self._peak.setH(H)
self._peak.setK(K)
self._peak.setL(L)
self.assertEqual(self._peak.getH(), H)
self.assertEqual(self._peak.getK(), K)
self.assertEqual(self._peak.getL(), L)
def test_set_q_lab_frame(self):
q_lab = V3D(0, 1, 1)
self._peak.setQLabFrame(q_lab)
npt.assert_allclose(self._peak.getQLabFrame(), q_lab, atol=self._tolerance)
npt.assert_allclose(self._peak.getQSampleFrame(), q_lab, atol=self._tolerance)
def test_set_q_sample_frame(self):
q_sample = V3D(0, 1, 1)
self._peak.setQSampleFrame(q_sample)
npt.assert_allclose(self._peak.getQSampleFrame(), q_sample, atol=self._tolerance)
npt.assert_allclose(self._peak.getQLabFrame(), q_sample, atol=self._tolerance)
def test_set_goniometer_matrix_with_valid_matrix(self):
angle = np.pi/4
rotation = np.array([
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]
])
q_sample = V3D(1, 1, 1)
self._peak.setGoniometerMatrix(rotation)
self._peak.setQSampleFrame(q_sample)
q_lab = np.dot(self._peak.getQLabFrame(), rotation)
npt.assert_allclose(self._peak.getGoniometerMatrix(), rotation)
npt.assert_allclose(self._peak.getQSampleFrame(), q_sample, atol=self._tolerance)
npt.assert_allclose(q_lab, q_sample, atol=self._tolerance)
def test_set_goniometer_matrix_with_singular_matrix(self):
rotation = np.zeros((3,3))
self.assertRaises(ValueError, self._peak.setGoniometerMatrix, rotation)
def test_set_wavelength(self):
wavelength = 1.3
self._peak.setWavelength(wavelength)
self.assertAlmostEqual(self._peak.getWavelength(), wavelength)
def test_get_scattering(self):
det_id = 101
expected_scattering_angle = 2.878973314094696
self._peak.setDetectorID(det_id)
self.assertAlmostEqual(self._peak.getScattering(), expected_scattering_angle)
def test_get_tof(self):
det_id = 101
wavelength = 1.9
expected_tof = 4103.70182610731
self._peak.setDetectorID(det_id)
self._peak.setWavelength(wavelength)
self.assertEqual(self._peak.getTOF(), expected_tof)
def test_get_d_spacing(self):
det_id = 101
wavelength = 1.9
expected_d = 0.958249313959493
self._peak.setDetectorID(det_id)
self._peak.setWavelength(wavelength)
self.assertEqual(self._peak.getDSpacing(), expected_d)
def test_set_initial_energy(self):
initial_energy = 10.0
self._peak.setInitialEnergy(initial_energy)
self.assertAlmostEqual(self._peak.getInitialEnergy(), initial_energy)
def test_set_final_energy(self):
final_energy = 10.0
self._peak.setFinalEnergy(final_energy)
self.assertAlmostEqual(self._peak.getFinalEnergy(), final_energy)
def test_get_energy(self):
initial_energy = 10.0
final_energy = 10.0
self._peak.setFinalEnergy(final_energy)
self._peak.setInitialEnergy(initial_energy)
self.assertAlmostEqual(self._peak.getEnergyTransfer(), initial_energy - final_energy)
def test_set_intensity(self):
intensity = 10.0
self._peak.setIntensity(intensity)
self.assertAlmostEqual(self._peak.getIntensity(), intensity)
def test_set_sigma_intensity(self):
sigma = 10.0
self._peak.setSigmaIntensity(sigma)
self.assertAlmostEqual(self._peak.getSigmaIntensity(), sigma)
def test_get_intensity_over_sigma(self):
intensity = 100.0
sigma = 10.0
self._peak.setIntensity(intensity)
self._peak.setSigmaIntensity(sigma)
self.assertAlmostEqual(self._peak.getIntensityOverSigma(), intensity / sigma)
def test_set_bin_count(self):
bin_count = 10.0
self._peak.setBinCount(bin_count)
self.assertAlmostEqual(self._peak.getBinCount(), bin_count)
def test_get_row_and_column(self):
det_id = 101
row, col = 36, 1
self._peak.setDetectorID(det_id)
self.assertEqual(self._peak.getRow(), row)
self.assertEqual(self._peak.getCol(), col)
def test_get_det_pos(self):
det_id = 101
expected_det_pos = np.array([0.061999, 0.0135, -0.236032])
self._peak.setDetectorID(det_id)
npt.assert_allclose(self._peak.getDetPos(), expected_det_pos, atol=self._tolerance)
def test_get_l1(self):
det_id = 101
expected_l1 = 8.3
self._peak.setDetectorID(det_id)
self.assertEqual(self._peak.getL1(), expected_l1)
def test_get_l2(self):
det_id = 101
expected_l2 = 0.2444125610784556
self._peak.setDetectorID(det_id)
self.assertEqual(self._peak.getL2(), expected_l2)
def test_set_modulation_vector(self):
testVector = V3D(0.5,0,0.2)
testVectorOut = V3D(1, 0, 0)
self._peak.setIntMNP(testVector)
self.assertEqual(self._peak.getIntMNP(), testVectorOut)
def test_set_get_inthkl(self):
testVector = V3D(0.5,0,0.2)
self._peak.setIntHKL(testVector)
self.assertEqual(self._peak.getIntHKL(), V3D(1,0,0))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 1,140,818,208,100,997,400 | 34.60396 | 93 | 0.638348 | false |
supermitch/mech-ai | server/game.py | 1 | 2542 | import datetime
import json
import logging
import maps
import queue
import state
import utils
import world
class GAME_STATUS(object):
""" Game status constants. """
lobby = 'lobby' # In matchmaking lobby, waiting for all players
playing = 'playing' # In game mode, waiting for turns
complete = 'complete' # Game finished
cancelled = 'cancelled' # Broken?
class PLAYER_STATUS(object):
waiting = 'waiting' # Hasn't joined the lobby yet
joined = 'joined' # Has joined the lobby
playing = 'playing' # Sending moves and waiting for game state
lost = 'lost' # Missed turns/broken?
class Game(object):
def __init__(self, id=None, players=None, name='Mech AI', map_name='default', rounds=17):
"""
Initialize a new game.
Note that when we load a game from the repo, we init an empty
game, so all our arguments to the constructor are optional.
"""
self.id = id
self.name = name if name else 'Mech AI'
self.map_name = map_name if map_name else 'default'
self.players = players # List of player usernames
self.winner = None
self.status = GAME_STATUS.lobby
self.created = datetime.datetime.now()
# These attributes are persisted in the state, not DB properties
map = maps.get_map(self.map_name)
self.state = state.State(map=map, rounds=rounds, players=players)
self.queue = queue.Queue(players=players)
self.transactions = []
self.transactions.append({
'move': None,
'message': (True, 'Initial state'),
'state': self.state.jsonable,
})
@property
def not_joined(self):
""" Return list of unjoined players. """
return ', '.join(self.queue.not_joined)
def set_user_status(self, username, status):
""" Update Queue with new status. """
self.queue.set_status(username, status)
def update(self, username, move):
""" Execute a round. """
the_world = world.World(self) # Convert our self (a game object) into a World
success, reason = the_world.update(move)
if success:
self.queue.increment_move()
self.state.increment_turn()
if self.state.game_complete:
self.status = GAME_STATUS.complete
self.transactions.append({
'move': move,
'message': (success, reason),
'state': self.state.jsonable,
})
return success, reason
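# Illustrative flow (usernames and the move payload are made up; the real
# structure of a move is whatever world.World.update() expects):
#
#   game = Game(id=1, players=['alice', 'bob'])
#   game.set_user_status('alice', PLAYER_STATUS.joined)
#   ok, reason = game.update('alice', move)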
| mit | -3,785,946,842,478,620,700 | 30.382716 | 93 | 0.606609 | false |
rndusr/stig | stig/utils/__init__.py | 1 | 1595 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
# http://www.gnu.org/licenses/gpl-3.0.txt
from types import SimpleNamespace
from ._converter import DataSizeConverter
convert = SimpleNamespace(bandwidth=DataSizeConverter(),
size=DataSizeConverter())
def cached_property(fget=None, *, after_creation=None):
"""
Property that replaces itself with the requested value when accessed
`after_creation` is called with the instance of the property when the
property is accessed for the first time.
"""
# https://stackoverflow.com/a/6849299
class _cached_property():
def __init__(self, fget):
self._fget = fget
self._property_name = fget.__name__
self._after_creation = after_creation
self._cache = {}
def __get__(self, obj, cls):
value = self._fget(obj)
setattr(obj, self._property_name, value)
if self._after_creation is not None:
self._after_creation(obj)
return value
if fget is None:
return _cached_property
else:
return _cached_property(fget)
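# Example (illustrative; ``Torrent`` and ``expensive_lookup`` are made up):
#
#   class Torrent:
#       @cached_property(after_creation=lambda self: print('files cached'))
#       def files(self):
#           return expensive_lookup()
#
# The first access computes the value, replaces the property with a plain
# attribute, then calls ``after_creation(self)``; later accesses are direct
# attribute lookups.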
| gpl-3.0 | 4,341,910,219,779,885,600 | 34.444444 | 73 | 0.662696 | false |
enableiot/iotanalytics-gearpump-rule-engine | deployer/src/gearpump_api.py | 1 | 4687 | # Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
import urllib2
from urllib2 import Request, URLError
from poster.encode import multipart_encode
from api_config import GearpumpApiConfig
import util
import requests
import os
class GearpumpApi:
def __init__(self, uri, credentials):
self.gearpump_credentials = credentials
self.gearpump_uri = uri
self.gearpump_user_cookies = None
self.gearpump_app_config = None
print "Gearpump dashboard uri set to - " + self.gearpump_uri
print "Gearpump dashboard credentials - " + str(self.gearpump_credentials)
def __encode_and_prepare_datagen(self, filename):
datagen, headers = multipart_encode({"file": open(filename, "rb")})
datagen.params[0].name = 'jar'
datagen.params[0].filetype = 'application/x-java-archive'
self.__add_user_cookies_to_headers(headers)
return datagen, headers
def __add_user_cookies_to_headers(self, headers):
if self.gearpump_user_cookies is not None:
headers['Cookie'] = self.gearpump_user_cookies
def __create_user_headers_with_cookies(self):
headers = {}
self.__add_user_cookies_to_headers(headers)
return headers
def __submit_app_jar(self, filename):
datagen, headers = self.__encode_and_prepare_datagen(filename)
# Create the Request object
request_url = self.gearpump_uri + GearpumpApiConfig.call_submit
files = {
"args": (None, self.gearpump_app_config, 'application/json'),
"jar": (os.path.basename(filename), open(filename, "rb"), 'application/x-java-archive')
}
print headers
# Do the request and get the response
response = requests.post(request_url, files=files, headers=self.__create_user_headers_with_cookies())
return response
def __find_active_app_id_by_name(self, name):
request = Request(self.gearpump_uri + GearpumpApiConfig.call_applist,
headers=self.__create_user_headers_with_cookies())
json = util.call_api(request)
for app in json['appMasters']:
if app['appName'] == name and app['status'] == 'active':
return app['appId']
def __kill_app(self, app_id):
request = Request(self.gearpump_uri + GearpumpApiConfig.call_appmaster + "/" + str(app_id),
headers=self.__create_user_headers_with_cookies())
request.get_method = lambda: 'DELETE'
return util.call_api(request)
def __get_gearpump_user_cookies(self):
request_url = self.gearpump_uri + GearpumpApiConfig.call_login
body = self.gearpump_credentials
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
request = Request(url=request_url, data=urllib.urlencode(body), headers=headers)
sock = urllib2.urlopen(request)
cookies = sock.info()['Set-Cookie']
sock.read()
sock.close()
self.gearpump_user_cookies = self.__parse_gearpump_user_cookies(cookies)
def submit_app(self, filename, app_name, gearpump_app_config=None, force=False):
print "Gearpump rule engine config - " + str(gearpump_app_config)
self.gearpump_app_config = util.json_dict_to_string(gearpump_app_config).replace(" ", "")
if self.gearpump_credentials is not None:
self.__get_gearpump_user_cookies()
if force:
try:
self.__kill_app(self.__find_active_app_id_by_name(app_name))
except URLError as e:
print e.errno
print app_name + " was not running"
else:
print app_name + " was running and got killed"
self.__submit_app_jar(filename=filename)
print 'OK'
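    # Example call (illustrative; the URI, credentials and jar path are
    # placeholders, not real values):
    #
    #   api = GearpumpApi('http://dashboard:8090', {'username': 'admin', 'password': 'admin'})
    #   api.submit_app('rule-engine.jar', 'rule-engine', gearpump_app_config={}, force=True)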
def __encode_gearpump_app_config(self, gearpump_app_config):
return urllib.quote(util.json_dict_to_string(gearpump_app_config).replace(" ", ""))
def __parse_gearpump_user_cookies(self, cookies):
return cookies.split(';')[0] + '; username=' + self.gearpump_credentials['username'] | apache-2.0 | 6,531,992,031,937,092,000 | 37.42623 | 109 | 0.643269 | false |
mozilla/stoneridge | srcleaner.py | 1 | 1738 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import logging
import os
import shutil
import sys
import time
import stoneridge
class StoneRidgeCleaner(object):
def __init__(self):
self.workdir = stoneridge.get_config('stoneridge', 'work')
self.keep = stoneridge.get_config_int('cleaner', 'keep')
def run(self):
logging.debug('cleaner running')
with stoneridge.cwd(self.workdir):
while True:
listing = os.listdir('.')
logging.debug('candidate files: %s' % (listing,))
directories = [l for l in listing
if os.path.isdir(l) and not l.startswith('.')]
logging.debug('directories: %s' % (directories,))
times = [(d, os.stat(d).st_mtime) for d in directories]
times.sort(key=lambda x: x[1])
delete_us = times[:-self.keep]
logging.debug('directories to delete: %s' % (delete_us,))
for d in delete_us:
logging.debug('removing %s' % (d,))
shutil.rmtree(d)
# Check again in a minute
time.sleep(60)
def daemon(args):
cleaner = StoneRidgeCleaner()
cleaner.run()
os.unlink(args.pidfile)
sys.exit(0)
@stoneridge.main
def main():
"""A simple cleanup program for stone ridge that blows away the working
directory
"""
parser = stoneridge.DaemonArgumentParser()
args = parser.parse_args()
parser.start_daemon(daemon, args=args)
| mpl-2.0 | -5,283,611,898,606,058,000 | 27.966667 | 78 | 0.581703 | false |
samyk/natpinning | server/modules/web.py | 1 | 6500 | #!/usr/bin/env python
#filename=web.py
#This module acts as a very simple HTTP webserver and will feed the exploit page.
from base import *
import socket
import random
import struct
import select
import time
import uuid
import base64
from datetime import datetime
class HTTPProtoHandler(asyncore.dispatcher_with_send):
REQPAGE = ""
REQHEADER = ""
REQHEADERDONE = 0
def __init__(self,conn_sock, client_address, server):
self.REQHEADERDONE = 0
self.REQHEADER = ""
self.server=server
asyncore.dispatcher_with_send.__init__(self,conn_sock) #Line is required
self.server.log("Received connection from " + client_address[0] + ' on port ' + str(self.server.sPort),3)
def get_header(self,req,header_name,splitter=":"):
headers=req.split("\n")
result = ""
for header in headers:
headerparts = header.split(splitter)
if len(headerparts)>1:
if headerparts[0].strip().upper()==header_name.upper():
result = header.strip()
return result
def handle_cmd(self,command):
"""Validates command structure, sends data for processing to engine (self.server.CALLER) and returns output to client"""
cmd_parts = command.split("_")
cmd = cmd_parts[0].upper().strip()
result=""
if cmd=="REG":
if len(cmd_parts)!=2:
self.server.log("Received invalid REG command : " + command,2)
else:
client_ip = cmd_parts[1].strip()
if self.server.CALLER.isValidIPv4(client_ip)!=True:
self.server.log("Received invalid IP for REG command : " + command,2)
else:
client_id = self.server.CALLER.registerVictim(self,client_ip)
return client_id
elif cmd=="POLL":
if len(cmd_parts)!=2:
self.server.log("Received invalid POLL command : " + command,2)
else:
client_id = cmd_parts[1].strip()
client = self.server.CALLER.getVictimByVictimId(client_id)
if client != None:
client.LAST_SEEN= datetime.now()
for test in client.TESTS:
if test.STATUS=="NEW":
result = test.getTestString()
break
else:
self.server.log("Received POLL command for unknown client: " + command,4)
elif cmd=="ADD":
if len(cmd_parts)!=5:
self.server.log("Received invalid ADD command : " + command,2)
else:
client_id = cmd_parts[1].strip()
client = self.server.CALLER.getVictimByVictimId(client_id)
if client != None:
client.LAST_SEEN= datetime.now()
proto = cmd_parts[2].strip().upper()
ip = cmd_parts[3].strip()
port = cmd_parts[4].strip()
if proto in self.server.CALLER.PROTOS and self.server.CALLER.isValidIPv4(ip) and self.server.CALLER.isValidPort(port):
#distrust whatever comes from the web
result = client.addTest(proto,ip,port)
else:
self.server.log("Received invalid ADD command : " + command,2)
else:
self.server.log("Received ADD command for unknown client: " + command,4)
elif cmd=="STATUS":
if len(cmd_parts)!= 2:
self.server.log("Received invalid STATUS command : " + command,2)
else:
test = self.server.CALLER.getVictimTest(cmd_parts[1].strip())
if test != None:
result = test.STATUS + " " + str(test.RESULT)
else:
result = "0"
elif cmd=="GENFLASH":
if len(cmd_parts)!= 3:
self.server.log("Received invalid GENFLASH command : " + command,2)
else:
result ="""<object width="1" height="1" data="exploit.swf" type="application/x-shockwave-flash" class="myclass" id="myid">
<param value="transparent" name="wmode">
<param value="allways" name="allowScriptAccess">
<param value="ci="""+cmd_parts[1]+"""&server="""+cmd_parts[2]+"""&cmdURL=http://"""+cmd_parts[2]+"""/cli" name="FlashVars">
</object>
"""
elif cmd=="LIST":
if len(cmd_parts)!= 2:
self.server.log("Received invalid LIST command : " + command,2)
else:
client_id = cmd_parts[1].strip()
client = self.server.CALLER.getVictimByVictimId(client_id)
if client != None:
for test in client.TESTS:
result = result + test.TEST_ID + "|" + test.STATUS + "|" + test.TEST_TYPE + "|" + str(test.RESULT) + "|" + test.PUBLIC_IP + "|" + test.PRIVATE_IP + "|" + test.PUBLIC_PORT + "|" + test.PRIVATE_PORT + "\n"
if result=="": result="0"
return result
def handle_read(self):
data = self.recv(1024)
request = self.get_header(data,"GET", " ")
cookie = self.get_header(data,"cookie", ":")
if cookie == "":
cookie = base64.urlsafe_b64encode(uuid.uuid4().bytes).replace("=","")
else:
cookie = cookie.split(" ")[1]
_page = ""
if request <>"":
headerparts = request.split(" ")
if headerparts[0]=="GET":
_page = headerparts[1].replace("/","")
if _page =="": _page = "exploit.html"
self.server.log("Victim requested page: " + _page,3)
_page=_page.lower()
page = _page.split("?")[0];
if page != "":
arrPages = ["admin.html","exploit.swf","admin.css","admin.js","tools.js","screen.js","gremwell_logo.png","exploit.html","exploit.css","exploit.js"]
arrCommands = ["cli"]
if page in arrPages:
agent = self.get_header(data,"USER-AGENT",":")
self.server.log("---" + agent,4)
respheader="""HTTP/1.1 200 OK\r\nContent-Type: text;html; charset=UTF-8\r\nServer: NatPin Exploit Server\r\nSet-Cookie: $cookie$\r\nContent-Length: $len$\r\n\r\n"""
f = open("exploit/"+page,"r")
body = f.read()
f.close()
elif page in arrCommands:
respheader="""HTTP/1.1 200 OK\r\nContent-Type: text;html; charset=UTF-8\r\nServer: NatPin Exploit Server\r\nSet-Cookie: $cookie$\r\nContent-Length: $len$\r\n\r\n"""
body=""
if page=="cli":
if len(_page.split("?"))!=2:
body ="Invalid command."
else:
body=self.handle_cmd(_page.split("?")[1].strip())
else:
body=""
else:
respheader="""HTTP/1.1 404 NOT FOUND\r\nServer: NatPin Exploit Server\r\nSet-Cookie: $cookie$\r\nContent-Length: 0\r\n\r\n"""
body = ""
respheader = respheader.replace("$len$",str(len(body)))
respheader = respheader.replace("$cookie$",cookie)
self.send(respheader+body)
#self.send(body)
def getCommandServer(self):
result = None
for server in self.server.CALLER.SERVERS:
if server.TYPE=="Command Server":
result = server
break
return result
#end class
class Server(Base):
def __init__(self,serverPort=843, caller=None):
self.TYPE = "Web Server"
Base.__init__(self,"TCP",serverPort,caller)
self.log("Started",2)
#end def
def protocolhandler(self,conn, addr):
# FLASH POLICY FILE SUPPORT
self.HANDLER = HTTPProtoHandler(conn,addr,self)
#end def
#end class
| gpl-3.0 | 3,507,254,456,359,414,300 | 36.142857 | 210 | 0.647692 | false |
georgekwon/pyquick | basic/list2.py | 1 | 2267 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
  # Walk the list once, keeping an element only when it differs from the
  # last element kept; no sorting is needed because only adjacent duplicates
  # collapse, and values such as 0 are handled correctly.
  result = []
  for num in nums:
    if not result or num != result[-1]:
      result.append(num)
  return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
  result = []
  while list1 and list2:
    # Take the smaller front element from whichever sorted list holds it.
    result.append(list1.pop(0) if list1[0] <= list2[0] else list2.pop(0))
  # One list is exhausted; the other's remainder is already in sorted order.
  result.extend(list1 or list2)
  return result
# Note: the solution above is kind of cute, but unfortunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'remove_adjacent'
test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
test(remove_adjacent([]), [])
print
print 'linear_merge'
test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
| apache-2.0 | 1,272,251,162,040,184,600 | 30.054795 | 79 | 0.648434 | false |
codingneo/CLRPrediction | src/model/ftrl_proximal.py | 1 | 3602 | """Follow The Regularized Leader Proximal Online Learning
Author:
"""
from math import exp, sqrt
class model(object):
''' Our main algorithm: Follow the regularized leader - proximal
In short,
this is an adaptive-learning-rate sparse logistic-regression with
efficient L1-L2-regularization
Reference:
http://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf
'''
def __init__(self, alpha, beta, L1, L2, D, interaction):
# parameters
self.alpha = alpha
self.beta = beta
self.L1 = L1
self.L2 = L2
# feature related parameters
self.D = D
self.interaction = interaction
# model
# n: squared sum of past gradients
# z: weights
# w: lazy weights
self.n = [0.] * D
self.z = [0.] * D
self.w = {}
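    # Typical online-learning loop (illustrative; producing ``rows`` of hashed
    # feature indices and 0/1 labels is up to the caller):
    #
    #   learner = model(alpha=.1, beta=1., L1=1., L2=1., D=2 ** 20, interaction=False)
    #   for x, y in rows:
    #       p = learner.predict(x)
    #       learner.update(x, p, y)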
def _indices(self, x):
''' A helper generator that yields the indices in x
The purpose of this generator is to make the following
code a bit cleaner when doing feature interaction.
'''
# first yield index of the bias term
yield 0
# then yield the normal indices
for index in x:
yield index
# now yield interactions (if applicable)
if self.interaction:
D = self.D
L = len(x)
x = sorted(x)
for i in xrange(L):
for j in xrange(i+1, L):
# one-hot encode interactions with hash trick
yield abs(hash(str(x[i]) + '_' + str(x[j]))) % D
def predict(self, x):
''' Get probability estimation on x
INPUT:
x: features
OUTPUT:
probability of p(y = 1 | x; w)
'''
# parameters
alpha = self.alpha
beta = self.beta
L1 = self.L1
L2 = self.L2
# model
n = self.n
z = self.z
w = {}
# wTx is the inner product of w and x
wTx = 0.
for i in self._indices(x):
sign = -1. if z[i] < 0 else 1. # get sign of z[i]
# build w on the fly using z and n, hence the name - lazy weights
# we are doing this at prediction instead of update time is because
# this allows us for not storing the complete w
if sign * z[i] <= L1:
# w[i] vanishes due to L1 regularization
w[i] = 0.
else:
# apply prediction time L1, L2 regularization to z and get w
w[i] = (sign * L1 - z[i]) / ((beta + sqrt(n[i])) / alpha + L2)
wTx += w[i]
# cache the current w for update stage
self.w = w
# bounded sigmoid function, this is the probability estimation
return 1. / (1. + exp(-max(min(wTx, 10.), -10.)))
def update(self, x, p, y):
''' Update model using x, p, y
INPUT:
x: feature, a list of indices
p: click probability prediction of our model
y: answer
MODIFIES:
self.n: increase by squared gradient
self.z: weights
'''
# parameter
alpha = self.alpha
# model
n = self.n
z = self.z
w = self.w
# gradient under logloss
g = p - y
# update z and n
for i in self._indices(x):
sigma = (sqrt(n[i] + g * g) - sqrt(n[i])) / alpha
z[i] += g - sigma * w[i]
n[i] += g * g | apache-2.0 | 8,417,160,105,458,115,000 | 25.688889 | 79 | 0.493615 | false |
nerdvegas/rez | src/rezgui/widgets/VariantSummaryWidget.py | 1 | 4520 | from Qt import QtCompat, QtCore, QtWidgets
from rezgui.util import create_pane, get_timestamp_str
from rez.packages import Package, Variant
from rez.util import find_last_sublist
class VariantSummaryWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(VariantSummaryWidget, self).__init__(parent)
self.variant = None
self.label = QtWidgets.QLabel()
self.table = QtWidgets.QTableWidget(0, 1)
self.table.setGridStyle(QtCore.Qt.DotLine)
self.table.setFocusPolicy(QtCore.Qt.NoFocus)
self.table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.table.setAlternatingRowColors(True)
hh = self.table.horizontalHeader()
hh.setStretchLastSection(True)
hh.setVisible(False)
vh = self.table.verticalHeader()
QtCompat.QHeaderView.setSectionResizeMode(
vh, QtWidgets.QHeaderView.ResizeToContents)
create_pane([self.label, self.table], False, compact=True,
parent_widget=self)
self.clear()
def clear(self):
self.label.setText("no package selected")
self.table.clear()
self.table.setRowCount(0)
vh = self.table.verticalHeader()
vh.setVisible(False)
self.setEnabled(False)
def set_variant(self, variant):
if variant == self.variant:
return
if variant is None:
self.clear()
else:
self.setEnabled(True)
if isinstance(variant, Package):
label_name = variant.qualified_name
location = variant.uri
else:
label_name = variant.qualified_package_name
location = variant.parent.uri
label = "%s@%s" % (label_name, variant.wrapped.location)
self.label.setText(label)
self.table.clear()
rows = []
if variant.description:
desc = variant.description
max_chars = 1000
if len(desc) > max_chars:
desc = desc[:max_chars] + "..."
rows.append(("description: ", desc))
if variant.uri:
rows.append(("location: ", location))
if variant.timestamp:
release_time_str = get_timestamp_str(variant.timestamp)
rows.append(("released: ", release_time_str))
if variant.authors:
txt = "; ".join(variant.authors)
rows.append(("authors: ", txt))
if variant.requires:
var_strs = [str(x) for x in variant.requires]
if isinstance(variant, Variant):
# put variant-specific requires in square brackets
if variant.requires:
index = find_last_sublist(variant.requires, variant.requires)
if index is not None:
var_strs[index] = "[%s" % var_strs[index]
index2 = index + len(variant.requires) - 1
var_strs[index2] = "%s]" % var_strs[index2]
txt = "; ".join(var_strs)
rows.append(("requires: ", txt))
self.table.setRowCount(len(rows))
for i, row in enumerate(rows):
label, value = row
item = QtWidgets.QTableWidgetItem(label)
item.setTextAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.table.setVerticalHeaderItem(i, item)
item = QtWidgets.QTableWidgetItem(value)
self.table.setItem(i, 0, item)
vh = self.table.verticalHeader()
vh.setVisible(True)
self.table.resizeRowsToContents()
self.variant = variant
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 | -3,228,950,963,447,475,000 | 37.632479 | 85 | 0.589159 | false |
maximdanilchenko/fusionBasedRecSys | test_all_values_results.py | 1 | 1449 | from metrics import *
from itembased_recommender_system import *
import shelve
genData('base','u2.base')
print('base data ready')
genData('test','u2.test')
print('test data ready')
base = shelve.open('base')
test = shelve.open('test')
print('data opened %d'%len(base))
tr = transform(base)
print('transformed')
##SupMatrix = multipleSupportMatrix(tr,[PCC,CPCC,SPCC,Jaccard,MSD,JMSD,COS,ACOS],'result')
##print('support matrix calculated!!!')
SM = shelve.open('result')
SupMatrix = {}
for i in SM:
SupMatrix[i] = SM[i]
print('SM opened with size %d' % len(SupMatrix))
sims = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
disims = [-1, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0,
0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
res = {}
n = 300
for sim in sims:
res[sim] = {}
for disim in disims:
originalRes = {}
testRes = {}
itMS = itemMatrixSup(tr,n,SupMatrix,7,sim,disim)
for user in test:
testRes[user] = {}
originalRes[user] = {}
for item in test[user]:
rec = recommendOne(base,tr,itMS,item,user)
if (rec != 200):
testRes[user][item] = rec
originalRes[user][item] = test[user][item]
res[sim][disim] = MAE(originalRes,testRes)
print('MAE for %f sim and %f disim is %f'%(sim,disim,res[sim][disim]))
| mit | 6,582,981,674,418,804,000 | 27.571429 | 90 | 0.547964 | false |
innes213/TradingTools | examples/dashboard.py | 1 | 2782 | from pyhoofinance.defs import *
from pyhoofinance.quotedata import get_quote
from tradingtools.market_metrics.historic_change_and_stdv import s_and_p_historic
from tradingtools.market_metrics.market_cap_index_performance import market_cap_index_performance
from tradingtools.market_metrics.sector_performance import sector_performance
from tradingtools.technicals.indicators.SMA import SMA
if __name__ == '__main__':
day_ranges = [1, 2, 5, 10, 20, 100, 200, 500]
print '\n================= S&P Dashboard =================\n'
print '\nMarket Cap index performance:\n'
data = market_cap_index_performance(dayranges=day_ranges)
if data is not None:
outstr = 'Index\t'
for i in day_ranges:
outstr = outstr + str(i) + '-day\t'
print outstr
for idx, perf_list in data:
outstr = '%s: \t' % idx
for perf in perf_list:
outstr = outstr + '%5.2f%%\t' % (100 * perf)
print outstr
print '\nSummary of price changes\n'
data = s_and_p_historic(1)
for daydata in data:
outstr = '%12s: ' % str(daydata['tradedate']) + \
'Advancers: %5i \t' % daydata['gainers'] + \
'Decliners: %5i \t' % daydata['decliners'] + \
'Average change: %2.2f%% \t' % daydata['avgpercentchange'] + \
'Std Dev: %2.2f%% \t' % daydata['percentchangestdev'] + \
'Total Volume: %i \t' % int(daydata['volume'])
print outstr
print '\nS & P Sector Performance\n'
data = sector_performance(day_ranges)
if data is not None:
outstr = 'Sector'
for i in day_ranges:
outstr = outstr + '\t%i-day' % i
print outstr
for symbol, perf_data in data:
outstr = '%s:' % symbol
for perf in perf_data:
outstr = outstr + '\t%3.2f%%' % (100 * perf)
print outstr
# Sector Rotation triggers
print '\nS & P Sector Rotation\n'
spyquote = get_quote('SPY')
spylast = spyquote[LAST_TRADE_PRICE_ONLY_STR]
d0 = spyquote[LAST_TRADE_DATE_STR]
    # TODO: replace number of days with 1 month and 1 year
# get S&P 500 1 year performance and moving average
spymadays = 240 # values greater than 36 diverge from yahoo and etrade sma calculations
spysma = SMA(num_periods=1, window_size=spymadays).calculate_for_symbol('SPY')[0]
spymadelta = 100 * (spylast - spysma) / spysma
num_days = 22
data = sector_performance(num_days)
print d0.strftime('As of %d %b, %Y')
print 'SPY difference from %i moving average: %3.2f%% ' % (spymadays, spymadelta)
print '%i-Day Performance' % num_days
for symbol, perf in data:
print '%s: %3.2f%%' % (symbol, 100 * perf)
| bsd-2-clause | 7,186,727,864,974,381,000 | 39.318841 | 97 | 0.593817 | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/types/shared_criterion_service.py | 1 | 5893 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import response_content_type as gage_response_content_type
from google.ads.googleads.v6.resources.types import shared_criterion as gagr_shared_criterion
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.services',
marshal='google.ads.googleads.v6',
manifest={
'GetSharedCriterionRequest',
'MutateSharedCriteriaRequest',
'SharedCriterionOperation',
'MutateSharedCriteriaResponse',
'MutateSharedCriterionResult',
},
)
class GetSharedCriterionRequest(proto.Message):
r"""Request message for
[SharedCriterionService.GetSharedCriterion][google.ads.googleads.v6.services.SharedCriterionService.GetSharedCriterion].
Attributes:
resource_name (str):
Required. The resource name of the shared
criterion to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateSharedCriteriaRequest(proto.Message):
r"""Request message for
[SharedCriterionService.MutateSharedCriteria][google.ads.googleads.v6.services.SharedCriterionService.MutateSharedCriteria].
Attributes:
customer_id (str):
Required. The ID of the customer whose shared
criteria are being modified.
operations (Sequence[google.ads.googleads.v6.services.types.SharedCriterionOperation]):
Required. The list of operations to perform
on individual shared criteria.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v6.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
operations = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='SharedCriterionOperation',
)
partial_failure = proto.Field(
proto.BOOL,
number=3,
)
validate_only = proto.Field(
proto.BOOL,
number=4,
)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class SharedCriterionOperation(proto.Message):
r"""A single operation (create, remove) on an shared criterion.
Attributes:
create (google.ads.googleads.v6.resources.types.SharedCriterion):
Create operation: No resource name is
expected for the new shared criterion.
remove (str):
Remove operation: A resource name for the removed shared
criterion is expected, in this format:
``customers/{customer_id}/sharedCriteria/{shared_set_id}~{criterion_id}``
"""
create = proto.Field(
proto.MESSAGE,
number=1,
oneof='operation',
message=gagr_shared_criterion.SharedCriterion,
)
remove = proto.Field(
proto.STRING,
number=3,
oneof='operation',
)
class MutateSharedCriteriaResponse(proto.Message):
r"""Response message for a shared criterion mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v6.services.types.MutateSharedCriterionResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE,
number=3,
message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MutateSharedCriterionResult',
)
class MutateSharedCriterionResult(proto.Message):
r"""The result for the shared criterion mutate.
Attributes:
resource_name (str):
Returned for successful operations.
shared_criterion (google.ads.googleads.v6.resources.types.SharedCriterion):
The mutated shared criterion with only mutable fields after
mutate. The field will only be returned when
response_content_type is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
shared_criterion = proto.Field(
proto.MESSAGE,
number=2,
message=gagr_shared_criterion.SharedCriterion,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -4,445,974,067,140,955,000 | 32.674286 | 128 | 0.665535 | false |
Winand/pandas | pandas/core/internals.py | 1 | 186942 | import copy
from warnings import catch_warnings
import itertools
import re
import operator
from datetime import datetime, timedelta, date
from collections import defaultdict
from functools import partial
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.dtypes.dtypes import (
ExtensionDtype, DatetimeTZDtype,
CategoricalDtype)
from pandas.core.dtypes.common import (
_TD_DTYPE, _NS_DTYPE,
_ensure_int64, _ensure_platform_int,
is_integer,
is_dtype_equal,
is_timedelta64_dtype,
is_datetime64_dtype, is_datetimetz, is_sparse,
is_categorical, is_categorical_dtype,
is_integer_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_object_dtype,
is_datetimelike_v_numeric,
is_float_dtype, is_numeric_dtype,
is_numeric_v_string_like, is_extension_type,
is_list_like,
is_re,
is_re_compilable,
is_scalar,
_get_dtype)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_upcast,
maybe_promote,
infer_dtype_from,
infer_dtype_from_scalar,
soft_convert_objects,
maybe_convert_objects,
astype_nansafe,
find_common_type)
from pandas.core.dtypes.missing import (
isna, notna, array_equivalent,
_isna_compat,
is_null_datelike_scalar)
import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex
from pandas.core.common import is_null_slice
import pandas.core.algorithms as algos
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, _maybe_to_categorical
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.io.formats.printing import pprint_thing
import pandas.core.missing as missing
from pandas.core.sparse.array import _maybe_to_sparse, SparseArray
from pandas._libs import lib, tslib
from pandas._libs.tslib import Timedelta
from pandas._libs.lib import BlockPlacement
import pandas.core.computation.expressions as expressions
from pandas.util._decorators import cache_readonly
from pandas.util._validators import validate_bool_kwarg
from pandas import compat
from pandas.compat import range, map, zip, u
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_datetimetz = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_box_to_block_values = True
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if ndim and len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d, placement '
'implies %d' % (len(self.values),
len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if dtype is Categorical or dtype is CategoricalDtype:
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
elif is_categorical_dtype(dtype):
return True
return False
def external_values(self, dtype=None):
""" return an outside world format, currently just the ndarray """
return self.values
def internal_values(self, dtype=None):
""" return an internal format, currently just the ndarray
this should be the pure internal API format
"""
return self.values
def formatting_values(self):
"""Return the internal values used by the DataFrame/SeriesFormatter"""
return self.internal_values()
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
this is often overriden to handle to_dense like operations
"""
if is_object_dtype(dtype):
return self.values.astype(object)
return self.values
def to_dense(self):
return self.values.view()
@property
def _na_value(self):
return np.nan
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return self.dtype
def make_block(self, values, placement=None, ndim=None, **kwargs):
"""
        Create a new block, with type inference, propagating any values that
        are not specified
"""
if placement is None:
placement = self.mgr_locs
if ndim is None:
ndim = self.ndim
return make_block(values, placement=placement, ndim=ndim, **kwargs)
def make_block_scalar(self, values, **kwargs):
"""
Create a ScalarBlock
"""
return ScalarBlock(values)
def make_block_same_class(self, values, placement=None, fastpath=True,
**kwargs):
""" Wrap given values in a block of same type as self. """
if placement is None:
placement = self.mgr_locs
return make_block(values, placement=placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (name, len(self), self.dtype)
else:
shape = ' x '.join([pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (name, pprint_thing(
self.mgr_locs.indexer), shape, self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items, mgr=None):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(values=self.get_values().T,
placement=self.mgr_locs, shape=shape,
labels=labels, ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = algos.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return self.make_block(new_values, fastpath=True)
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, mgr=None, **kwargs):
""" apply the function to my values; return a block if we are not
one
"""
with np.errstate(all='ignore'):
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = self.make_block(values=_block_shape(result,
ndim=self.ndim))
return result
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
""" fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
mask = isna(self.values)
if limit is not None:
if not is_integer(limit):
raise ValueError('Limit must be an integer')
if limit < 1:
raise ValueError('Limit must be greater than 0')
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim - 1) > limit] = False
# fillna, but if we cannot coerce, then try again as an ObjectBlock
try:
values, _, _, _ = self._try_coerce_args(self.values, value)
blocks = self.putmask(mask, value, inplace=inplace)
blocks = [b.make_block(values=self._try_coerce_result(b.values))
for b in blocks]
return self._maybe_downcast(blocks, downcast)
except (TypeError, ValueError):
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
# operate column-by-column
def f(m, v, i):
block = self.coerce_to_target_dtype(value)
# slice out our block
if i is not None:
block = block.getitem_block(slice(i, i + 1))
return block.fillna(value,
limit=limit,
inplace=inplace,
downcast=None)
return self.split_and_operate(mask, f, inplace)
def split_and_operate(self, mask, f, inplace):
"""
split the block per-column, and apply the callable f
per-column, return a new block for each. Handle
masking which will not change a block unless needed.
Parameters
----------
mask : 2-d boolean mask
f : callable accepting (1d-mask, 1d values, indexer)
inplace : boolean
Returns
-------
list of blocks
"""
if mask is None:
mask = np.ones(self.shape, dtype=bool)
new_values = self.values
def make_a_block(nv, ref_loc):
if isinstance(nv, Block):
block = nv
elif isinstance(nv, list):
block = nv[0]
else:
# Put back the dimension that was taken from it and make
# a block out of the result.
try:
nv = _block_shape(nv, ndim=self.ndim)
except (AttributeError, NotImplementedError):
pass
block = self.make_block(values=nv,
placement=ref_loc, fastpath=True)
return block
# ndim == 1
if self.ndim == 1:
if mask.any():
nv = f(mask, new_values, None)
else:
nv = new_values if inplace else new_values.copy()
block = make_a_block(nv, self.mgr_locs)
return [block]
# ndim > 1
new_blocks = []
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
nv = f(m, v, i)
else:
nv = v if inplace else v.copy()
block = make_a_block(nv, [ref_loc])
new_blocks.append(block)
return new_blocks
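    # Illustrative sketch (not part of the original source): split_and_operate
    # walks mgr_locs column-by-column, calls ``f(mask_row, values_row, i)`` and
    # wraps each result in its own single-column block. Assuming a hypothetical
    # 2-D float block ``blk`` over a (2, 3) ndarray, a doctest-style trace
    # might look roughly like:
    #
    #   >>> import numpy as np
    #   >>> vals = np.array([[1., np.nan, 3.], [4., 5., np.nan]])
    #   >>> blk = make_block(vals, placement=[0, 1])
    #   >>> f = lambda m, v, i: np.where(m, 0., v)   # fill NaNs per column
    #   >>> [b.values for b in blk.split_and_operate(np.isnan(vals), f, False)]
    #   [array([[ 1.,  0.,  3.]]), array([[ 4.,  5.,  0.]])]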
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
if not isinstance(blocks, list):
blocks = [blocks]
return _extend_blocks([b.downcast(downcast) for b in blocks])
def downcast(self, dtypes=None, mgr=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return self
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = maybe_downcast_to_dtype(values, dtypes)
return self.make_block(nv, fastpath=True)
# ndim > 1
if dtypes is None:
return self
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# operate column-by-column
# this is expensive as it splits the blocks items-by-item
def f(m, v, i):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
if dtype is not None:
v = maybe_downcast_to_dtype(v, dtype)
return v
return self.split_and_operate(None, f, False)
def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
return self._astype(dtype, copy=copy, errors=errors, values=values,
**kwargs)
def _astype(self, dtype, copy=False, errors='raise', values=None,
klass=None, mgr=None, raise_on_error=False, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
errors_legal_values = ('raise', 'ignore')
if errors not in errors_legal_values:
invalid_arg = ("Expected value of kwarg 'errors' to be one of {}. "
"Supplied value is '{}'".format(
list(errors_legal_values), errors))
raise ValueError(invalid_arg)
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
if (('categories' in kwargs or 'ordered' in kwargs) and
isinstance(dtype, CategoricalDtype)):
raise TypeError("Cannot specify a CategoricalDtype and also "
"`categories` or `ordered`. Use "
"`dtype=CategoricalDtype(categories, ordered)`"
" instead.")
kwargs = kwargs.copy()
categories = getattr(dtype, 'categories', None)
ordered = getattr(dtype, 'ordered', False)
kwargs.setdefault('categories', categories)
kwargs.setdefault('ordered', ordered)
return self.make_block(Categorical(self.values, **kwargs))
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
if issubclass(dtype.type,
(compat.text_type, compat.string_types)):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.values
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
values = astype_nansafe(values.ravel(), dtype, copy=True)
values = values.reshape(self.shape)
newb = make_block(values, placement=self.mgr_locs, dtype=dtype,
klass=klass)
except:
if errors == 'raise':
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types return a copy
of the block (if copy = True) by definition we are not an ObjectBlock
here!
"""
return self.copy() if copy else self
def _can_hold_element(self, element):
""" require the same dtype as ourselves """
dtype = self.values.dtype.type
if is_list_like(element):
element = np.asarray(element)
tipo = element.dtype.type
return issubclass(tipo, dtype)
return isinstance(element, dtype)
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type, we may have
roundtripped thru object in the mean-time
"""
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isna(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return maybe_downcast_to_dtype(result, dtype)
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
if np.any(notna(other)) and not self._can_hold_element(other):
# coercion issues
# let higher levels handle
raise TypeError("cannot convert {} to an {}".format(
type(other).__name__,
type(self).__name__.lower().replace('Block', '')))
return values, False, other, False
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def to_native_types(self, slicer=None, na_rep='nan', quoting=None,
**kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True, mgr=None):
""" copy constructor """
values = self.values
if deep:
values = values.copy()
return self.make_block_same_class(values)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
original_to_replace = to_replace
# try to replace, if we raise an error, convert to ObjectBlock and
# retry
try:
values, _, to_replace, _ = self._try_coerce_args(self.values,
to_replace)
mask = missing.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
blocks = self.putmask(mask, value, inplace=inplace)
if convert:
blocks = [b.convert(by_item=True, numeric=False,
copy=not inplace) for b in blocks]
return blocks
except (TypeError, ValueError):
# try again with a compatible block
block = self.astype(object)
return block.replace(
to_replace=original_to_replace, value=value, inplace=inplace,
filter=filter, regex=regex, convert=convert)
def _replace_single(self, *args, **kwargs):
""" no-op on a non-ObjectBlock """
return self if kwargs['inplace'] else self.copy()
def setitem(self, indexer, value, mgr=None):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce if block dtype can store value
values = self.values
try:
values, _, value, _ = self._try_coerce_args(values, value)
# can keep its own dtype
if hasattr(value, 'dtype') and is_dtype_equal(values.dtype,
value.dtype):
dtype = self.dtype
else:
dtype = 'infer'
except (TypeError, ValueError):
# current dtype cannot store value, coerce to common dtype
find_dtype = False
if hasattr(value, 'dtype'):
dtype = value.dtype
find_dtype = True
elif is_scalar(value):
if isna(value):
# NaN promotion is handled in latter path
dtype = False
else:
dtype, _ = infer_dtype_from_scalar(value,
pandas_dtype=True)
find_dtype = True
else:
dtype = 'infer'
if find_dtype:
dtype = find_common_type([values.dtype, dtype])
if not is_dtype_equal(self.dtype, dtype):
b = self.astype(dtype)
return b.setitem(indexer, value, mgr=mgr)
# value must be storeable at this moment
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
                return all(is_scalar(idx) for idx in indexer)
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0
for idx in indexer)
return False
# empty indexers
        # GH 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could
# be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif (len(arr_value.shape) and
arr_value.shape[0] == values.shape[0] and
np.prod(arr_value.shape) == np.prod(values.shape)):
values[indexer] = value
try:
values = values.astype(arr_value.dtype)
except ValueError:
pass
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
values = self._try_coerce_and_cast_result(values, dtype)
block = self.make_block(transf(values), fastpath=True)
return block
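    # Illustrative sketch (not part of the original source): when the incoming
    # value cannot be held by the current dtype, setitem re-casts the block to
    # a common dtype before writing. With a hypothetical int64 block ``blk``:
    #
    #   >>> import numpy as np
    #   >>> blk = make_block(np.arange(3, dtype='int64').reshape(1, 3),
    #   ...                  placement=[0])
    #   >>> blk2 = blk.setitem(1, 2.5)        # float value forces an upcast
    #   >>> blk2.values
    #   array([[ 0. ,  2.5,  2. ]])
    #   >>> blk2.dtype
    #   dtype('float64')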
def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False, mgr=None):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a list of new blocks, the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
if hasattr(new, 'reindex_axis'):
new = new.values
if hasattr(mask, 'reindex_axis'):
mask = mask.values
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isna(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
_, _, new, _ = self._try_coerce_args(new_values, new)
if transpose:
new_values = new_values.T
# If the default repeat behavior in np.putmask would go in the
            # wrong direction, then explicitly repeat and reshape new instead
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim and axis == 1:
new = np.repeat(
new, new_values.shape[-1]).reshape(self.shape)
new = new.astype(new_values.dtype)
# we require exact matches between the len of the
# values we are setting (or is compat). np.putmask
# doesn't check this and will simply truncate / pad
# the output, but we want sane error messages
#
# TODO: this prob needs some better checking
# for 2D cases
if ((is_list_like(new) and
np.any(mask[mask]) and
getattr(new, 'ndim', 1) == 1)):
if not (mask.shape[-1] == len(new) or
mask[mask].shape[-1] == len(new) or
len(new) == 1):
raise ValueError("cannot assign mismatch "
"length to masked array")
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
if transpose:
mask = mask.T
if isinstance(new, np.ndarray):
new = new.T
axis = new_values.ndim - axis - 1
# Pseudo-broadcast
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim:
new_shape = list(new.shape)
new_shape.insert(axis, 1)
new = new.reshape(tuple(new_shape))
# operate column-by-column
def f(m, v, i):
if i is None:
# ndim==1 case.
n = new
else:
if isinstance(new, np.ndarray):
n = np.squeeze(new[i % new.shape[0]])
else:
n = np.array(new)
# type of the new block
dtype, _ = maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
return nv
new_blocks = self.split_and_operate(mask, f, inplace)
return new_blocks
if inplace:
return [self]
if transpose:
new_values = new_values.T
return [self.make_block(new_values, fastpath=True)]
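    # Illustrative sketch (not part of the original source), assuming a
    # hypothetical (2, 3) int64 block ``blk``:
    #
    #   >>> import numpy as np
    #   >>> vals = np.arange(6).reshape(2, 3)
    #   >>> blk = make_block(vals, placement=[0, 1])
    #   >>> blk.putmask(vals > 3, -1)[0].values
    #   array([[ 0,  1,  2],
    #          [ 3, -1, -1]])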
def coerce_to_target_dtype(self, other):
"""
coerce the current block to a dtype compat for other
we will return a block, possibly object, and not raise
we can also safely try to coerce to the same dtype
and will receive the same block
"""
# if we cannot then coerce to object
dtype, _ = infer_dtype_from(other, pandas_dtype=True)
if is_dtype_equal(self.dtype, dtype):
return self
if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):
# we don't upcast to bool
return self.astype(object)
elif ((self.is_float or self.is_complex) and
(is_integer_dtype(dtype) or is_float_dtype(dtype))):
# don't coerce float/complex to int
return self
elif (self.is_datetime or
is_datetime64_dtype(dtype) or
is_datetime64tz_dtype(dtype)):
# not a datetime
if not ((is_datetime64_dtype(dtype) or
is_datetime64tz_dtype(dtype)) and self.is_datetime):
return self.astype(object)
# don't upcast timezone with different timezone or no timezone
mytz = getattr(self.dtype, 'tz', None)
othertz = getattr(dtype, 'tz', None)
if str(mytz) != str(othertz):
return self.astype(object)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
self, other))
elif (self.is_timedelta or is_timedelta64_dtype(dtype)):
# not a timedelta
if not (is_timedelta64_dtype(dtype) and self.is_timedelta):
return self.astype(object)
raise AssertionError("possible recursion in "
"coerce_to_target_dtype: {} {}".format(
self, other))
try:
return self.astype(dtype)
except (ValueError, TypeError):
pass
return self.astype(object)
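    # Illustrative sketch (not part of the original source), assuming a
    # hypothetical int64 block ``blk``: coercing against a float picks the
    # common float64 dtype, while an incompatible scalar falls back to object.
    #
    #   >>> blk.coerce_to_target_dtype(1.5).dtype
    #   dtype('float64')
    #   >>> blk.coerce_to_target_dtype('foo').dtype
    #   dtype('O')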
def interpolate(self, method='pad', axis=0, index=None, values=None,
inplace=False, limit=None, limit_direction='forward',
fill_value=None, coerce=False, downcast=None, mgr=None,
**kwargs):
inplace = validate_bool_kwarg(inplace, 'inplace')
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = missing.clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m, axis=axis,
inplace=inplace, limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast, mgr=mgr)
# try an interp method
try:
m = missing.clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m, index=index, values=values,
axis=axis, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value, inplace=inplace,
downcast=downcast, mgr=mgr, **kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None, mgr=None):
""" fillna but using the interpolate machinery """
inplace = validate_bool_kwarg(inplace, 'inplace')
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
values = self.values if inplace else self.values.copy()
values, _, fill_value, _ = self._try_coerce_args(values, fill_value)
values = missing.interpolate_2d(values, method=method, axis=axis,
limit=limit, fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [self.make_block(values, klass=self.__class__, fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
limit_direction='forward', inplace=False, downcast=None,
mgr=None, **kwargs):
""" interpolate using scipy wrappers """
inplace = validate_bool_kwarg(inplace, 'inplace')
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to missing.interpolate_1d
return missing.interpolate_1d(index, x, method=method, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [self.make_block(interp_values, klass=self.__class__,
fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
# algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
# so need to preserve types
# sparse is treated like an ndarray, but needs .get_values() shaping
values = self.values
if self.is_sparse:
values = self.get_values()
if fill_tuple is None:
fill_value = self.fill_value
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if not is_dtype_equal(new_values.dtype, self.dtype):
return self.make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def diff(self, n, axis=1, mgr=None):
""" return block for the diff of the values """
new_values = algos.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values, fastpath=True)]
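    # Illustrative sketch (not part of the original source) of what algos.diff
    # produces for n=1 on a plain 1-D float ndarray (the first slot becomes
    # NaN):
    #
    #   >>> import numpy as np
    #   >>> algos.diff(np.array([1., 4., 9.]), 1, axis=0)
    #   array([ nan,   3.,   5.])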
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, _ensure_platform_int(periods),
axis=axis)
axis_indexer = [slice(None)] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [self.make_block(new_values, fastpath=True)]
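    # Illustrative sketch (not part of the original source) of the
    # roll-and-fill mechanics above on a plain 1-D ndarray (the block is
    # upcast first so the vacated slots can hold the fill value):
    #
    #   >>> import numpy as np
    #   >>> vals, fill_value = np.array([1., 2., 3., 4.]), np.nan
    #   >>> shifted = np.roll(vals, 2)
    #   >>> shifted[:2] = fill_value
    #   >>> shifted
    #   array([ nan,  nan,   1.,   2.])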
def eval(self, func, other, raise_on_error=True, try_cast=False, mgr=None):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
try_cast : try casting the results to the input type
Returns
-------
a new block, the result of the func
"""
orig_other = other
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
                    # this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
try:
values, values_mask, other, other_mask = self._try_coerce_args(
transf(values), other)
except TypeError:
block = self.coerce_to_target_dtype(orig_other)
return block.eval(func, orig_other,
raise_on_error=raise_on_error,
try_cast=try_cast, mgr=mgr)
# get the result, may need to transpose the other
def get_result(other):
            # avoid numpy warning of comparisons against None
if other is None:
result = not func.__name__ == 'eq'
# avoid numpy warning of elementwise comparisons to object
elif is_numeric_v_string_like(values, other):
result = False
# avoid numpy warning of elementwise comparisons
elif func.__name__ == 'eq':
if is_list_like(other) and not isinstance(other, np.ndarray):
other = np.asarray(other)
# if we can broadcast, then ok
if values.shape[-1] != other.shape[-1]:
return False
result = func(values, other)
else:
result = func(values, other)
# mask if needed
if isinstance(values_mask, np.ndarray) and values_mask.any():
result = result.astype('float64', copy=False)
result[values_mask] = np.nan
if other_mask is True:
result = result.astype('float64', copy=False)
result[:] = np.nan
elif isinstance(other_mask, np.ndarray) and other_mask.any():
result = result.astype('float64', copy=False)
result[other_mask.ravel()] = np.nan
return result
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
# The 'detail' variable is defined in outer scope.
raise TypeError('Could not operate %s with block values %s' %
(repr(other), str(detail))) # noqa
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
with np.errstate(all='ignore'):
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
        if not isinstance(result, np.ndarray):
            # differentiate between an invalid ndarray-ndarray comparison
            # and an invalid type comparison
            if isinstance(values, np.ndarray) and is_list_like(other):
                raise ValueError('Invalid broadcasting comparison [%s] '
                                 'with block values' % repr(other))
            raise TypeError('Could not compare [%s] with block values' %
                            repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
result = _block_shape(result, ndim=self.ndim)
return [self.make_block(result, fastpath=True, )]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False, axis=0, transpose=False, mgr=None):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a new block(s), the result of the func
"""
values = self.values
orig_other = other
if transpose:
values = values.T
if hasattr(other, 'reindex_axis'):
other = other.values
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# If the default broadcasting would go in the wrong direction, then
        # explicitly reshape other instead
if getattr(other, 'ndim', 0) >= 1:
if values.ndim - 1 == other.ndim and axis == 1:
other = other.reshape(tuple(other.shape + (1, )))
if not hasattr(cond, 'shape'):
raise ValueError("where must have a condition that is ndarray "
"like")
# our where function
def func(cond, values, other):
if cond.ravel().all():
return values
values, values_mask, other, other_mask = self._try_coerce_args(
values, other)
try:
return self._try_coerce_result(expressions.where(
cond, values, other, raise_on_error=True))
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
try:
result = func(cond, values, other)
except TypeError:
# we cannot coerce, return a compat dtype
            # we are explicitly ignoring raise_on_error here
block = self.coerce_to_target_dtype(other)
blocks = block.where(orig_other, cond, align=align,
raise_on_error=raise_on_error,
try_cast=try_cast, axis=axis,
transpose=transpose)
return self._maybe_downcast(blocks, 'infer')
if self._can_hold_na or self.ndim == 1:
if transpose:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return self.make_block(result)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(result.take(m.nonzero()[0],
axis=axis))
result_blocks.append(
self.make_block(r.T, placement=self.mgr_locs[m]))
return result_blocks
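    # Illustrative sketch (not part of the original source) for a hypothetical
    # float block ``blk``: values are kept where ``cond`` is True and replaced
    # by ``other`` elsewhere.
    #
    #   >>> import numpy as np
    #   >>> vals = np.array([[1., 2., 3.]])
    #   >>> blk = make_block(vals, placement=[0])
    #   >>> blk.where(0., vals > 1.5).values
    #   array([[ 0.,  2.,  3.]])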
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape:
return False
return array_equivalent(self.values, other.values)
def _unstack(self, unstacker_func, new_columns):
"""Return a list of unstacked blocks of self
Parameters
----------
unstacker_func : callable
Partially applied unstacker.
new_columns : Index
All columns of the unstacked BlockManager.
Returns
-------
blocks : list of Block
New blocks of unstacked values.
mask : array_like of bool
The mask of columns of `blocks` we should keep.
"""
unstacker = unstacker_func(self.values.T)
new_items = unstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = unstacker.get_new_values()
mask = mask.any(0)
new_values = new_values.T[mask]
new_placement = new_placement[mask]
blocks = [make_block(new_values, placement=new_placement)]
return blocks, mask
def quantile(self, qs, interpolation='linear', axis=0, mgr=None):
"""
        compute the quantiles of the block
Parameters
----------
qs: a scalar or list of the quantiles to be computed
interpolation: type of interpolation, default 'linear'
axis: axis to compute, default 0
Returns
-------
tuple of (axis, block)
"""
kw = {'interpolation': interpolation}
values = self.get_values()
values, _, _, _ = self._try_coerce_args(values, values)
def _nanpercentile1D(values, mask, q, **kw):
values = values[~mask]
if len(values) == 0:
if is_scalar(q):
return self._na_value
else:
return np.array([self._na_value] * len(q),
dtype=values.dtype)
return np.percentile(values, q, **kw)
def _nanpercentile(values, q, axis, **kw):
mask = isna(self.values)
if not is_scalar(mask) and mask.any():
if self.ndim == 1:
return _nanpercentile1D(values, mask, q, **kw)
else:
# for nonconsolidatable blocks mask is 1D, but values 2D
if mask.ndim < values.ndim:
mask = mask.reshape(values.shape)
if axis == 0:
values = values.T
mask = mask.T
result = [_nanpercentile1D(val, m, q, **kw) for (val, m)
in zip(list(values), list(mask))]
result = np.array(result, dtype=values.dtype, copy=False).T
return result
else:
return np.percentile(values, q, axis=axis, **kw)
from pandas import Float64Index
is_empty = values.shape[axis] == 0
if is_list_like(qs):
ax = Float64Index(qs)
if is_empty:
if self.ndim == 1:
result = self._na_value
else:
# create the array of na_values
# 2d len(values) * len(qs)
result = np.repeat(np.array([self._na_value] * len(qs)),
len(values)).reshape(len(values),
len(qs))
else:
try:
result = _nanpercentile(values, np.array(qs) * 100,
axis=axis, **kw)
except ValueError:
# older numpies don't handle an array for q
result = [_nanpercentile(values, q * 100,
axis=axis, **kw) for q in qs]
result = np.array(result, copy=False)
if self.ndim > 1:
result = result.T
else:
if self.ndim == 1:
ax = Float64Index([qs])
else:
ax = mgr.axes[0]
if is_empty:
if self.ndim == 1:
result = self._na_value
else:
result = np.array([self._na_value] * len(self))
else:
result = _nanpercentile(values, qs * 100, axis=axis, **kw)
ndim = getattr(result, 'ndim', None) or 0
result = self._try_coerce_result(result)
if is_scalar(result):
return ax, self.make_block_scalar(result)
return ax, make_block(result,
placement=np.arange(len(result)),
ndim=ndim)
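    # Illustrative sketch (not part of the original source) of the NaN-aware
    # percentile logic used above, on a plain 1-D ndarray:
    #
    #   >>> import numpy as np
    #   >>> vals = np.array([1., np.nan, 3., 4.])
    #   >>> mask = np.isnan(vals)
    #   >>> np.percentile(vals[~mask], 50, interpolation='linear')
    #   3.0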
class ScalarBlock(Block):
"""
a scalar compat Block
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
def __init__(self, values):
self.ndim = 0
self.mgr_locs = [0]
self.values = values
@property
def dtype(self):
return type(self.values)
@property
def shape(self):
return tuple([0])
def __len__(self):
return 0
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False, **kwargs):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
@property
def shape(self):
if self.ndim == 1:
            return (len(self.values),)
return (len(self.mgr_locs), len(self.values))
def get_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if not is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False, mgr=None):
"""
putmask the data to the block; we must be a single block and not
generate other blocks
return the resulting block
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# use block's copy logic.
# .values may be an Index which does shallow copy by default
new_values = self.values if inplace else self.copy().values
new_values, _, new, _ = self._try_coerce_args(new_values, new)
if isinstance(new, np.ndarray) and len(new) == len(mask):
new = new[mask]
mask = _safe_reshape(mask, new_values.shape)
new_values[mask] = new
new_values = self._try_coerce_result(new_values)
return [self.make_block(values=new_values)]
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
def _unstack(self, unstacker_func, new_columns):
"""Return a list of unstacked blocks of self
Parameters
----------
unstacker_func : callable
Partially applied unstacker.
new_columns : Index
All columns of the unstacked BlockManager.
Returns
-------
blocks : list of Block
New blocks of unstacked values.
mask : array_like of bool
The mask of columns of `blocks` we should keep.
"""
# NonConsolidatable blocks can have a single item only, so we return
# one block per item
unstacker = unstacker_func(self.values.T)
new_items = unstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = unstacker.get_new_values()
mask = mask.any(0)
new_values = new_values.T[mask]
new_placement = new_placement[mask]
blocks = [self.make_block_same_class(vals, [place])
for vals, place in zip(new_values, new_placement)]
return blocks, mask
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape:
return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.asarray(element)
tipo = element.dtype.type
return (issubclass(tipo, (np.floating, np.integer)) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
return (isinstance(element, (float, int, np.floating, np.int_)) and
not isinstance(element, (bool, np.bool_, datetime, timedelta,
np.datetime64, np.timedelta64)))
def to_native_types(self, slicer=None, na_rep='', float_format=None,
decimal='.', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
# see gh-13418: no special formatting is desired at the
# output (important for appropriate 'quoting' behaviour),
# so do not pass it through the FloatArrayFormatter
if float_format is None and decimal == '.':
mask = isna(values)
if not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(values, na_rep=na_rep,
float_format=float_format,
decimal=decimal, quoting=quoting,
fixed_width=False)
return formatter.get_result_as_array()
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type,
(np.floating, np.integer, np.complexfloating))
return (isinstance(element,
(float, int, complex, np.float_, np.int_)) and
not isinstance(element, (bool, np.bool_)))
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return (issubclass(tipo, np.integer) and
not issubclass(tipo, (np.datetime64, np.timedelta64)) and
self.dtype.itemsize >= element.dtype.itemsize)
return is_integer(element)
def should_store(self, value):
return is_integer_dtype(value) and value.dtype == self.dtype
class DatetimeLikeBlockMixin(object):
@property
def _na_value(self):
return tslib.NaT
@property
def fill_value(self):
return tslib.iNaT
def get_values(self, dtype=None):
"""
return object dtype as boxed values, such as Timestamps/Timedelta
"""
if is_object_dtype(dtype):
return lib.map_infer(self.values.ravel(),
self._box_func).reshape(self.values.shape)
return self.values
class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def _box_func(self):
return lambda x: tslib.Timedelta(x, unit='ns')
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.timedelta64)
return isinstance(element, (timedelta, np.timedelta64))
def fillna(self, value, **kwargs):
# allow filling with integers to be
# interpreted as seconds
if is_integer(value) and not isinstance(value, np.timedelta64):
value = Timedelta(value, unit='s')
return super(TimeDeltaBlock, self).fillna(value, **kwargs)
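    # Illustrative sketch (not part of the original source): an integer fill
    # value is interpreted as a number of seconds before delegating to the
    # base-class fillna, i.e. it becomes
    #
    #   >>> Timedelta(5, unit='s')
    #   Timedelta('0 days 00:00:05')
    #
    # rather than 5 nanoseconds.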
def _try_coerce_args(self, values, other):
"""
Coerce values and other to int64, with null values converted to
iNaT. values is always ndarray-like, other may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isna(values)
values = values.view('i8')
other_mask = False
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, Timedelta):
other_mask = isna(other)
other = other.value
elif isinstance(other, timedelta):
other = Timedelta(other).value
elif isinstance(other, np.timedelta64):
other_mask = isna(other)
other = Timedelta(other).value
elif hasattr(other, 'dtype') and is_timedelta64_dtype(other):
other_mask = isna(other)
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isna(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, (np.integer, np.float)):
result = self._box_func(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None,
**kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isna(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
# FIXME:
# should use the formats.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.asarray(element)
return issubclass(element.dtype.type, np.bool_)
return isinstance(element, (bool, np.bool_))
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
inplace = validate_bool_kwarg(inplace, 'inplace')
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex, convert=convert,
mgr=mgr)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False, placement=None,
**kwargs):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim, fastpath=fastpath,
placement=placement, **kwargs)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
# TODO: Refactor when convert_objects is removed since there will be 1 path
def convert(self, *args, **kwargs):
""" attempt to coerce any object types to better types return a copy of
the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
if args:
raise NotImplementedError
by_item = True if 'by_item' not in kwargs else kwargs['by_item']
new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta']
new_style = False
for kw in new_inputs:
new_style |= kw in kwargs
if new_style:
fn = soft_convert_objects
fn_inputs = new_inputs
else:
fn = maybe_convert_objects
fn_inputs = ['convert_dates', 'convert_numeric',
'convert_timedeltas']
fn_inputs += ['copy']
fn_kwargs = {}
for key in fn_inputs:
if key in kwargs:
fn_kwargs[key] = kwargs[key]
# operate column-by-column
def f(m, v, i):
shape = v.shape
values = fn(v.ravel(), **fn_kwargs)
try:
values = values.reshape(shape)
values = _block_shape(values, ndim=self.ndim)
except (AttributeError, NotImplementedError):
pass
return values
if by_item and not self._is_single_block:
blocks = self.split_and_operate(None, f, False)
else:
values = f(None, self.values.ravel(), None)
blocks = [make_block(values, ndim=self.ndim,
placement=self.mgr_locs)]
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape), dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
return _extend_blocks([b.convert(datetime=True, numeric=False)
for b in blocks])
def _can_hold_element(self, element):
return True
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
if isinstance(other, ABCDatetimeIndex):
# to store DatetimeTZBlock as object
other = other.asobject.values
return values, False, other, False
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or
is_extension_type(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
to_rep_is_list = is_list_like(to_replace)
value_is_list = is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
result_blocks = []
blocks = [self]
if not either_list and is_re(to_replace):
return self._replace_single(to_replace, value, inplace=inplace,
filter=filter, regex=True,
convert=convert, mgr=mgr)
elif not (either_list or regex):
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
elif both_lists:
for to_rep, v in zip(to_replace, value):
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
elif to_rep_is_list and regex:
for to_rep in to_replace:
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, value, inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
return self._replace_single(to_replace, value, inplace=inplace,
filter=filter, convert=convert,
regex=regex, mgr=mgr)
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
inplace = validate_bool_kwarg(inplace, 'inplace')
# to_replace is regex compilable
to_rep_re = regex and is_re_compilable(to_replace)
# regex is regex compilable
regex_re = is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex,
mgr=mgr)
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isna(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
            # value is guaranteed to be a string here; s can be either a
            # string or null; if it's null it gets returned
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
# convert
block = self.make_block(new_values)
if convert:
block = block.convert(by_item=True, numeric=False)
return block
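    # Illustrative sketch (not part of the original source) of the vectorized
    # regex substitution used above, on a plain object ndarray:
    #
    #   >>> import re
    #   >>> import numpy as np
    #   >>> rx = re.compile(r'^ba')
    #   >>> repl = np.vectorize(lambda s: rx.sub('zz', s)
    #   ...                     if isinstance(s, str) else s,
    #   ...                     otypes=[np.object_])
    #   >>> repl(np.array(['foo', 'bar', 'baz'], dtype=object))
    #   array(['foo', 'zzr', 'zzz'], dtype=object)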
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_verify_integrity = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement, fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(_maybe_to_categorical(values),
fastpath=True,
placement=placement, **kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
return self.copy() if copy else self
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
# GH12564: CategoricalBlock is 1-dim only
# while returned results could be any dim
if ((not is_categorical_dtype(result)) and
isinstance(result, np.ndarray)):
result = _block_shape(result, ndim=self.ndim)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
values = self._try_coerce_result(values.fillna(value=value,
limit=limit))
return [self.make_block(values=values)]
def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(
values=values.fillna(fill_value=fill_value, method=method,
limit=limit),
placement=self.mgr_locs)
def shift(self, periods, axis=0, mgr=None):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
# if its REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def _astype(self, dtype, copy=False, errors='raise', values=None,
klass=None, mgr=None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return self.make_block(values)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isna(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
class DatetimeBlock(DatetimeLikeBlockMixin, Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement, fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values, fastpath=True,
placement=placement, **kwargs)
def _astype(self, dtype, mgr=None, **kwargs):
"""
these automatically copy, so copy=True has no effect
raise on an except if raise == True
"""
# if we are passed a datetime64[ns, tz]
if is_datetime64tz_dtype(dtype):
dtype = DatetimeTZDtype(dtype)
values = self.values
if getattr(values, 'tz', None) is None:
values = DatetimeIndex(values).tz_localize('UTC')
values = values.tz_convert(dtype.tz)
return self.make_block(values)
# delegate
return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (is_integer(element) or isinstance(element, datetime) or
isna(element))
def _try_coerce_args(self, values, other):
"""
Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isna(values)
values = values.view('i8')
other_mask = False
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, (datetime, np.datetime64, date)):
other = self._box_func(other)
if getattr(other, 'tz') is not None:
raise TypeError("cannot coerce a Timestamp with a tz on a "
"naive Block")
other_mask = isna(other)
other = other.asm8.view('i8')
elif hasattr(other, 'dtype') and is_datetime64_dtype(other):
other_mask = isna(other)
other = other.astype('i8', copy=False).view('i8')
else:
# coercion issues
# let higher levels handle
raise TypeError
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
try:
result = result.astype('M8[ns]')
except ValueError:
pass
elif isinstance(result, (np.integer, np.float, np.datetime64)):
result = self._box_func(result)
return result
@property
def _box_func(self):
return tslib.Timestamp
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[..., slicer]
from pandas.io.formats.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(
values.view('i8').ravel(), tz=getattr(self.values, 'tz', None),
format=format, na_rep=na_rep).reshape(values.shape)
return np.atleast_2d(result)
def should_store(self, value):
return (issubclass(value.dtype.type, np.datetime64) and
not is_datetimetz(value))
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock):
""" implement a datetime64 block with a tz attribute """
__slots__ = ()
_holder = DatetimeIndex
is_datetimetz = True
def __init__(self, values, placement, ndim=2, **kwargs):
if not isinstance(values, self._holder):
values = self._holder(values)
dtype = kwargs.pop('dtype', None)
if dtype is not None:
if isinstance(dtype, compat.string_types):
dtype = DatetimeTZDtype.construct_from_string(dtype)
values = values._shallow_copy(tz=dtype.tz)
if values.tz is None:
raise ValueError("cannot create a DatetimeTZBlock without a tz")
super(DatetimeTZBlock, self).__init__(values, placement=placement,
ndim=ndim, **kwargs)
def copy(self, deep=True, mgr=None):
""" copy constructor """
values = self.values
if deep:
values = values.copy(deep=True)
return self.make_block_same_class(values)
def external_values(self):
""" we internally represent the data as a DatetimeIndex, but for
external compat with ndarray, export as a ndarray of Timestamps
"""
return self.values.astype('datetime64[ns]').values
def get_values(self, dtype=None):
# return object dtype as Timestamps with the zones
if is_object_dtype(dtype):
f = lambda x: lib.Timestamp(x, tz=self.values.tz)
return lib.map_infer(
self.values.ravel(), f).reshape(self.values.shape)
return self.values
def _slice(self, slicer):
""" return a slice of my values """
if isinstance(slicer, tuple):
col, loc = slicer
if not is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
return self.values[slicer]
def _try_coerce_args(self, values, other):
"""
localize and return i8 for the values
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = _block_shape(isna(values), ndim=self.ndim)
# asi8 is a view, needs copy
values = _block_shape(values.asi8, ndim=self.ndim)
other_mask = False
if isinstance(other, ABCSeries):
other = self._holder(other)
other_mask = isna(other)
if isinstance(other, bool):
raise TypeError
elif (is_null_datelike_scalar(other) or
(is_scalar(other) and isna(other))):
other = tslib.iNaT
other_mask = True
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
other = other.asi8
other_mask = isna(other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = lib.Timestamp(other)
tz = getattr(other, 'tz', None)
# test we can have an equal time zone
if tz is None or str(tz) != str(self.values.tz):
raise ValueError("incompatible or non tz-aware value")
other_mask = isna(other)
other = other.value
else:
raise TypeError
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.float, np.datetime64)):
result = lib.Timestamp(result, tz=self.values.tz)
if isinstance(result, np.ndarray):
# allow passing of > 1dim if its trivial
if result.ndim > 1:
result = result.reshape(np.prod(result.shape))
result = self.values._shallow_copy(result)
return result
@property
def _box_func(self):
return lambda x: tslib.Timestamp(x, tz=self.dtype.tz)
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
# think about moving this to the DatetimeIndex. This is a non-freq
# (number of periods) shift ###
N = len(self)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.asi8.take(indexer)
if periods > 0:
new_values[:periods] = tslib.iNaT
else:
new_values[periods:] = tslib.iNaT
new_values = self.values._shallow_copy(new_values)
return [self.make_block_same_class(new_values,
placement=self.mgr_locs)]
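# Illustrative sketch (not part of the original module): a tz-aware Series is
# backed by a DatetimeTZBlock whose ``values`` are a DatetimeIndex, while
# ``external_values()`` exports plain datetime64[ns] for ndarray compat. The
# private ``_data``/``_block`` accessors are assumed to expose the manager and
# its single block in this generation of pandas.
def _example_datetimetz_block():
    import pandas as pd
    s = pd.Series(pd.date_range('2017-01-01', periods=3, tz='UTC'))
    blk = s._data._block
    return (blk.is_datetimetz,              # True
            type(blk.values).__name__,      # 'DatetimeIndex'
            blk.external_values().dtype)    # datetime64[ns]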
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_box_to_block_values = False
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
# return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
self.values.fill_value = v
def to_dense(self):
return self.values.to_dense().view()
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, mgr=None, **kwargs):
if values is None:
values = self.values
values = values.astype(dtype, copy=copy)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True, mgr=None):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement, sparse_index=None,
kind=None, dtype=None, fill_value=None,
copy=False, fastpath=True, **kwargs):
""" return a new block """
if dtype is None:
dtype = values.dtype
if fill_value is None and not isinstance(values, SparseArray):
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return self.make_block(np.empty(values.shape, dtype=dtype),
placement,
fastpath=True)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return self.make_block(new_values, fastpath=fastpath,
placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
fill_value=None, **kwargs):
values = missing.interpolate_2d(self.values.to_dense(), method, axis,
limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
values = values.fillna(value, downcast=downcast)
return [self.make_block_same_class(values=values,
placement=self.mgr_locs)]
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values,
placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
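# Illustrative sketch (not part of the original module): SparseBlock wraps a
# single SparseArray and delegates fill_value / sp_values / sp_index to it.
# The SparseArray constructor behaviour shown here is assumed from this era
# of pandas; treat the exact stored layout as indicative only.
def _example_sparse_array_parts():
    import pandas as pd
    arr = pd.SparseArray([0., 0., 1., 2.], fill_value=0.)
    # only the non-fill values are stored densely in sp_values
    return arr.fill_value, arr.sp_values, arr.sp_index.length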
def make_block(values, placement, klass=None, ndim=None, dtype=None,
fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
if hasattr(values, 'tz'):
klass = DatetimeTZBlock
else:
klass = DatetimeBlock
elif is_datetimetz(values):
klass = DatetimeTZBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
elif klass is DatetimeTZBlock and not is_datetimetz(values):
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement, dtype=dtype)
return klass(values, ndim=ndim, fastpath=fastpath, placement=placement)
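# Illustrative sketch (not part of the original module): make_block picks the
# Block subclass from the dtype of the values when no klass is given. The
# placement below is an assumed-valid list of manager locations for a single
# 1x3 row of values.
def _example_make_block_dispatch():
    import numpy as np
    float_blk = make_block(np.arange(3.0).reshape(1, 3), placement=[0])
    bool_blk = make_block(np.ones((1, 3), dtype=bool), placement=[0])
    # expected: ('FloatBlock', 'BoolBlock')
    return type(float_blk).__name__, type(bool_blk).__name__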
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, Panel, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple "
"items")
else:
if self.ndim != block.ndim:
raise AssertionError('Number of Block dimensions (%d) '
'must equal number of axes (%d)' %
(block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [_ensure_index(a)
for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' %
(old_len, new_len))
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True, level=None):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
level : int, default None
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
return obj
def add_prefix(self, prefix):
f = partial('{prefix}{}'.format, prefix=prefix)
return self.rename_axis(f, axis=0)
def add_suffix(self, suffix):
f = partial('{}{suffix}'.format, suffix=suffix)
return self.rename_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, len(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return algos.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4 and
'0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __unicode__(self):
output = pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.format(
len(self.items), tot_items))
def apply(self, f, axes=None, filter=None, do_integrity_check=False,
consolidate=True, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager
integrity check
consolidate: boolean, default True. Join together blocks having same
dtype
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if consolidate:
self._consolidate_inplace()
if f == 'where':
align_copy = True
if kwargs.get('align', True):
align_keys = ['other', 'cond']
else:
align_keys = ['cond']
elif f == 'putmask':
align_copy = False
if kwargs.get('align', True):
align_keys = ['new', 'mask']
else:
align_keys = ['mask']
elif f == 'eval':
align_copy = False
align_keys = ['other']
elif f == 'fillna':
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k])
for k in align_keys
if hasattr(kwargs[k], 'reindex_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = getattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
kwargs['mgr'] = self
applied = getattr(b, f)(**kwargs)
result_blocks = _extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
def reduction(self, f, axis=0, consolidate=True, transposed=False,
**kwargs):
"""
iterate over the blocks, collect and create a new block manager.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
f: the callable or function name to operate on at the block level
axis: reduction axis, default 0
consolidate: boolean, default True. Join together blocks having same
dtype
transposed: boolean, default False
we are holding transposed data
Returns
-------
Block Manager (new object)
"""
if consolidate:
self._consolidate_inplace()
axes, blocks = [], []
for b in self.blocks:
kwargs['mgr'] = self
axe, block = getattr(b, f)(axis=axis, **kwargs)
axes.append(axe)
blocks.append(block)
# note that some DatetimeTZ, Categorical are always ndim==1
ndim = set([b.ndim for b in blocks])
if 2 in ndim:
new_axes = list(self.axes)
# multiple blocks that are reduced
if len(blocks) > 1:
new_axes[1] = axes[0]
# reset the placement to the original
for b, sb in zip(blocks, self.blocks):
b.mgr_locs = sb.mgr_locs
else:
new_axes[axis] = Index(np.concatenate(
[ax.values for ax in axes]))
if transposed:
new_axes = new_axes[::-1]
blocks = [b.make_block(b.values.T,
placement=np.arange(b.shape[1])
) for b in blocks]
return self.__class__(blocks, new_axes)
# 0 ndim
if 0 in ndim and 1 not in ndim:
values = np.array([b.values for b in blocks])
if len(values) == 1:
return values.item()
blocks = [make_block(values, ndim=1)]
axes = Index([ax[0] for ax in axes])
# single block
values = _concat._concat_compat([b.values for b in blocks])
# compute the orderings of our original data
if len(self.blocks) > 1:
indexer = np.empty(len(self.axes[0]), dtype=np.intp)
i = 0
for b in self.blocks:
for j in b.mgr_locs:
indexer[j] = i
i = i + 1
values = values.take(indexer)
return SingleBlockManager(
[make_block(values,
ndim=1,
placement=np.arange(len(values)))],
axes[0])
def isna(self, **kwargs):
return self.apply('apply', **kwargs)
def where(self, **kwargs):
return self.apply('where', **kwargs)
def eval(self, **kwargs):
return self.apply('eval', **kwargs)
def quantile(self, **kwargs):
return self.reduction('quantile', **kwargs)
def setitem(self, **kwargs):
return self.apply('setitem', **kwargs)
def putmask(self, **kwargs):
return self.apply('putmask', **kwargs)
def diff(self, **kwargs):
return self.apply('diff', **kwargs)
def interpolate(self, **kwargs):
return self.apply('interpolate', **kwargs)
def shift(self, **kwargs):
return self.apply('shift', **kwargs)
def fillna(self, **kwargs):
return self.apply('fillna', **kwargs)
def downcast(self, **kwargs):
return self.apply('downcast', **kwargs)
def astype(self, dtype, **kwargs):
return self.apply('astype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply('convert', **kwargs)
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False,
mgr=None):
""" do a list replace """
inplace = validate_bool_kwarg(inplace, 'inplace')
if mgr is None:
mgr = self
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isna(s):
return isna(values)
return _maybe_compare(values, getattr(s, 'asm8', s), operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
src_len = len(src_list) - 1
for blk in self.blocks:
            # it's possible to get multiple result blocks here;
            # replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
convert = i == src_len
result = b.replace(s, d, inplace=inplace, regex=regex,
mgr=mgr, convert=convert)
new_rb = _extend_blocks(result, new_rb)
else:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
b = b.coerce_to_target_dtype(d)
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.apply('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
        Return True if the blocks are consolidated, i.e. no two blocks
        share the same ftype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array
for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array,
axis=0, allow_fill=False)
new_blocks.append(b)
axes = list(self.axes)
axes[0] = self.items.take(indexer)
return self.__class__(new_blocks, axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True, mgr=None):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
        deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == 'all':
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply('copy', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].get_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workaround for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
def to_dict(self, copy=True):
"""
Return a dict of str(dtype) -> BlockManager
Parameters
----------
copy : boolean, default True
Returns
-------
values : a dict of dtype -> BlockManager
Notes
-----
This consolidates based on str(dtype)
"""
self._consolidate_inplace()
bd = {}
for b in self.blocks:
bd.setdefault(str(b.dtype), []).append(b)
return {dtype: self.combine(blocks, copy=copy)
for dtype, blocks in bd.items()}
def xs(self, key, axis=1, copy=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d' %
axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
# we must copy here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.append(newb)
elif len(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(values=vals,
placement=block.mgr_locs,
klass=block.__class__,
fastpath=True, )]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
        get a cross-section for a given location in the
        items; handle duplicates
        return the result, which *could* be a view in the case of a
        single block
"""
if len(self.blocks) == 1:
return self.blocks[0].iget((slice(None), loc))
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isna(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isna(self.items)]
# allow a single nan location indexer
if not is_scalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc, fastpath=fastpath)
else:
if isna(item):
raise TypeError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0,
allow_dups=True)
def iget(self, i, fastpath=True):
"""
        Return the data as a SingleBlockManager if fastpath=True and possible.
        Otherwise return as an ndarray.
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
if not fastpath or not block._box_to_block_values or values.ndim != 1:
return values
        # fastpath shortcut for selecting a single dim from a 2-dim BM
return SingleBlockManager(
[block.make_block_same_class(values,
placement=slice(0, len(values)),
ndim=1, fastpath=True)],
self.axes[1])
def get_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.get_loc(x) for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
values = blk.values
# FIXME: this may return non-upcasted types?
if values.ndim == 1:
return values[full_loc[1]]
full_loc[0] = self._blklocs[full_loc[0]]
return values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_extension_type = is_extension_type(value)
        # categorical/sparse/datetimetz
if value_is_extension_type:
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = _safe_reshape(value, (1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs), check=check)
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
len(removed_blknos))
self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0,
allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_extension_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.copy(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
len(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(values=value_getitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert {}, already exists'.format(item))
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
block = make_block(values=value, ndim=self.ndim,
placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
def reindex_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, copy=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(new_index, method=method,
limit=limit)
return self.reindex_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, copy=copy)
def reindex_indexer(self, new_axis, indexer, axis, fill_value=None,
allow_dups=False, copy=True):
"""
Parameters
----------
new_axis : Index
        indexer : ndarray of int64 or None
            pandas-indexer with -1's only.
        axis : int
        fill_value : object
        allow_dups : bool
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(indexer,
fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=(
fill_value if fill_value is not None else blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = maybe_promote(blk.dtype)
fill_tuple = (fill_value, )
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = algos.take_1d(self._blknos, slobj, fill_value=-1,
allow_fill=allow_fill)
blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1,
allow_fill=allow_fill)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(self._make_na_block(placement=mgr_locs,
fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's
# only one item and each mgr loc is a copy of that single
# item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(blk.take_nd(blklocs[mgr_locs.indexer],
axis=0, new_mgr_locs=mgr_locs,
fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64')
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype='int64'))
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True)
def merge(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concat_indexes([l, r])
new_blocks = [blk.copy(deep=False) for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.copy(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.append(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
            raise AssertionError('Number of dimensions must agree, '
                                 'got %d and %d' % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.tolist())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(block.equals(oblock)
for block, oblock in zip(self_blocks, other_blocks))
def unstack(self, unstacker_func):
"""Return a blockmanager with all blocks unstacked.
Parameters
----------
unstacker_func : callable
A (partially-applied) ``pd.core.reshape._Unstacker`` class.
Returns
-------
unstacked : BlockManager
"""
dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items)
new_columns = dummy.get_new_columns()
new_index = dummy.get_new_index()
new_blocks = []
columns_mask = []
for blk in self.blocks:
blocks, mask = blk._unstack(
partial(unstacker_func,
value_columns=self.items[blk.mgr_locs.indexer]),
new_columns)
new_blocks.extend(blocks)
columns_mask.extend(mask)
new_columns = new_columns[columns_mask]
bm = BlockManager(new_blocks, [new_columns, new_index])
return bm
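# Illustrative sketch (not part of the original module): a DataFrame's data
# lives in a BlockManager holding (after consolidation) one block per dtype.
# The private ``_data`` attribute is assumed to expose the manager in this
# generation of pandas.
def _example_blockmanager_layout():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({'i': np.arange(3),
                       'f': np.arange(3.0),
                       'o': list('abc')})
    mgr = df._data
    # e.g. ({'int64': 1, 'float64': 1, 'object': 1}, 3)
    return mgr.get_dtype_counts(), mgr.nblocks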
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError("cannot create SingleBlockManager with more "
"than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if isinstance(block, list):
# empty block
if len(block) == 0:
block = [np.array([])]
elif len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if isinstance(block, list):
# provide consolidation to the interleaved_dtype
if len(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.astype(dtype) for b in block]
block = _consolidate(block)
if len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not isinstance(block, Block):
block = make_block(block, placement=slice(0, len(axis)), ndim=1,
fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
@property
def _blknos(self):
""" compat with BlockManager """
return None
@property
def _blklocs(self):
""" compat with BlockManager """
return None
def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, copy=True):
# if we are the same and don't copy, just return
if self.index.equals(new_axis):
if copy:
return self.copy(deep=True)
else:
return self
values = self._block.get_values()
if indexer is None:
indexer = self.items.get_indexer_for(new_axis)
if fill_value is None:
fill_value = np.nan
new_values = algos.take_1d(values, indexer, fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = missing.interpolate_2d(new_values,
method=method,
limit=limit,
fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, copy=copy,
placement=slice(0, len(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.apply('convert', **kwargs)
@property
def dtype(self):
return self._block.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_ftype_counts(self):
return {self.ftype: 1}
def get_dtypes(self):
return np.array([self._block.dtype])
def get_ftypes(self):
return np.array([self._block.ftype])
def external_values(self):
return self._block.external_values()
def internal_values(self):
return self._block.internal_values()
def formatting_values(self):
"""Return the internal values used by the DataFrame/SeriesFormatter"""
return self._block.formatting_values()
def get_values(self):
""" return a dense type view """
return np.array(self._block.to_dense(), copy=False)
@property
def asobject(self):
"""
        return an object dtype array. datetime/timedelta-like values are boxed
to Timestamp/Timedelta instances.
"""
return self._block.get_values(dtype=object)
@property
def itemsize(self):
return self._block.values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.get_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
return self._block.values[loc]
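# Illustrative sketch (not part of the original module): a Series is backed by
# a SingleBlockManager holding exactly one block and one axis (the index).
# The private ``_data`` attribute is assumed to expose the manager here.
def _example_single_block_manager():
    import pandas as pd
    s = pd.Series([1.0, 2.0, 3.0], index=list('abc'))
    mgr = s._data
    return isinstance(mgr, SingleBlockManager), mgr.dtype, len(mgr.blocks)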
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
implied = tuple(map(int, [len(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
if block_shape[0] == 0:
raise ValueError("Empty data passed with indices specified.")
raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
passed, implied))
def create_block_manager_from_blocks(blocks, axes):
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement
# is basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [make_block(values=blocks[0],
placement=slice(0, len(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [getattr(b, 'values', b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
construction_error(len(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
datetime_tz_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if is_sparse(v):
sparse_items.append((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.append((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.append((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if is_datetimetz(v):
datetime_tz_items.append((i, k, v))
else:
datetime_items.append((i, k, v))
elif is_datetimetz(v):
datetime_tz_items.append((i, k, v))
elif issubclass(v.dtype.type, np.integer):
int_items.append((i, k, v))
elif v.dtype == np.bool_:
bool_items.append((i, k, v))
elif is_categorical(v):
cat_items.append((i, k, v))
else:
object_items.append((i, k, v))
blocks = []
if len(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if len(complex_items):
complex_blocks = _multi_blockify(complex_items)
blocks.extend(complex_blocks)
if len(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if len(datetime_items):
datetime_blocks = _simple_blockify(datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(datetime_tz_items):
dttz_blocks = [make_block(array,
klass=DatetimeTZBlock,
fastpath=True,
placement=[i], )
for i, _, array in datetime_tz_items]
blocks.extend(dttz_blocks)
if len(bool_items):
bool_blocks = _simple_blockify(bool_items, np.bool_)
blocks.extend(bool_blocks)
if len(object_items) > 0:
object_blocks = _simple_blockify(object_items, np.object_)
blocks.extend(object_blocks)
if len(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if len(cat_items) > 0:
cat_blocks = [make_block(array, klass=CategoricalBlock, fastpath=True,
placement=[i])
for i, _, array in cat_items]
blocks.extend(cat_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
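# Illustrative sketch (not part of the original module): form_blocks groups
# the input arrays into dtype buckets and returns one (possibly multi-item)
# block per bucket; an int column and a float column yield two blocks here.
# Index and form_blocks are taken from this module's namespace.
def _example_form_blocks():
    import numpy as np
    names = ['i', 'f']
    axes = [Index(names), Index(range(3))]
    arrays = [np.arange(3), np.arange(3.0)]
    blocks = form_blocks(arrays, names, axes)
    # expected something like ['FloatBlock', 'IntBlock']
    return [type(b).__name__ for b in blocks]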
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
    # stack the 1-d arrays (or Series) into a single 2-d ndarray
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return len(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks):
if not len(blocks):
return None
dtype = find_common_type([b.dtype for b in blocks])
# only numpy compat
if isinstance(dtype, ExtensionDtype):
dtype = np.object
return dtype
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
new_blocks = _extend_blocks(merged_blocks, new_blocks)
return new_blocks
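# Illustrative sketch (not part of the original module): two separate float
# blocks covering manager locations 0 and 2 are merged by _consolidate into a
# single 2x3 FloatBlock whose mgr_locs become the sorted union [0, 2].
def _example_consolidate_blocks():
    import numpy as np
    b1 = make_block(np.ones((1, 3)), placement=[0])
    b2 = make_block(np.zeros((1, 3)), placement=[2])
    merged = _consolidate([b1, b2])
    # expected: (1, (2, 3), [0, 2])
    return len(merged), merged[0].shape, list(merged[0].mgr_locs)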
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values, fastpath=True, placement=new_mgr_locs)
# no merge
return blocks
def _extend_blocks(result, blocks=None):
""" return a new extended blocks, givin the result """
if blocks is None:
blocks = []
if isinstance(result, list):
for r in result:
if isinstance(r, list):
blocks.extend(r)
else:
blocks.append(r)
elif isinstance(result, BlockManager):
blocks.extend(result.blocks)
else:
blocks.append(result)
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim < ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1, ) + shape))
return values
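# Illustrative sketch (not part of the original module): _block_shape pads a
# 1-d array with a leading length-1 axis so it can be stored as one row of a
# 2-d block; input that already has enough dimensions is returned unchanged.
def _example_block_shape():
    import numpy as np
    one_d = np.arange(3)
    two_d = _block_shape(one_d, ndim=2)
    return one_d.shape, two_d.shape  # (3,), (1, 3)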
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _maybe_compare(a, b, op):
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
    # numpy deprecation warning to have i8 vs integer comparisons
if is_datetimelike_v_numeric(a, b):
result = False
# numpy deprecation warning if comparing numeric vs string-like
elif is_numeric_v_string_like(a, b):
result = False
else:
result = op(a, b)
if is_scalar(result) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return result
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
panel_shape = (len(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and minor
# labels, for converting to panel format.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
for i in range(len(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return _ensure_platform_int(
np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
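# Illustrative sketch (not part of the original module): _factor_indexer is a
# row-major flattening of per-dimension labels; with shape (3,) the flat
# position of a (major, minor) pair is major * 3 + minor.
def _example_factor_indexer():
    import numpy as np
    major = np.array([0, 0, 1])
    minor = np.array([0, 2, 1])
    flat = _factor_indexer((3,), [major, minor])
    return list(flat)  # [0, 2, 4]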
def _get_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (BlockPlacement, blkno)
"""
blknos = _ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
for blkno, indexer in lib.get_blkno_indexers(blknos, group):
yield blkno, BlockPlacement(indexer)
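# Illustrative sketch (hypothetical blknos, not taken from the original module):
#   blknos = [0, 0, 1, 1, 0] with group=False yields one entry per contiguous run,
#   roughly (0, BlockPlacement(slice(0, 2))), (1, BlockPlacement(slice(2, 4))),
#   (0, BlockPlacement(slice(4, 5))); with group=True the two runs of block 0
#   would be combined into a single (possibly non-slice) placement.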
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
"""
If two indices overlap, add suffixes to overlapping entries.
If corresponding suffix is empty, the entry is simply converted to string.
"""
to_rename = left.intersection(right)
if len(to_rename) == 0:
return left, right
else:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s' %
to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
return (_transform_index(left, lrenamer),
_transform_index(right, rrenamer))
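# Illustrative sketch (hypothetical indexes, not taken from the original module):
#   left = Index(['A', 'B']), right = Index(['B', 'C']), lsuffix='_x', rsuffix='_y'
#   -> (Index(['A', 'B_x']), Index(['B_y', 'C']))
# Only the overlapping entry 'B' is renamed; if neither suffix is given a ValueError
# is raised instead.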
def _safe_reshape(arr, new_shape):
"""
If possible, reshape `arr` to have shape `new_shape`,
with a couple of exceptions (see gh-13012):
1) If `arr` is a Categorical or Index, `arr` will be
returned as is.
2) If `arr` is a Series, the `_values` attribute will
be reshaped and returned.
Parameters
----------
arr : array-like, object to be reshaped
new_shape : int or tuple of ints, the new shape
"""
if isinstance(arr, ABCSeries):
arr = arr._values
if not isinstance(arr, Categorical):
arr = arr.reshape(new_shape)
return arr
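# Illustrative sketch (hypothetical inputs, not taken from the original module):
#   _safe_reshape(np.arange(6), (2, 3))               # plain ndarray -> reshaped to (2, 3)
#   _safe_reshape(Categorical(['a', 'b']), (2, 1))    # Categorical -> returned unchanged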
def _transform_index(index, func, level=None):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified.
"""
if isinstance(index, MultiIndex):
if level is not None:
items = [tuple(func(y) if i == level else y
for i, y in enumerate(x)) for x in index]
else:
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
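# Illustrative sketch (hypothetical index, not taken from the original module):
#   _transform_index(Index(['a', 'b']), str.upper) -> Index(['A', 'B'])
# For a MultiIndex the function is applied to every element of every tuple, or only
# to the entries of a single level when `level` is given.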
def _putmask_smart(v, m, n):
"""
Return a new ndarray, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
Returns
-------
values : ndarray with updated values
this *may* be a copy of the original
See Also
--------
ndarray.putmask
"""
# we cannot use np.asarray() here as we cannot have conversions
# that numpy does when numeric are mixed with strings
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.repeat(n, len(m))
elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndmin=1), len(m))
    # see if we are only masking values that, if put,
    # will work in the current dtype
try:
nn = n[m]
# make sure that we have a nullable type
# if we have nulls
if not _isna_compat(v, nn[0]):
raise ValueError
# we ignore ComplexWarning here
with catch_warnings(record=True):
nn_at = nn.astype(v.dtype)
# avoid invalid dtype comparisons
# between numbers & strings
# only compare integers/floats
# don't compare integers to datetimelikes
if (not is_numeric_v_string_like(nn, nn_at) and
(is_float_dtype(nn.dtype) or
is_integer_dtype(nn.dtype) and
is_float_dtype(nn_at.dtype) or
is_integer_dtype(nn_at.dtype))):
comp = (nn == nn_at)
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
n = np.asarray(n)
def _putmask_preserve(nv, n):
try:
nv[m] = n[m]
except (IndexError, ValueError):
nv[m] = n
return nv
# preserves dtype if possible
if v.dtype.kind == n.dtype.kind:
return _putmask_preserve(v, n)
# change the dtype if needed
dtype, _ = maybe_promote(n.dtype)
if is_extension_type(v.dtype) and is_object_dtype(dtype):
v = v.get_values(dtype)
else:
v = v.astype(dtype)
return _putmask_preserve(v, n)
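# Illustrative sketch (hypothetical arrays, not taken from the original module):
#   v = np.array([1, 2, 3]); m = np.array([True, False, True])
#   _putmask_smart(v, m, 10)    # dtype preserved -> array([10, 2, 10])
#   _putmask_smart(v, m, 1.5)   # upcast to float64 -> array([1.5, 2., 1.5])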
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
"""
concat_plan = combine_concat_plans(
[get_mgr_concatenation_plan(mgr, indexers)
for mgr, indexers in mgrs_indexers], concat_axis)
blocks = [make_block(
concatenate_join_units(join_units, concat_axis, copy=copy),
placement=placement) for placement, join_units in concat_plan]
return BlockManager(blocks, axes)
def get_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
na
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
has_none_blocks = False
dtypes = [None] * len(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
upcast_classes = defaultdict(list)
null_upcast_classes = defaultdict(list)
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if is_categorical_dtype(dtype):
upcast_cls = 'category'
elif is_datetimetz(dtype):
upcast_cls = 'datetimetz'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
elif is_float_dtype(dtype) or is_numeric_dtype(dtype):
upcast_cls = dtype.name
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_na:
null_upcast_classes[upcast_cls].append(dtype)
else:
upcast_classes[upcast_cls].append(dtype)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'datetimetz' in upcast_classes:
dtype = upcast_classes['datetimetz']
return dtype[0], tslib.iNaT
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
g = np.find_common_type(upcast_classes, [])
if is_float_dtype(g):
return g, g.type(np.nan)
elif is_numeric_dtype(g):
if has_none_blocks:
return np.float64, np.nan
else:
return g, None
msg = "invalid dtype determination in get_concat_dtype"
raise AssertionError(msg)
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy and concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = _concat._concat_compat(to_concat, axis=concat_axis)
return concat_values
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
    # Calculate post-reindex shape, save for item axis which will be separate
# for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks),
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
((ax0_indexer is None and
blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()))
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
def combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:],
trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape)
class JoinUnit(object):
def __init__(self, block, shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self):
return '%s(%r, %s)' % (self.__class__.__name__, self.block,
self.indexers)
@cache_readonly
def needs_filling(self):
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
if self.block is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return self.block.dtype
else:
return _get_dtype(maybe_promote(self.block.dtype,
self.block.fill_value)[0])
@cache_readonly
def is_na(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
# Usually it's enough to check but a small fraction of values to see if
# a block is NOT null, chunks should help in such cases. 1000 value
# was chosen rather arbitrarily.
values = self.block.values
if self.block.is_categorical:
values_flat = values.categories
elif self.block.is_sparse:
# fill_value is not NaN and have holes
if not values._null_fill_value and values.sp_index.ngaps > 0:
return False
values_flat = values.ravel(order='K')
else:
values_flat = values.ravel(order='K')
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isna(values_flat[i:i + chunk_len]).all():
return False
return True
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_na:
if getattr(self.block, 'is_object', False):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order='K')
if len(values) and values[0] is None:
fill_value = None
if getattr(self.block, 'is_datetimetz', False):
pass
elif getattr(self.block, 'is_categorical', False):
pass
elif getattr(self.block, 'is_sparse', False):
pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if not self.block._can_consolidate:
# preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
elif self.block.is_categorical:
values = self.block.values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax,
fill_value=fill_value)
return values
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
if len(arr) == 0:
# Handle empty arr case separately: numpy 1.6 chokes on that.
return np.empty((0, 2), dtype=arr.dtype)
else:
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
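# Illustrative sketch (hypothetical array, not taken from the original module):
#   _fast_count_smallints(np.array([1, 1, 3])) -> array([[1, 2], [3, 1]])
#   i.e. each row pairs a distinct value with its occurrence count.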
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if isinstance(slice_or_indexer, slice):
return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer,
length)
elif (isinstance(slice_or_indexer, np.ndarray) and
slice_or_indexer.dtype == np.bool_):
return 'mask', slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return 'fancy', indexer, len(indexer)
| bsd-3-clause | 4,438,622,254,422,599,700 | 32.88472 | 79 | 0.539857 | false |
cerrno/neurokernel | tests/test_mixins.py | 1 | 1747 | #!/usr/bin/env python
import sys
from unittest import main, TestCase
import twiggy
from twiggy.lib.converter import ConversionTable
import neurokernel.mixins as mixins
# Create formatter and filter to produce predictable output that can be tested:
conv = ConversionTable()
conv.add('name', str, '{1}'.format)
conv.add('level', str, '{1}'.format)
conv.add('message', str, '{1}'.format)
conv.genericValue = str
conv.genericItem = '{0}={1}'.format
conv.aggregate = ':'.join
fmt = twiggy.formats.LineFormat(conversion=conv)
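# With the conversion table above, every record is rendered as "<name>:<LEVEL>:<message>"
# (e.g. "log:DEBUG:abc"), and the filter below drops the timestamp field so the emitted
# lines stay deterministic for the assertions in the tests.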
def filt(msg):
del msg.fields['time']
return msg
class test_loggermixin(TestCase):
def setUp(self):
output = twiggy.outputs.StreamOutput(format=fmt, stream=sys.stdout)
twiggy.emitters['*'] = twiggy.filters.Emitter(twiggy.levels.DEBUG, filt, output)
self.lm = mixins.LoggerMixin('log')
def test_methods(self):
self.lm.log_debug('abc')
self.lm.log_info('abc')
self.lm.log_warning('abc')
self.lm.log_error('abc')
self.lm.log_critical('abc')
# All output to stdout is buffered within a single test before emission:
self.assertEquals(sys.stdout.getvalue().strip(),
'log:DEBUG:abc\n'
'log:INFO:abc\n'
'log:WARNING:abc\n'
'log:ERROR:abc\n'
'log:CRITICAL:abc')
def test_log_on(self):
self.lm.log_on = False
self.lm.log_debug('abc')
self.lm.log_info('abc')
self.lm.log_warning('abc')
self.lm.log_error('abc')
self.lm.log_critical('abc')
self.assertEquals(sys.stdout.getvalue().strip(), '')
if __name__ == '__main__':
main(buffer=True)
| bsd-3-clause | -4,383,098,464,088,466,400 | 30.196429 | 88 | 0.604465 | false |
MSLNZ/msl-equipment | msl/examples/equipment/energetiq/eq99.py | 1 | 1074 | """
Example showing how to communicate with an EQ-99 Manager from Energetiq.
"""
import time
from msl.equipment import (
EquipmentRecord,
ConnectionRecord,
Backend,
)
record = EquipmentRecord(
manufacturer='Energetiq',
model='EQ-99',
connection=ConnectionRecord(
address='COM6', # update for your controller
backend=Backend.MSL,
)
)
# connect to the Manager
eq99 = record.connect()
# get the total number of running hours of the lamp
print('Lamp ON time is {} hours'.format(eq99.get_lamptime()))
# turn the output on
eq99.set_output(True)
# wait for the lamp to turn on
t0 = time.time()
while True:
value, bitmask = eq99.condition_register()
print('Elapsed time: {:3.0f} seconds, bitmask: {}'.format(time.time() - t0, bitmask))
if bitmask[5] == '1': # index 5 represents the "Lamp on" state
print('Lamp is on')
break
time.sleep(1)
# do other stuff while the lamp is on
time.sleep(10)
# turn the output off when done
eq99.set_output(False)
# disconnect from the Manager
eq99.disconnect()
| mit | -7,131,231,820,671,970,000 | 21.851064 | 89 | 0.676909 | false |
btjhjeon/ConversationalQA | skipthoughts/decoding/train.py | 2 | 7706 | """
Main trainer function
"""
import theano
import theano.tensor as tensor
import cPickle as pkl
import numpy
import copy
import os
import warnings
import sys
import time
import homogeneous_data
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from collections import defaultdict
from utils import *
from layers import get_layer, param_init_fflayer, fflayer, param_init_gru, gru_layer
from optim import adam
from model import init_params, build_model, build_sampler
from vocab import load_dictionary
from search import gen_sample
# main trainer
def trainer(X, C, stmodel,
dimctx=4800, #vector dimensionality
dim_word=620, # word vector dimensionality
dim=1600, # the number of GRU units
encoder='gru',
decoder='gru',
doutput=False,
max_epochs=5,
dispFreq=1,
decay_c=0.,
grad_clip=5.,
n_words=40000,
maxlen_w=100,
optimizer='adam',
batch_size = 16,
saveto='/u/rkiros/research/semhash/models/toy.npz',
dictionary='/ais/gobi3/u/rkiros/bookgen/book_dictionary_large.pkl',
embeddings=None,
saveFreq=1000,
sampleFreq=100,
reload_=False):
# Model options
model_options = {}
model_options['dimctx'] = dimctx
model_options['dim_word'] = dim_word
model_options['dim'] = dim
model_options['encoder'] = encoder
model_options['decoder'] = decoder
model_options['doutput'] = doutput
model_options['max_epochs'] = max_epochs
model_options['dispFreq'] = dispFreq
model_options['decay_c'] = decay_c
model_options['grad_clip'] = grad_clip
model_options['n_words'] = n_words
model_options['maxlen_w'] = maxlen_w
model_options['optimizer'] = optimizer
model_options['batch_size'] = batch_size
model_options['saveto'] = saveto
model_options['dictionary'] = dictionary
model_options['embeddings'] = embeddings
model_options['saveFreq'] = saveFreq
model_options['sampleFreq'] = sampleFreq
model_options['reload_'] = reload_
print model_options
# reload options
if reload_ and os.path.exists(saveto):
print 'reloading...' + saveto
with open('%s.pkl'%saveto, 'rb') as f:
            model_options = pkl.load(f)
# load dictionary
print 'Loading dictionary...'
worddict = load_dictionary(dictionary)
# Load pre-trained embeddings, if applicable
if embeddings != None:
print 'Loading embeddings...'
with open(embeddings, 'rb') as f:
embed_map = pkl.load(f)
dim_word = len(embed_map.values()[0])
model_options['dim_word'] = dim_word
preemb = norm_weight(n_words, dim_word)
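        # pz marks which vocabulary words have a pre-trained embedding; rows of preemb
        # for the first n_words-2 dictionary keys that appear in embed_map are then
        # overwritten with those vectors, while the remaining rows keep the random
        # initialisation produced by norm_weight.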
pz = defaultdict(lambda : 0)
for w in embed_map.keys():
pz[w] = 1
for w in worddict.keys()[:n_words-2]:
if pz[w] > 0:
preemb[worddict[w]] = embed_map[w]
else:
preemb = None
# Inverse dictionary
word_idict = dict()
for kk, vv in worddict.iteritems():
word_idict[vv] = kk
word_idict[0] = '<eos>'
word_idict[1] = 'UNK'
print 'Building model'
params = init_params(model_options, preemb=preemb)
# reload parameters
if reload_ and os.path.exists(saveto):
params = load_params(saveto, params)
tparams = init_tparams(params)
trng, inps, cost = build_model(tparams, model_options)
print 'Building sampler'
f_init, f_next = build_sampler(tparams, model_options, trng)
# before any regularizer
print 'Building f_log_probs...',
f_log_probs = theano.function(inps, cost, profile=False)
print 'Done'
# weight decay, if applicable
if decay_c > 0.:
decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
weight_decay = 0.
for kk, vv in tparams.iteritems():
weight_decay += (vv ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
# after any regularizer
print 'Building f_cost...',
f_cost = theano.function(inps, cost, profile=False)
print 'Done'
print 'Done'
print 'Building f_grad...',
grads = tensor.grad(cost, wrt=itemlist(tparams))
f_grad_norm = theano.function(inps, [(g**2).sum() for g in grads], profile=False)
f_weight_norm = theano.function([], [(t**2).sum() for k,t in tparams.iteritems()], profile=False)
if grad_clip > 0.:
g2 = 0.
for g in grads:
g2 += (g**2).sum()
new_grads = []
for g in grads:
new_grads.append(tensor.switch(g2 > (grad_clip**2),
g / tensor.sqrt(g2) * grad_clip,
g))
grads = new_grads
lr = tensor.scalar(name='lr')
print 'Building optimizers...',
# (compute gradients), (updates parameters)
f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
print 'Optimization'
# Each sentence in the minibatch have same length (for encoder)
train_iter = homogeneous_data.HomogeneousData([X,C], batch_size=batch_size, maxlen=maxlen_w)
uidx = 0
lrate = 0.01
for eidx in xrange(max_epochs):
n_samples = 0
print 'Epoch ', eidx
for x, c in train_iter:
n_samples += len(x)
uidx += 1
x, mask, ctx = homogeneous_data.prepare_data(x, c, worddict, stmodel, maxlen=maxlen_w, n_words=n_words)
            if x is None:
print 'Minibatch with zero sample under length ', maxlen_w
uidx -= 1
continue
ud_start = time.time()
cost = f_grad_shared(x, mask, ctx)
f_update(lrate)
ud = time.time() - ud_start
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'UD ', ud
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving...',
params = unzip(tparams)
numpy.savez(saveto, history_errs=[], **params)
pkl.dump(model_options, open('%s.pkl'%saveto, 'wb'))
print 'Done'
if numpy.mod(uidx, sampleFreq) == 0:
x_s = x
mask_s = mask
ctx_s = ctx
for jj in xrange(numpy.minimum(10, len(ctx_s))):
sample, score = gen_sample(tparams, f_init, f_next, ctx_s[jj].reshape(1, model_options['dimctx']), model_options,
trng=trng, k=1, maxlen=100, stochastic=False, use_unk=False)
print 'Truth ',jj,': ',
for vv in x_s[:,jj]:
if vv == 0:
break
if vv in word_idict:
print word_idict[vv],
else:
print 'UNK',
print
for kk, ss in enumerate([sample[0]]):
print 'Sample (', kk,') ', jj, ': ',
for vv in ss:
if vv == 0:
break
if vv in word_idict:
print word_idict[vv],
else:
print 'UNK',
print
print 'Seen %d samples'%n_samples
if __name__ == '__main__':
pass
| mit | -6,543,742,380,335,079,000 | 31.242678 | 133 | 0.536854 | false |
sljrobin/dotfiles | dzen2/.dzen2/scripts/Music.py | 1 | 6137 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: Music.py
# Description: Functions for Music
# Author: Simon L. J. Robin | https://sljrobin.org
# Created: 2016-09-11 22:50:11
# Modified: 2016-09-25 23:50:25
#
################################################################################
import os
import subprocess
import sys
sys.path.insert(0, os.environ['HOME'] + "/.dzen2/lib")
import Colors
import Icons
################################################################################
class Music(object):
"""Functions for Music.
"""
def __format_metadata(self, color_artist, color_title, color_album,\
color_percentage, color_repeat, color_random):
"""Formats the song's metadata for printing.
Args:
color_artist: Artist's color.
color_title: Title's color.
color_album: Album's color.
color_percentage: Percentage's color.
color_repeat: Repeat's color.
color_random: Random's color.
"""
# Getting song's metadata
song_metadata = self.__get_metadata() # Metadata list
song_artist = song_metadata[0] # Artist
song_album = song_metadata[1] # Album
song_title = song_metadata[2] # Title
song_time = song_metadata[3] # Time
song_percentage = song_metadata[4] # Percentage
song_repeat = song_metadata[5] # Repeat
song_random = song_metadata[6] # Random
# Artist
sys.stdout.write("^fg(%s)[^fg()" % Colors.CL_BASE03)
sys.stdout.write("^fg(%s)%s^fg()" % (color_artist, song_artist))
sys.stdout.write("^fg(%s)][^fg()" % Colors.CL_BASE03)
# Title
sys.stdout.write("^fg(%s)%s^fg()" % (color_title, song_title))
sys.stdout.write("^fg(%s)][^fg()" % Colors.CL_BASE03)
# Album
sys.stdout.write("^fg(%s)%s^fg()" % (color_album, song_album))
sys.stdout.write("^fg(%s)][^fg()" % Colors.CL_BASE03)
# Time / Percentage
sys.stdout.write("^fg(%s)%s %s%%^fg()" % (color_percentage,\
song_time, song_percentage))
sys.stdout.write("^fg(%s)]^fg()" % Colors.CL_BASE03)
# Repeat
if song_repeat != "off":
sys.stdout.write("^fg(%s)[^fg()" % Colors.CL_BASE03)
sys.stdout.write("^fg(%s)R^fg()" % color_repeat)
sys.stdout.write("^fg(%s)]^fg()" % Colors.CL_BASE03)
# Random
if song_random != "off":
sys.stdout.write("^fg(%s)[^fg()" % Colors.CL_BASE03)
sys.stdout.write("^fg(%s)~^fg()" % color_random)
sys.stdout.write("^fg(%s)]^fg()" % Colors.CL_BASE03)
############################################################################
def __get_metadata(self):
"""Gets the song's metadata.
Returns:
Song's metadata.
"""
# Executing command and parsing output
metadata_format = '%artist%\\n%album%\\n%title%\\n%track%'
cmd = subprocess.Popen(['mpc', '--format', metadata_format],\
stdout=subprocess.PIPE)
cmd_out, cmd_err = cmd.communicate()
cmd_outparsed = cmd_out.split('\n')
# Getting status
status = self.__get_music_status()
# Getting Artist / Album / Title
artist = cmd_outparsed[0]
album = cmd_outparsed[1]
title = cmd_outparsed[2]
        # Getting Time / Percentage / Repeat / Random
for line in cmd_outparsed:
if "#" in line:
# Time
if status == "playing":
time = line.split(' ')[4]
elif status == "paused":
time = line.split(' ')[5]
# Percentage
if status == "playing":
percentage = line.split(' ')[5].translate(None, "()%")
elif status == "paused":
percentage = line.split(' ')[6].translate(None, "()%")
if "volume" in line:
# Repeat
repeat = line.split(' ')[5]
# Random
random = line.split(' ')[9]
# Parsing metadata
metadata = [artist, album, title,\
time, percentage,\
repeat, random]
return metadata
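        # The returned list is positional: [0] artist, [1] album, [2] title,
        # [3] time, [4] percentage played, [5] repeat state, [6] random state --
        # this is the order __format_metadata unpacks above.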
############################################################################
def __get_music_status(self):
"""Gets MPC status.
Returns:
MPC status.
"""
# Executing command and parsing output
cmd = subprocess.Popen(['mpc'], stdout=subprocess.PIPE)
cmd_out, cmd_err = cmd.communicate()
cmd_outparsed = cmd_out.split('\n')
# Looking for MPC status
status_line = cmd_outparsed[1]
for line in cmd_outparsed:
if "playing" in status_line:
status = "playing"
return status
elif "paused" in status_line:
status = "paused"
return status
else:
status = "stopped"
return status
############################################################################
def show_song(self):
"""Shows information about the current playing song.
"""
icon = Icons.Icons() # Icon
# Getting status
status = self.__get_music_status()
if status == "playing":
icon.show_icon("music_play")
self.__format_metadata(Colors.CL_BASE0B, Colors.CL_BASE0D,\
Colors.CL_BASE0A, Colors.CL_BASE08,\
Colors.CL_BASE09, Colors.CL_BASE0E)
elif status == "paused":
icon.show_icon("music_pause")
self.__format_metadata(Colors.CL_BASE04, Colors.CL_BASE04,\
Colors.CL_BASE04, Colors.CL_BASE04,\
Colors.CL_BASE04, Colors.CL_BASE04)
else:
icon.show_icon("music_stop")
| gpl-2.0 | -8,595,044,930,207,759,000 | 36.193939 | 80 | 0.473032 | false |
migasfree/migasfree | setup.py | 1 | 6056 | # -*- coding: UTF-8 -*-
# Copyright (c) 2011-2020 Jose Antonio Chavarría <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'Jose Antonio Chavarría'
__license__ = 'GPLv3'
# http://guide.python-distribute.org/
# python setup.py --help-commands
# python setup.py build
# python setup.py sdist
# python setup.py bdist --format=rpm
# python setup.py --command-packages=stdeb.command bdist_deb (python-stdeb)
# http://zetcode.com/articles/packageinpython/
# TODO https://wiki.ubuntu.com/PackagingGuide/Python
# TODO https://help.ubuntu.com/community/PythonRecipes/DebianPackage
import sys
if not hasattr(sys, 'version_info') or sys.version_info < (3, 5, 0, 'final'):
raise SystemExit('migasfree-server requires Python 3.5 or later.')
import os
from distutils.core import setup
from distutils.command.install_data import install_data
PATH = os.path.dirname(__file__)
README = open(os.path.join(PATH, 'README.md')).read()
VERSION = __import__('migasfree').__version__
class InstallData(install_data):
def _find_other_files(self):
data_files = []
for directory in ['packages']:
for root, _, files in os.walk(directory):
final_files = []
for archive in files:
final_files.append(os.path.join(root, archive))
data_files.append(
(
'/usr/share/%s' % os.path.join('migasfree-server', root),
final_files
)
)
return data_files
def _find_doc_files(self):
data_files = []
for root, _, files in os.walk('doc'):
# first level does not matter
if root == 'doc':
continue
final_files = []
for archive in files:
final_files.append(os.path.join(root, archive))
# remove doc directory from root
tmp_dir = root.replace('doc/', '', 1)
data_files.append(
(
'/usr/share/doc/%s' % os.path.join(
'migasfree-server',
tmp_dir
),
final_files
)
)
return data_files
def run(self):
self.data_files.extend(self._find_other_files())
self.data_files.extend(self._find_doc_files())
install_data.run(self)
setup(
name='migasfree-server',
version=VERSION,
    description='migasfree-server is a Django app for systems management',
long_description=README,
license='GPLv3',
author='Alberto Gacías',
author_email='[email protected]',
url='http://www.migasfree.org/',
platforms=['Linux'],
packages=[
'migasfree',
'migasfree.server',
'migasfree.server.admin',
'migasfree.server.migrations',
'migasfree.server.models',
'migasfree.server.templatetags',
'migasfree.server.views',
'migasfree.catalog',
'migasfree.catalog.migrations',
'migasfree.settings',
'migasfree.stats',
'migasfree.stats.views',
],
package_dir={
'migasfree': 'migasfree',
'migasfree.server': 'migasfree/server',
'migasfree.server.admin': 'migasfree/server/admin',
'migasfree.server.migrations': 'migasfree/server/migrations',
'migasfree.server.models': 'migasfree/server/models',
'migasfree.server.templatetags': 'migasfree/server/templatetags',
'migasfree.server.views': 'migasfree/server/views',
'migasfree.catalog': 'migasfree/catalog',
'migasfree.catalog.migrations': 'migasfree/catalog/migrations',
'migasfree.stats': 'migasfree/stats',
'migasfree.stats.views': 'migasfree/stats/views',
},
cmdclass={
'install_data': InstallData,
},
package_data={
'migasfree': [
'i18n/*/LC_MESSAGES/*.mo',
'server/fixtures/*',
'server/static/ajax-select/*.css',
'server/static/ajax-select/*.js',
'server/static/ajax-select/images/*',
'server/static/css/*',
'server/static/img/*',
'server/static/js/*.js',
'server/static/js/d3/*',
'server/static/fonts/*',
'server/templates/*.html',
'server/templates/*/*.html',
'server/templates/*/*/*.html',
'server/templates/*/*/*/*.html',
'catalog/static/css/*',
'catalog/static/img/*',
'catalog/static/js/*.js',
'catalog/static/js/locales/*.js',
],
},
data_files=[
('/usr/share/doc/migasfree-server', [
'AUTHORS',
'COPYING',
'INSTALL',
'MANIFEST.in',
'README.md',
]),
],
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| gpl-3.0 | -6,168,221,606,599,370,000 | 32.076503 | 81 | 0.577895 | false |
ganga-devs/ganga | ganga/GangaCore/Utility/execute.py | 1 | 13130 | import os
import base64
import subprocess
import threading
import pickle as pickle
import signal
from copy import deepcopy
from GangaCore.Core.exceptions import GangaException
from GangaCore.Utility.logging import getLogger
logger = getLogger()
def bytes2string(obj):
if isinstance(obj, bytes):
return obj.decode("utf-8")
if isinstance(obj, dict):
return {bytes2string(key): bytes2string(value) for key, value in obj.items()}
if isinstance(obj, list):
return [bytes2string(item) for item in obj]
if isinstance(obj, tuple):
return tuple(bytes2string(item) for item in obj)
return obj
def env_update_script(indent=''):
""" This function creates an extension to a python script, or just a python script to be run at the end of the
piece of code we're interested in.
    This piece of code will dump the environment after the execution has taken place into a pipe.
    This returns a tuple of the script it has generated and the (read, write) pipe file descriptors used to store the env in memory
Args:
indent (str): This is the indent to apply to the script if this script is to be appended to a python file
"""
fdread, fdwrite = os.pipe()
os.set_inheritable(fdwrite, True)
this_script = '''
import os
import pickle as pickle
with os.fdopen(###FD_WRITE###,'wb') as envpipe:
pickle.dump(dict(os.environ), envpipe, 2)
'''
from GangaCore.GPIDev.Lib.File.FileUtils import indentScript
script = indentScript(this_script, '###INDENT###')
script = script.replace('###INDENT###' , indent )\
.replace('###FD_READ###' , str(fdread) )\
.replace('###FD_WRITE###', str(fdwrite))
return script, (fdread, fdwrite)
def python_wrapper(command, python_setup='', update_env=False, indent=''):
""" This section of code wraps the given python command inside a small wrapper class to allow us to control the output.
Optionally we can also append to the end of this file a script to allow us to extract the environment after we've
finished executing our command.
Args:
command (str): This is the python code to be executed (can be multi-line)
        python_setup (str): This is some python code to be executed before the python code in question (aka a script header).
        update_env (bool): Control whether we want to capture the env after running
indent (str): This allows for an indent to be applied to the script so it can be placed inside other python scripts
    This returns the generated script, the pipe file descriptors for the wrapper's pickled output and those for the env_update_script
"""
fdread, fdwrite = os.pipe()
os.set_inheritable(fdwrite, True)
this_script = '''
from __future__ import print_function
import os, sys, traceback
import pickle as pickle
with os.fdopen(###PKL_FDWRITE###, 'wb') as PICKLE_STREAM:
def output(data):
pickle.dump(data, PICKLE_STREAM, 2)
local_ns = {'pickle' : pickle,
'PICKLE_STREAM' : PICKLE_STREAM,
'output' : output}
try:
full_command = """###SETUP### """
full_command += """ \n###COMMAND### """
exec(full_command, local_ns)
except:
pickle.dump(traceback.format_exc(), PICKLE_STREAM, 2)
'''
from GangaCore.GPIDev.Lib.File.FileUtils import indentScript
script = indentScript(this_script, '###INDENT###')
script = script.replace('###INDENT###' , indent )\
.replace('###SETUP###' , python_setup.strip())\
.replace('###COMMAND###' , command.strip() )\
.replace('###PKL_FDREAD###' , str(fdread) )\
.replace('###PKL_FDWRITE###', str(fdwrite) )
env_file_pipes = None
if update_env:
update_script, env_file_pipes = env_update_script()
script += update_script
return script, (fdread, fdwrite), env_file_pipes
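# The returned script is what execute() feeds to `python -` on stdin; (fdread, fdwrite)
# carries any object the wrapped command pickles via output(), and env_file_pipes (only
# set when update_env=True) carries the pickled post-execution environment used to
# refresh the caller's env dictionary.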
def __reader(pipes, output_ns, output_var, require_output):
""" This function un-pickles a pickle from a file and return it as an element in a dictionary
Args:
pipes (tuple): This is a tuple containing the (read_pipe, write_pipe) from os.pipes containing the pickled object
output_ns (dict): This is the dictionary we should put the un-pickled object
output_var (str): This is the key we should use to determine where to put the object in the output_ns
require_output (bool): Should the reader give a warning if the pickle stream is not readable
"""
os.close(pipes[1])
with os.fdopen(pipes[0], 'rb') as read_file:
try:
# rcurrie this deepcopy hides a strange bug that the wrong dict is sometimes returned from here. Remove at your own risk
output_ns[output_var] = deepcopy(pickle.load(read_file))
except UnicodeDecodeError:
output_ns[output_var] = deepcopy(bytes2string(pickle.load(read_file, encoding="bytes")))
except Exception as err:
if require_output:
logger.error('Error getting output stream from command: %s', err)
def __timeout_func(process, timed_out):
""" This function is used to kill functions which are timing out behind the scenes and taking longer than a
threshold time to execute.
Args:
process (class): This is a subprocess class which knows of the pid of wrapping thread around the command we want to kill
timed_out (Event): A threading event to be set when the command has timed out
"""
if process.returncode is None:
timed_out.set()
try:
os.killpg(process.pid, signal.SIGKILL)
except Exception as e:
logger.error("Exception trying to kill process: %s" % e)
def start_timer(p, timeout):
""" Function to construct and return the timer thread and timed_out
Args:
p (object): This is the subprocess object which will be used to run the command of interest
timeout (int): This is the timeout in seconds after which the command will be killed
"""
# Start the background thread to catch timeout events
timed_out = threading.Event()
timer = threading.Timer(timeout, __timeout_func, args=(p, timed_out))
timer.daemon = True
if timeout is not None:
timer.start()
return timer, timed_out
def update_thread(pipes, thread_output, output_key, require_output):
""" Function to construct and return background thread used to read a pickled object into the thread_output for updating
    the environment after executing a user's code
Args:
started_threads (list): List containing background threads which have been started
pipes (tuple): Tuple containing (read_pipe, write_pipe) which is the pipe the pickled obj is written to
thread_output (dict): Dictionary containing the thread outputs which are used after executing the command
output_key (str): Used to know where in the thread_output to store the output of this thread
require_output (bool): Does the reader require valid pickled output.
"""
ev = threading.Thread(target=__reader, args=(pipes, thread_output, output_key, require_output))
ev.daemon = True
ev.start()
return ev
def execute(command,
timeout=None,
env=None,
cwd=None,
shell=True,
python_setup='',
eval_includes=None,
update_env=False,
):
"""
Execute an external command.
This will execute an external python command when shell=False or an external bash command when shell=True
Args:
command (str): This is the command that we want to execute in string format
timeout (int): This is the timeout which we want to assign to a function and it will be killed if it runs for longer than n seconds
env (dict): This is the environment to use for launching the new command
cwd (str): This is the cwd the command is to be executed within.
shell (bool): True for a bash command to be executed, False for a command to be executed within Python
        python_setup (str): A python command to be executed before the main command is
        eval_includes (str): A string used to construct an environment which, if passed, is used to eval the stdout into a python object
update_env (bool): Should we update the env being passed to what the env was after the command finished running
"""
if update_env and env is None:
raise GangaException('Cannot update the environment if None given.')
if not shell:
# We want to run a python command inside a small Python wrapper
stream_command = 'python -'
command, pkl_file_pipes, env_file_pipes = python_wrapper(command, python_setup, update_env)
else:
# We want to run a shell command inside a _NEW_ shell environment.
# i.e. What I run here I expect to behave in the same way from the command line after I exit Ganga
stream_command = "bash "
if update_env:
# note the exec gets around the problem of indent and base64 gets
# around the \n
command_update, env_file_pipes = env_update_script()
command += ''';python -c "import base64;exec(base64.b64decode(%s))"''' % base64.b64encode(command_update.encode("utf-8"))
# Some minor changes to cleanup the getting of the env
if env is None:
env = os.environ
# Construct the object which will contain the environment we want to run the command in
p = subprocess.Popen(stream_command, shell=True, env=env, cwd=cwd, preexec_fn=os.setsid,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, close_fds=False)
# This is where we store the output
thread_output = {}
# Start the timer thread used to kill commands which have likely stalled
timer, timed_out = start_timer(p, timeout)
if update_env:
env_output_key = 'env_output'
update_env_thread = update_thread(env_file_pipes, thread_output, env_output_key, require_output=True)
if not shell:
pkl_output_key = 'pkl_output'
update_pkl_thread = update_thread(pkl_file_pipes, thread_output, pkl_output_key, require_output=False)
# Execute the main command of interest
logger.debug("Executing Command:\n'%s'" % str(command))
stdout, stderr = p.communicate(command)
# Close the timeout watching thread
logger.debug("stdout: %s" % stdout)
logger.debug("stderr: %s" % stderr)
timer.cancel()
if timeout is not None:
timer.join()
# Finish up and decide what to return
if stderr != '':
# this is still debug as using the environment from dirac default_env maked a stderr message dump out
# even though it works
logger.debug(stderr)
if timed_out.isSet():
return 'Command timed out!'
# Decode any pickled objects from disk
if update_env:
update_env_thread.join()
if env_output_key in thread_output:
env.update(thread_output[env_output_key])
else:
logger.error("Expected to find the updated env after running a command")
logger.error("Command: %s" % command)
logger.error("stdout: %s" % stdout)
logger.error("stderr: %s" % stderr)
raise RuntimeError("Missing update env after running command")
if not shell and not eval_includes:
update_pkl_thread.join()
if pkl_output_key in thread_output:
return thread_output[pkl_output_key]
stdout_temp = None
try:
# If output
if stdout:
if isinstance(stdout, bytes):
stdout_temp = pickle.loads(stdout)
else:
try:
stdout_temp = pickle.loads(stdout.encode("utf-8"))
except pickle.UnpicklingError:
stdout_temp = pickle.loads(stdout.encode("latin1"))
# Downsides to wanting to be explicit in how this failed is you need to know all the ways it can!
except (pickle.UnpicklingError, EOFError, ValueError) as err:
if not shell:
log = logger.error
else:
log = logger.debug
log("Command Failed to Execute:\n%s" % command)
log("Command Output is:\n%s" % stdout)
log("Error received:\n%s" % err)
if not stdout_temp:
local_ns = locals()
if isinstance(eval_includes, str):
try:
exec(eval_includes, {}, local_ns)
except:
logger.debug("Failed to eval the env, can't eval stdout")
pass
if isinstance(stdout, str) and stdout:
try:
stdout_temp = eval(stdout, {}, local_ns)
except Exception as err2:
logger.debug("Err2: %s" % str(err2))
pass
if stdout_temp:
stdout = stdout_temp
return stdout
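# Minimal usage sketch (hypothetical commands, not taken from the original module):
#   execute('echo hello', shell=True)                # run the command through bash
#   execute('output({"answer": 42})', shell=False)   # run inside the python wrapper;
#                                                    # the dict pickled by output() is returned
#   env = dict(os.environ)
#   execute('export FOO=bar', shell=True, env=env, update_env=True)   # env now includes FOO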
| gpl-2.0 | -4,757,166,225,924,737,000 | 42.190789 | 144 | 0.640823 | false |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/hitbox.py | 1 | 5692 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
parse_iso8601,
float_or_none,
int_or_none,
compat_str,
determine_ext,
)
class HitboxIE(InfoExtractor):
IE_NAME = 'hitbox'
_VALID_URL = r'https?://(?:www\.)?(?:hitbox|smashcast)\.tv/(?:[^/]+/)*videos?/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.hitbox.tv/video/203213',
'info_dict': {
'id': '203213',
'title': 'hitbox @ gamescom, Sub Button Hype extended, Giveaway - hitbox News Update with Oxy',
'alt_title': 'hitboxlive - Aug 9th #6',
'description': '',
'ext': 'mp4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 215.1666,
'resolution': 'HD 720p',
'uploader': 'hitboxlive',
'view_count': int,
'timestamp': 1407576133,
'upload_date': '20140809',
'categories': ['Live Show'],
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'https://www.smashcast.tv/hitboxlive/videos/203213',
'only_matching': True,
}]
def _extract_metadata(self, url, video_id):
thumb_base = 'https://edge.sf.hitbox.tv'
metadata = self._download_json(
'%s/%s' % (url, video_id), video_id, 'Downloading metadata JSON')
date = 'media_live_since'
media_type = 'livestream'
if metadata.get('media_type') == 'video':
media_type = 'video'
date = 'media_date_added'
video_meta = metadata.get(media_type, [])[0]
title = video_meta.get('media_status')
alt_title = video_meta.get('media_title')
description = clean_html(
video_meta.get('media_description') or
video_meta.get('media_description_md'))
duration = float_or_none(video_meta.get('media_duration'))
uploader = video_meta.get('media_user_name')
views = int_or_none(video_meta.get('media_views'))
timestamp = parse_iso8601(video_meta.get(date), ' ')
categories = [video_meta.get('category_name')]
thumbs = [{
'url': thumb_base + video_meta.get('media_thumbnail'),
'width': 320,
'height': 180
}, {
'url': thumb_base + video_meta.get('media_thumbnail_large'),
'width': 768,
'height': 432
}]
return {
'id': video_id,
'title': title,
'alt_title': alt_title,
'description': description,
'ext': 'mp4',
'thumbnails': thumbs,
'duration': duration,
'uploader': uploader,
'view_count': views,
'timestamp': timestamp,
'categories': categories,
}
def _real_extract(self, url):
video_id = self._match_id(url)
player_config = self._download_json(
'https://www.smashcast.tv/api/player/config/video/%s' % video_id,
video_id, 'Downloading video JSON')
formats = []
for video in player_config['clip']['bitrates']:
label = video.get('label')
if label == 'Auto':
continue
video_url = video.get('url')
if not video_url:
continue
bitrate = int_or_none(video.get('bitrate'))
if determine_ext(video_url) == 'm3u8':
if not video_url.startswith('http'):
continue
formats.append({
'url': video_url,
'ext': 'mp4',
'tbr': bitrate,
'format_note': label,
'protocol': 'm3u8_native',
})
else:
formats.append({
'url': video_url,
'tbr': bitrate,
'format_note': label,
})
self._sort_formats(formats)
metadata = self._extract_metadata(
'https://www.smashcast.tv/api/media/video', video_id)
metadata['formats'] = formats
return metadata
class HitboxLiveIE(HitboxIE):
IE_NAME = 'hitbox:live'
_VALID_URL = r'https?://(?:www\.)?(?:hitbox|smashcast)\.tv/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://www.hitbox.tv/dimak',
'info_dict': {
'id': 'dimak',
'ext': 'mp4',
'description': 'md5:c9f80fa4410bc588d7faa40003fc7d0e',
'timestamp': int,
'upload_date': compat_str,
'title': compat_str,
'uploader': 'Dimak',
},
'params': {
# live
'skip_download': True,
},
}, {
'url': 'https://www.smashcast.tv/dimak',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if HitboxIE.suitable(url) else super(HitboxLiveIE, cls).suitable(url)
def _real_extract(self, url):
video_id = self._match_id(url)
player_config = self._download_json(
'https://www.smashcast.tv/api/player/config/live/%s' % video_id,
video_id)
formats = []
cdns = player_config.get('cdns')
servers = []
for cdn in cdns:
# Subscribe URLs are not playable
if cdn.get('rtmpSubscribe') is True:
continue
base_url = cdn.get('netConnectionUrl')
host = re.search(r'.+\.([^\.]+\.[^\./]+)/.+', base_url).group(1)
if base_url not in servers:
servers.append(base_url)
for stream in cdn.get('bitrates'):
label = stream.get('label')
if label == 'Auto':
continue
stream_url = stream.get('url')
if not stream_url:
continue
bitrate = int_or_none(stream.get('bitrate'))
if stream.get('provider') == 'hls' or determine_ext(stream_url) == 'm3u8':
if not stream_url.startswith('http'):
continue
formats.append({
'url': stream_url,
'ext': 'mp4',
'tbr': bitrate,
'format_note': label,
'rtmp_live': True,
})
else:
formats.append({
'url': '%s/%s' % (base_url, stream_url),
'ext': 'mp4',
'tbr': bitrate,
'rtmp_live': True,
'format_note': host,
'page_url': url,
'player_url': 'http://www.hitbox.tv/static/player/flowplayer/flowplayer.commercial-3.2.16.swf',
})
self._sort_formats(formats)
metadata = self._extract_metadata(
'https://www.smashcast.tv/api/media/live', video_id)
metadata['formats'] = formats
metadata['is_live'] = True
metadata['title'] = self._live_title(metadata.get('title'))
return metadata
| gpl-3.0 | -2,053,292,684,659,356,200 | 25.598131 | 102 | 0.608046 | false |
RedFantom/GSF-Parser | frames/strategies.py | 1 | 14275 | """
Author: RedFantom
Contributors: Daethyra (Naiii) and Sprigellania (Zarainia)
License: GNU GPLv3 as in LICENSE
Copyright (C) 2016-2018 RedFantom
"""
# Standard Library
from ast import literal_eval
import sys
# UI Libraries
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
# Project Modules
from widgets.strategy.list import StrategiesList
from widgets.strategy.map import Map
from toplevels.strategy.settings import SettingsToplevel
from toplevels.strategy.map import MapToplevel
class StrategiesFrame(ttk.Frame):
"""
Frame to display a StrategiesList and Map widget to allow the user
to create and edit Strategies with custom item in them to visualize
their tactics. An interface to allow real-time Strategy editing is
also provided.
"""
def __init__(self, *args, **kwargs):
ttk.Frame.__init__(self, *args, **kwargs)
"""
The two core widgets of this frame, with lots of callbacks to support
the different functionality. Not all functionality is provided through
callbacks, and providing any other widget than the StrategiesFrame as a
master widget is inadvisable. This is the result of bad coding practices.
"""
self.list = StrategiesList(self, callback=self._set_phase, settings_callback=self.open_settings, frame=self)
self.map = Map(self, moveitem_callback=self.list.move_item_phase, additem_callback=self.list.add_item_to_phase,
canvasheight=385, canvaswidth=385)
self.large = None
self.settings = None
self.in_map = self.map
# Create the widgets to support the description section on the right of the frame.
self.description_header = ttk.Label(self, text="Description", font=("default", 12), justify=tk.LEFT)
self.description = tk.Text(self, width=20 if sys.platform != "linux" else 30, height=23, wrap=tk.WORD)
# Bind the KeyPress event to a callback. A KeyPress is fired when *any* key is pressed on the keyboard.
self.description.bind("<KeyPress>", self.set_description_callback)
self.description_scroll = ttk.Scrollbar(self, orient=tk.VERTICAL, command=self.description.yview)
self.description.config(yscrollcommand=self.description_scroll.set)
self.client = None
self.description_update_task = None
# This frame calls grid_widgets in its __init__ function
self.grid_widgets()
def open_settings(self, *args):
"""
Callback for the Settings button to open a SettingsToplevel.
Only one SettingsToplevel is allowed to be open at any given
time, to prevent any problems with the Client/Server
functionality. If a SettingsToplevel is already open, lifts the
SettingsToplevel to the front so it is visible to the user.
"""
if self.settings:
self.settings.lift()
return
"""
The StrategiesFrame instance is passed as an argument because
not all functionality is provided through callbacks, but some
code is directly executed on the StrategiesFrame instance. Bad
coding practices yet again.
"""
self.settings = SettingsToplevel(master=self, disconnect_callback=self.disconnect_callback)
def grid_widgets(self):
"""It is pretty obvious what this does"""
self.list.grid(column=0, row=1, sticky="nswe", rowspan=2)
self.map.grid(column=1, row=1, sticky="nswe", pady=5, rowspan=2)
self.description_header.grid(column=3, columnspan=2, sticky="w", pady=(5, 0), padx=5, row=1)
self.description.grid(column=3, row=2, sticky="nwe", padx=5, pady=(0, 5))
self.description_scroll.grid(column=4, row=2, sticky="ns")
def _set_phase(self, phase):
"""
Callback for the StrategiesList widget to call when a new Phase
is selected.
:param phase: Phase name
"""
for map in self.maps:
map.update_map(self.list.db[self.list.selected_strategy][phase])
return
def set_description_callback(self, *args):
"""Delay for issue #142"""
self.after(5, self.set_description)
def set_description(self):
"""
Update the description of a certain item in the database. Also
immediately saves the database, so the description is
automatically saved when updated.
"""
if self.client and self.settings.client_permissions[self.client.name][1] is False:
self.description.delete("1.0", tk.END)
self.description.insert("1.0",
self.list.db[self.list.selected_strategy][self.list.selected_phase].description)
if self.list.selected_phase is not None:
self.list.db[self.list.selected_strategy][self.list.selected_phase]. \
description = self.description.get("1.0", tk.END)
self.list.db.save_database()
else:
self.list.db[self.list.selected_strategy].description = self.description.get("1.0", tk.END)
self.list.db.save_database()
if self.settings is not None:
allowed = self.settings.client_permissions[self.client.name][1]
if self.client and (allowed is True or allowed == "True" or allowed == "Master"):
self.send_description()
def send_description(self):
"""
Function to make sure that the description only gets sent two
seconds after stopping typing when editing it, to lower
bandwidth requirements.
"""
if self.description_update_task:
self.after_cancel(self.description_update_task)
self.description_update_task = self.after(
2000, lambda: self.client.update_description(
self.list.selected_strategy, self.list.selected_phase,
self.description.get("1.0", tk.END)))
def show_large(self):
"""
Callback for the Edit (large map)-Button of the StrategiesList
widget to open a larger map in a Toplevel (the MapToplevel from
toplevels.strategy_toplevels)
"""
self.large = MapToplevel(frame=self)
if self.list.selected_phase is None:
return
self.large.map.update_map(self.list.db[self.list.selected_strategy][self.list.selected_phase])
# If the instance is connected to a network, then the Map in the MapToplevel should know about it.
if self.client:
self.large.map.client = self.client
def client_connected(self, client):
"""
Callback for the SettingsToplevel (when open) to call when a
Client object is connected to a network. Sets the client
attribute for this instance, calls another callback, sets the
client attribute for the Map instance and *starts the Client
Thread to start the functionality of the Client*.
"""
self.client = client
self.list.client_connected(client)
self.map.client = self.client
if self.in_map:
self.in_map.client = self.client
self.client.start()
def insert_callback(self, command, args):
"""
Callback that has numerous functions:
- Before doing anything checks if the Client object is valid for
operations to be performed
- Inserts a log entry for the command received into the
ServerToplevel widget if the client is a master client
- Executes the command of the network on the Map widgets with
the given arguments
* add_item
* move_item
* del_item
:param command: command received from the network
:param args: arguments to perform this command
:return: None
:raises: ValueError when the Client is not set or not logged in
:raises: ValueError when the command received is unknown
"""
print("Insert callback received: ", command, args)
# If the command is a login, only a log entry is created here, and *all* Strategies in the
# database are sent to the new client to ensure smooth editing of the Strategies.
# The commands that follow first are those with which the master can control the Server and its Clients.
if command == "readonly":
target, allowed = args
if target != self.client.name:
return
allowed = literal_eval(allowed)
for map in self.maps:
map.set_readonly(allowed)
if allowed:
messagebox.showinfo("Info", "You are now allowed to edit the maps.")
else:
messagebox.showinfo("Info", "You are no longer allowed to edit the maps.")
elif command == "kicked":
messagebox.showerror("Info", "You were kicked from the Server.")
self.settings.disconnect_client()
return
elif command == "banned":
messagebox.showerror("Info", "You were banned from the Server.")
self.settings.disconnect_client()
return
elif command == "allowshare":
if not isinstance(args, list):
args = literal_eval(args)
_, name, allowed = args
if not isinstance(allowed, bool):
allowed = literal_eval(allowed)
self.settings.update_share(name, allowed)
if name != self.client.name:
return
if allowed:
messagebox.showinfo("Info", "You are now allowed by the Master of the Server to share your Strategies.")
else:
messagebox.showinfo("Info", "You are now no longer allowed by the Master of the Server to share your "
"Strategies.")
return
elif command == "allowedit":
_, name, allowed = args
if not isinstance(allowed, bool):
allowed = literal_eval(allowed)
if name == self.client.name:
if allowed:
messagebox.showinfo("Info", "You are now allowed by the Master of the Server to edit the "
"Strategies you have available. These edits will be shared with the "
"other users.")
for map in self.maps:
map.set_readonly(False)
else:
messagebox.showinfo("Info", "You are now no longer allowed by the Master of the Server to edit the "
"Strategies you have available.")
for map in self.maps:
map.set_readonly(True)
self.settings.update_edit(name, allowed)
return
elif command == "master":
name = args
if name == self.client.name:
messagebox.showinfo("Info", "You are now the Master of the Server.")
self.settings.update_master()
else:
self.settings.new_master(name)
return
elif command == "master_login":
name = args
self.settings._login_callback(name, "master")
elif command == "client_login":
name = args
self.settings._login_callback(name, "client")
elif command == "logout":
name = args
self.settings._logout_callback(name)
elif command == "description":
_, strategy, phase, description = args
if phase == "None":
phase = None
self.list.db[strategy][phase].description = description
if strategy == self.list.selected_strategy:
self.description.delete("1.0", tk.END)
self.description.insert("1.0", description)
# The arguments *always* include the Strategy name and Phase name for
# the operations to be performed on if these do not match the selected
# Strategy and Phase, then no visible changes occur on the Map widgets.
# However, the saving of the changes happen before this code is reached,
# and thus if the user moves to the other Strategy and Phase that the
# operations were performed on, the user will still see the changed
# elements
elif self.list.selected_strategy != args[0] or self.list.selected_phase != args[1]:
return
# Perform the operations on the Map instances to make the visual changes
elif command == "add_item":
_, _, text, font, color = args
for map in self.maps:
map.add_item(text, font=font, color=color)
elif command == "del_item":
_, _, text = args
for map in self.maps:
map.canvas.delete(map.items[text][0], map.items[text][1])
elif command == "move_item":
_, _, text, x, y = args
for map in self.maps:
rectangle, item = map.items[text]
if map is self.in_map:
coords = (int(int(x) / 768 * 385), int(int(y) / 768 * 385))
map.canvas.coords(item, *coords)
else:
map.canvas.coords(item, int(x), int(y))
map.canvas.coords(rectangle, map.canvas.bbox(item))
else:
raise ValueError("Unknown command: {0} with args {1}".format(command, args))
def disconnect_callback(self):
"""
Callback that is called when the Client is disconnected from the
Server, for whatever reason. All changes the master Client made
are already saved, so this code only resets the state of the
widgets in the StrategiesFrame instance.
"""
self.map.client = None
if self.in_map:
self.in_map.client = None
self.client = None
self.list.client = None
self.map.set_readonly(False)
@property
def maps(self):
"""Return list of Map objects available in StrategiesFrame instance"""
if self.in_map is not self.map:
return [self.map, self.in_map]
return [self.map]
| gpl-3.0 | -627,416,942,596,403,500 | 44.31746 | 120 | 0.605044 | false |
intip/aldryn-bootstrap3 | aldryn_bootstrap3/south_migrations/0022_auto__add_field_boostrap3alertplugin_icon.py | 1 | 25863 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Boostrap3AlertPlugin.icon'
db.add_column(u'aldryn_bootstrap3_boostrap3alertplugin', 'icon',
self.gf(u'django.db.models.fields.CharField')(default=u'', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Boostrap3AlertPlugin.icon'
db.delete_column(u'aldryn_bootstrap3_boostrap3alertplugin', 'icon')
models = {
u'aldryn_bootstrap3.boostrap3alertplugin': {
'Meta': {'object_name': 'Boostrap3AlertPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'}),
'icon': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'})
},
u'aldryn_bootstrap3.boostrap3blockquoteplugin': {
'Meta': {'object_name': 'Boostrap3BlockquotePlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'reverse': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'aldryn_bootstrap3.boostrap3buttonplugin': {
'Meta': {'object_name': 'Boostrap3ButtonPlugin', '_ormbases': ['cms.CMSPlugin']},
'anchor': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'btn_block': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'btn_context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255', 'blank': 'True'}),
'btn_size': (u'django.db.models.fields.CharField', [], {'default': "u'md'", 'max_length': '255', 'blank': 'True'}),
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['filer.File']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'icon_left': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'icon_right': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '256', 'blank': 'True'}),
'mailto': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'txt_context': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "u'lnk'", 'max_length': '10'}),
'url': ('django.db.models.fields.URLField', [], {'default': "u''", 'max_length': '200', 'blank': 'True'})
},
u'aldryn_bootstrap3.boostrap3iconplugin': {
'Meta': {'object_name': 'Boostrap3IconPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'icon': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'})
},
u'aldryn_bootstrap3.boostrap3imageplugin': {
'Meta': {'object_name': 'Boostrap3ImagePlugin', '_ormbases': ['cms.CMSPlugin']},
'alt': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'aspect_ratio': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}),
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Image']"}),
'shape': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '64', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
},
u'aldryn_bootstrap3.boostrap3labelplugin': {
'Meta': {'object_name': 'Boostrap3LabelPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'}),
'label': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '256', 'blank': 'True'})
},
u'aldryn_bootstrap3.boostrap3panelbodyplugin': {
'Meta': {'object_name': 'Boostrap3PanelBodyPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_bootstrap3.boostrap3panelfooterplugin': {
'Meta': {'object_name': 'Boostrap3PanelFooterPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
},
u'aldryn_bootstrap3.boostrap3panelheadingplugin': {
'Meta': {'object_name': 'Boostrap3PanelHeadingPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'title': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
},
u'aldryn_bootstrap3.boostrap3panelplugin': {
'Meta': {'object_name': 'Boostrap3PanelPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'})
},
u'aldryn_bootstrap3.boostrap3wellplugin': {
'Meta': {'object_name': 'Boostrap3WellPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
'size': (u'django.db.models.fields.CharField', [], {'default': "u'md'", 'max_length': '255', 'blank': 'True'})
},
u'aldryn_bootstrap3.bootstrap3columnplugin': {
'Meta': {'object_name': 'Bootstrap3ColumnPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
u'lg_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'lg_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'lg_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'lg_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'md_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'md_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'md_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'md_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'sm_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'sm_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'sm_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'sm_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'tag': ('django.db.models.fields.SlugField', [], {'default': "u'div'", 'max_length': '50'}),
u'xs_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'xs_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'xs_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'xs_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
u'aldryn_bootstrap3.bootstrap3rowplugin': {
'Meta': {'object_name': 'Bootstrap3RowPlugin', '_ormbases': ['cms.CMSPlugin']},
'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image'},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['aldryn_bootstrap3'] | bsd-3-clause | -6,558,731,824,654,015,000 | 92.710145 | 196 | 0.556819 | false |
domanova/highres-cortex | bin/od_column-regionsMain.py | 1 | 18809 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright CEA (2014).
# Copyright Université Paris XI (2014).
#
# Contributor: Olga Domanova <[email protected]>.
#
# This file is part of highres-cortex, a collection of software designed
# to process high-resolution magnetic resonance images of the cerebral
# cortex.
#
# This software is governed by the CeCILL licence under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the CeCILL
# licence as circulated by CEA, CNRS and INRIA at the following URL:
# <http://www.cecill.info/>.
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the licence, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of scientific
# software, that may mean that it is complicated to manipulate, and that
# also therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL licence and that you accept its terms.
# an example how to run this script
# time od_column-regionsMain.py -i /volatile/od243208/brainvisa_manual/ml140175/dist/classif_with_outer_boundaries_ml140175_L.nii.gz -d /volatile/od243208/brainvisa_manual/ad140157_columns/ -k ad140157_L
# od_column-regionsMain.py -i /neurospin/lnao/dysbrain/testBatchColumnsExtrProfiles/af140169/af140169_T1inT2_ColumnsCutNew20It/dist/classif_with_outer_boundaries_af140169_R_cut_noSulci_extended.nii.gz -d /neurospin/lnao/dysbrain/testBatchColumnsExtrProfiles/af140169/af140169_T1inT2_ColumnsCutNew20It/ -k af140169_R_cut_noSulci_extended
from soma import aims, aimsalgo
import sys, glob, os, subprocess, sys, time
import numpy as np
from optparse import OptionParser
import highres_cortex.cortex_topo, highres_cortex.div_gradn, highres_cortex.od_get_exchanged_propvol, highres_cortex.od_relabel_conjunction, highres_cortex.od_relabel, highres_cortex.od_randomize_labels
#read in the path and the directory
pathToClassifFile = None
pathToClassifFileWithoutBorders = None
data_directory = None
result_directory = None
heat_directory = None
keyWord = None
parser = OptionParser('Calculate column-regions in a cortical region')
parser.add_option('-i', dest='pathToClassifFile', help='Path to the volume with labeled cortex (100), and white matter (200), as well as the borders (50 and 150)') # if nothing is given: exit
parser.add_option('-j', dest='pathToClassifFileWithoutBorders', help='Path to the volume with labeled cortex (100), and white matter (200)') # if nothing is given: exit
parser.add_option('-d', dest='data_directory', help='directory for the results') # if nothing is given exit
parser.add_option('-k', dest='keyWord', help='KeyWord for the result files (including the patient ID and the hemisphere)') # if nothing is given exit
options, args = parser.parse_args(sys.argv)
print options
print args
if options.pathToClassifFile is None:
print >> sys.stderr, 'New: exit. No classification volume was given'
sys.exit(0)
else:
pathToClassifFile = options.pathToClassifFile
if options.pathToClassifFileWithoutBorders is None:
print >> sys.stderr, 'New: exit. No pathToClassifFileWithoutBorders volume was given'
sys.exit(0)
else:
pathToClassifFileWithoutBorders = options.pathToClassifFileWithoutBorders
if options.data_directory is None:
print >> sys.stderr, 'New: exit. No directory for results was given'
sys.exit(0)
else:
data_directory = options.data_directory
result_directory = data_directory + 'column_regions/'
heat_directory = data_directory + 'heat/'
iso_directory = data_directory + 'isovolume/'
if options.keyWord is None:
print >> sys.stderr, 'New: exit. No keyword for results was given'
sys.exit(0)
else:
keyWord = options.keyWord
# in the given directory create the subdirectory for the results
if not os.path.exists(result_directory):
os.makedirs(result_directory)
#AimsThreshold -b -m eq -t 50 \
#-i /volatile/od243208/brainvisa_manual/ml140175/classif_with_outer_boundaries_ml140175_L.nii.gz \
#-o /volatile/od243208/brainvisa_manual/ml140175/CSF_interface_ml140175_L.nii
volClassif = aims.read(pathToClassifFile)
arrSurfCSF = np.array(volClassif, copy = False)
arrSurfCSF[np.where(arrSurfCSF != 50)] = 0
arrSurfCSF[np.where(arrSurfCSF == 50)] = 32767
aims.write(volClassif, result_directory + 'CSF_interface_%s.nii' % (keyWord)) # OK
#AimsThreshold -b -m eq -t 150 \
#-i ../classif_with_outer_boundaries.nii.gz \
#-o white_interface.nii
volClassif = aims.read(pathToClassifFile)
arrSurfWhite = np.array(volClassif, copy = False)
arrSurfWhite[np.where(arrSurfWhite != 150)] = 0
arrSurfWhite[np.where(arrSurfWhite == 150)] = 32767
aims.write(volClassif, result_directory + 'white_interface_%s.nii' % (keyWord)) # OK
#ylLabelEachVoxel --verbose \
#-i CSF_interface.nii.gz \
#-o CSF_labelled_interface.nii \
#--first-label 100000001
subprocess.check_call(['ylLabelEachVoxel', '--verbose', '-i', result_directory + 'CSF_interface_%s.nii' % (keyWord), '-o', result_directory + 'CSF_labelled_interface_%s.nii' % (keyWord), '--first-label', '100000001']) # OK
#ylLabelEachVoxel --verbose \
#-i white_interface.nii.gz \
#-o white_labelled_interface.nii \
#--first-label 200000001
subprocess.check_call(['ylLabelEachVoxel', '--verbose', '-i', result_directory + 'white_interface_%s.nii' % (keyWord), '-o', result_directory + 'white_labelled_interface_%s.nii' % (keyWord), '--first-label', '200000001']) # OK
#AimsThreshold -b --fg -1 -m di -t 100 \
#-i ../classif.nii.gz \ # can take the classif with outer boundaries! as cortex is the same there
#-o negative_outside_cortex.nii
volClassif = aims.read(pathToClassifFile)
arrNegOutCortex = np.array(volClassif, copy = False)
arrNegOutCortex[np.where(arrNegOutCortex != 100)] = -1
arrNegOutCortex[np.where(arrNegOutCortex == 100)] = 0
aims.write(volClassif, result_directory + 'negative_outside_cortex_%s.nii' % (keyWord)) # OK
#AimsFileConvert -t S32 \
#-i negative_outside_cortex.nii \
#-o negative_outside_cortex_S32.nii
c = aims.Converter(intype=volClassif, outtype=aims.Volume('S32'))
volNegOutCortex = c(volClassif)
aims.write(volNegOutCortex, result_directory + 'negative_outside_cortex_S32_%s.nii' % (keyWord)) # OK
#AimsMerge -m sv \
#-i negative_outside_cortex_S32.nii \
#-M CSF_labelled_interface.nii \
#-o CSF_labelled_interface_negative_outside.nii
arrNegOutCortex = np.array(volNegOutCortex, copy = False)
volCSFLabelInt = aims.read(result_directory + 'CSF_labelled_interface_%s.nii' % (keyWord))
arrCSFLabelInt = np.array(volCSFLabelInt, copy = False)
arrNegOutCortex[arrCSFLabelInt != 0] = arrCSFLabelInt[arrCSFLabelInt != 0]
aims.write(volNegOutCortex, result_directory + 'CSF_labelled_interface_negative_outside_%s.nii' % (keyWord)) # OK
#AimsMerge -m ao -v 200000000 \
#-i CSF_labelled_interface_negative_outside.nii \
#-M white_labelled_interface.nii \
#-o propvol_CSF_labels.nii.gz
volWhiteLabInt = aims.read(result_directory + 'white_labelled_interface_%s.nii' % (keyWord))
arrWhiteLabInt = np.array(volWhiteLabInt, copy = False)
arrNegOutCortex[arrWhiteLabInt != 0] = 200000000
aims.write(volNegOutCortex, result_directory + 'propvol_CSF_labels_%s.nii.gz' % (keyWord)) # OK
#AimsMerge -m sv \
#-i negative_outside_cortex_S32.nii \
#-M white_labelled_interface.nii \
#-o white_labelled_interface_negative_outside.nii
volNegOutCortex = aims.read(result_directory + 'negative_outside_cortex_S32_%s.nii' % (keyWord))
arrNegOutCortex = np.array(volNegOutCortex, copy = False)
arrNegOutCortex[arrWhiteLabInt != 0] = arrWhiteLabInt[arrWhiteLabInt != 0]
aims.write(volNegOutCortex, result_directory + 'white_labelled_interface_negative_outside_%s.nii' % (keyWord)) # OK
#AimsMerge -m ao -v 100000000 \
#-i white_labelled_interface_negative_outside.nii \
#-M CSF_labelled_interface.nii \
#-o propvol_white_labels.nii.gz
arrNegOutCortex[np.where(arrCSFLabelInt != 0)] = 100000000
aims.write(volNegOutCortex, result_directory + 'propvol_white_labels_%s.nii.gz' % (keyWord)) # OK
subprocess.check_call(['time', 'ylPropagateAlongField', '--verbose', '--grad-field', heat_directory + 'heat_%s.nii.gz' % (keyWord), '--seeds', result_directory + 'propvol_CSF_labels_%s.nii.gz' % (keyWord), '--step', '-0.05', '--target-label', '200000000', '--output', result_directory + 'heat_CSF_labels_on_white_%s.nii.gz' % (keyWord)]) # OK
#ylPropagateAlongField --verbose \
#--grad-field ../heat/heat.nii.gz \
#--seeds propvol_CSF_labels.nii.gz \
#--step -0.05 \
#--target-label 200000000 \
#--output heat_CSF_labels_on_white.nii.gz
#time for the whole cortex 1:27.7
subprocess.check_call(['time', 'ylPropagateAlongField', '--verbose', '--grad-field', heat_directory + 'heat_%s.nii.gz' % (keyWord), '--seeds', result_directory + 'propvol_white_labels_%s.nii.gz' % (keyWord), '--step', '0.05', '--target-label', '100000000', '--output', result_directory + 'heat_white_labels_on_CSF_%s.nii.gz' % (keyWord)]) # OK
#ylPropagateAlongField --verbose \
#--grad-field ../heat/heat.nii.gz \
#--seeds propvol_white_labels.nii.gz \
#--step 0.05 \
#--target-label 100000000 \
#--output heat_white_labels_on_CSF.nii.gz
#time for the whole cortex 1:43.87
volCSF_labels_on_white = aims.read(result_directory + 'heat_CSF_labels_on_white_%s.nii.gz' % (keyWord))
volwhite_labels_on_CSF = aims.read(result_directory + 'heat_white_labels_on_CSF_%s.nii.gz' % (keyWord))
volClassif = aims.read(pathToClassifFile)
volExchangedPropVol = highres_cortex.od_get_exchanged_propvol.getExchangedPropagationVolume(volCSF_labels_on_white, volwhite_labels_on_CSF, volClassif, result_directory, keyWord)
aims.write(volExchangedPropVol, result_directory + "exchanged_propvol_%s.nii.gz" %(keyWord))
#python get_exchanged_propvol.py # -> exchanged_propvol.nii.gz
# Why is the previous step necessary?
#
# The obvious alternative is to do exactly as described in the OHBM paper: do
# the projections on the original labels of each voxel.
#
# The previous case aggregates the adjacent voxels of one interface that point
# towards the same voxel on the other interface. This reduces
# over-segmentation.
#
# Another way of reducing over-segmentation would be to aggregate together
# voxels that have one projection in common, instead of both (see conjunction
# step later on). But this introduces the problem of transitivity. This was
# investigated previously on the ferret data (under the name Billiard), but was
# considered a dead-end and the above solution seems to solve this problem most
# efficiently.
# There is a problem with the propagation of labels: the step size is fixed,
# which means that sometimes the point can skip the corner of a voxel, and thus
# go directly from a bulk voxel to an outside voxel. In this case it is
# recorded as a "dead-end" advection path, no resulting label is recorded and
# it appears as zero in the result.
#
# This problem also appears in the previous "exchange" step, but is mitigated
# by the subsequent connected-component detection (each failed propagation is
# assigned a different label).
#
# Quick fix: fix the conjunction step to not aggregate zeros.
#
# TODO: the proper way to fix this would be to force the advection path to
# respect the boundaries of voxels, so that the corner of voxels cannot be
# skipped over. This would also prevent the advection path from crossing the
# thin CSF surface within the sulcus (comes from skeleton).
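# A minimal illustrative sketch (not part of this pipeline) of the quick fix mentioned
# above, i.e. a conjunction step that does not aggregate zeros: voxels whose CSF or
# white projection failed (label 0) are left at 0 instead of being merged into a region.
# The function name and arguments are assumptions; the real logic lives in
# highres_cortex.od_relabel_conjunction.
def _conjunction_skipping_zeros(labels_csf, labels_white):
    out = np.zeros_like(labels_csf)
    pair_to_label = {}
    valid = (labels_csf != 0) & (labels_white != 0)  # ignore dead-end advection paths
    for idx in zip(*np.nonzero(valid)):
        pair = (int(labels_csf[idx]), int(labels_white[idx]))
        out[idx] = pair_to_label.setdefault(pair, len(pair_to_label) + 1)
    return out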
# I could take into account the fake cortex–CSF interface that exists at the
# cut plane, by assigning it a special label (e.g. 500000000) in the
# exchanged_propvol label. It would then need to be treated specially: any
# voxel that projects onto this label would be excluded from the region list,
# and thus would not take part in the merging step. This would prevent the
# creation of regions that connect to this spurious surface, but this would not
# prevent the nearby regions from being deformed by the perturbation of the
# field. It would thus probably be overkill to implement this special case.
# Care is needed when dealing with regions close to the cut plane anyway.
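# Hypothetical sketch only, of the special-label idea discussed above (deliberately not
# implemented in this pipeline): columns whose CSF or white projection lands on an
# artificial cut-plane label would be dropped before merging. The label value 500000000
# comes from the comment above; the array names are assumptions.
def _drop_cut_plane_regions(region_labels, proj_csf_labels, proj_white_labels,
                            cut_label=500000000):
    touches_cut = (proj_csf_labels == cut_label) | (proj_white_labels == cut_label)
    bad_regions = np.unique(region_labels[touches_cut])
    mask = np.in1d(region_labels.ravel(), bad_regions).reshape(region_labels.shape)
    return np.where(mask, 0, region_labels)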
#AimsMerge -m oo -l 150 -v 0 \
#-i exchanged_propvol.nii.gz \
#-M ../classif_with_outer_boundaries.nii.gz \
#-o ./exchanged_labels_on_CSF.nii
arrExchangedPropVol = np.array(volExchangedPropVol, copy = False)
arrClassif = np.array(volClassif, copy = False)
arrExchangedPropVol[arrClassif == 150] = 0
aims.write(volExchangedPropVol, result_directory + 'exchanged_labels_on_CSF_%s.nii' %(keyWord)) # OK
#AimsMerge -m oo -l 50 -v 0 \
#-i ./exchanged_propvol.nii.gz \
#-M ../classif_with_outer_boundaries.nii.gz \
#-o ./exchanged_labels_on_white.nii
volExchangedPropVol = aims.read(result_directory + "exchanged_propvol_%s.nii.gz" %(keyWord))
arrExchangedPropVol = np.array(volExchangedPropVol, copy = False)
arrExchangedPropVol[arrClassif == 50] = 0
aims.write(volExchangedPropVol, result_directory + 'exchanged_labels_on_white_%s.nii' %(keyWord)) # OK
#ylPropagateAlongField --verbose \
#--grad-field ../heat/heat.nii.gz \
#--seeds exchanged_labels_on_CSF.nii \
#--step -0.05 \
#--target-label 0 \
#--output heat_CSF_on_bulk.nii.gz \
#--dest-points heat_CSF_points_on_bulk.nii.gz
subprocess.check_call(['time', 'ylPropagateAlongField', '--verbose', '--grad-field', heat_directory + 'heat_%s.nii.gz' % (keyWord), '--seeds',result_directory + 'exchanged_labels_on_CSF_%s.nii' %(keyWord), '--step', '-0.05', '--target-label', '0', '--output', result_directory + 'heat_CSF_on_bulk_%s.nii.gz' % (keyWord), '--dest-points', result_directory + 'heat_CSF_points_on_bulk_%s.nii.gz' % (keyWord)])
# time for the full cortex: 4:56.95
#ylPropagateAlongField --verbose \
#--grad-field ../heat/heat.nii.gz \
#--seeds exchanged_labels_on_white.nii \
#--step 0.05 \
#--target-label 0 \
#--output heat_white_on_bulk.nii.gz \
#--dest-points heat_white_points_on_bulk.nii.gz
subprocess.check_call(['time', 'ylPropagateAlongField', '--verbose', '--grad-field', heat_directory + 'heat_%s.nii.gz' % (keyWord), '--seeds',result_directory + 'exchanged_labels_on_white_%s.nii' %(keyWord), '--step', '0.05', '--target-label', '0', '--output', result_directory + 'heat_white_on_bulk_%s.nii.gz' % (keyWord), '--dest-points', result_directory + 'heat_white_points_on_bulk_%s.nii.gz' % (keyWord)])
# time for the full cortex: 5:59.33
#python relabel_conjunction.py # -> ./conjunction.nii.gz
vol1 = aims.read(result_directory + 'heat_CSF_on_bulk_%s.nii.gz' % (keyWord))
vol2 = aims.read(result_directory + 'heat_white_on_bulk_%s.nii.gz' % (keyWord))
volRelabeledConj = highres_cortex.od_relabel_conjunction.relabel_conjunctions(vol1, vol2)
aims.write(volRelabeledConj, result_directory + 'conjunction_%s.nii.gz' % (keyWord))
# Yann added to ensure cortical columns traverse the cortex:
#AimsConnectComp -c 26 \
#-i conjunction.nii.gz \
#-o conjunction_connected.nii.gz
subprocess.check_call(['AimsConnectComp', '-c', '26', '-i', result_directory + 'conjunction_%s.nii.gz' % (keyWord), '-o', result_directory + 'conjunction_connected_%s.nii.gz' % (keyWord)])
#ylMergeCortexColumnRegions --verbose 2 \
#-i conjunction.nii.gz \
#-o merged.nii \
#--proj-csf heat_CSF_points_on_bulk.nii.gz \
#--proj-white heat_white_points_on_bulk.nii.gz \
#--goal-diameter 1
# Yann changed!! to ensure cortical columns traverse the cortex and various diameters are allowed:
#ylMergeCortexColumnRegions --verbose 2 \
#-i conjunction_connected.nii.gz \
#-o merged.nii \
#--proj-csf heat_CSF_points_on_bulk.nii.gz \
#--proj-white heat_white_points_on_bulk.nii.gz \
#--classif ../classif.nii.gz \
#--goal-diameter 1
subprocess.check_call(['time', 'ylMergeCortexColumnRegions', '--verbose', '2', '-i', result_directory + 'conjunction_connected_%s.nii.gz' % (keyWord), '-o',result_directory + 'merged_%s.nii' %(keyWord), '--proj-csf', result_directory + 'heat_CSF_points_on_bulk_%s.nii.gz' % (keyWord), '--proj-white', result_directory + 'heat_white_points_on_bulk_%s.nii.gz' % (keyWord), '--classif', pathToClassifFileWithoutBorders, '--goal-diameter', '1'])
# time for the full cortex : 0:58.83
#python relabel.py
vol1 = aims.read(result_directory + 'merged_%s.nii' %(keyWord))
vol2 = highres_cortex.od_relabel.relabel(vol1)
aims.write(vol2, result_directory + 'merged_relabelled_%s.nii.gz' % (keyWord))
#python randomize_labels.py
vol1 = highres_cortex.od_randomize_labels.relabel(vol2)
aims.write(vol1, result_directory + 'merged_randomized_%s.nii.gz' %(keyWord))
print np.max(np.array(vol1)) # number of different columns 111067
## test other diameters of cortical columns, e.g. 3 mm, 5 mm, 7 mm and 9 mm
#diams = [3, 5, 7, 9]
#diams = [9]
diams = [3, 5, 7, 9]
for diam in diams:
subprocess.check_call(['ylMergeCortexColumnRegions', '--verbose', '2', '-i', result_directory + 'conjunction_connected_%s.nii.gz' % (keyWord), '-o',result_directory + 'merged_%s_diam%s.nii' %(keyWord, diam), '--proj-csf', result_directory + 'heat_CSF_points_on_bulk_%s.nii.gz' % (keyWord), '--proj-white', result_directory + 'heat_white_points_on_bulk_%s.nii.gz' % (keyWord), '--classif', pathToClassifFileWithoutBorders, '--goal-diameter', str(diam)])
#python relabel.py
vol1 = aims.read(result_directory + 'merged_%s_diam%s.nii' %(keyWord, diam))
vol2 = highres_cortex.od_relabel.relabel(vol1)
aims.write(vol2, result_directory + 'merged_relabelled_%s_diam%s.nii.gz' % (keyWord, diam))
#python randomize_labels.py
vol1 = highres_cortex.od_randomize_labels.relabel(vol2)
aims.write(vol1, result_directory + 'merged_randomized_%s_diam%s.nii.gz' %(keyWord, diam))
print np.max(np.array(vol1)) # number of different columns
| gpl-3.0 | -1,059,299,137,137,612,500 | 50.103261 | 461 | 0.718281 | false |
demisto/content | Packs/ApiModules/Scripts/MicrosoftApiModule/MicrosoftApiModule.py | 1 | 23628 | import traceback
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import requests
import re
import base64
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from typing import Dict, Tuple, List, Optional
class Scopes:
graph = 'https://graph.microsoft.com/.default'
security_center = 'https://api.securitycenter.windows.com/.default'
# authorization types
OPROXY_AUTH_TYPE = 'oproxy'
SELF_DEPLOYED_AUTH_TYPE = 'self_deployed'
# grant types in self-deployed authorization
CLIENT_CREDENTIALS = 'client_credentials'
AUTHORIZATION_CODE = 'authorization_code'
REFRESH_TOKEN = 'refresh_token' # guardrails-disable-line
DEVICE_CODE = 'urn:ietf:params:oauth:grant-type:device_code'
REGEX_SEARCH_URL = r'(?P<url>https?://[^\s]+)'
SESSION_STATE = 'session_state'
class MicrosoftClient(BaseClient):
def __init__(self, tenant_id: str = '',
auth_id: str = '',
enc_key: str = '',
token_retrieval_url: str = 'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token',
app_name: str = '',
refresh_token: str = '',
auth_code: str = '',
scope: str = 'https://graph.microsoft.com/.default',
grant_type: str = CLIENT_CREDENTIALS,
redirect_uri: str = 'https://localhost/myapp',
resource: Optional[str] = '',
multi_resource: bool = False,
resources: List[str] = None,
verify: bool = True,
self_deployed: bool = False,
azure_ad_endpoint: str = 'https://login.microsoftonline.com',
*args, **kwargs):
"""
Microsoft Client class that implements logic to authenticate with oproxy or self deployed applications.
It also provides common logic to handle responses from Microsoft.
Args:
tenant_id: If self deployed it's the tenant for the app url, otherwise (oproxy) it's the token
auth_id: If self deployed it's the client id, otherwise (oproxy) it's the auth id and may also
contain the token url
enc_key: If self deployed it's the client secret, otherwise (oproxy) it's the encryption key
scope: The scope of the application (only if self deployed)
resource: The resource of the application (only if self deployed)
multi_resource: Whether or not the module uses multiple resources (self-deployed, auth_code grant type only)
resources: Resources of the application (for multi-resource mode)
verify: Demisto insecure parameter
self_deployed: Indicates whether the integration mode is self deployed or oproxy
"""
super().__init__(verify=verify, *args, **kwargs) # type: ignore[misc]
if not self_deployed:
auth_id_and_token_retrieval_url = auth_id.split('@')
auth_id = auth_id_and_token_retrieval_url[0]
if len(auth_id_and_token_retrieval_url) != 2:
self.token_retrieval_url = 'https://oproxy.demisto.ninja/obtain-token' # guardrails-disable-line
else:
self.token_retrieval_url = auth_id_and_token_retrieval_url[1]
self.app_name = app_name
self.auth_id = auth_id
self.enc_key = enc_key
self.tenant_id = tenant_id
self.refresh_token = refresh_token
else:
self.token_retrieval_url = token_retrieval_url.format(tenant_id=tenant_id)
self.client_id = auth_id
self.client_secret = enc_key
self.tenant_id = tenant_id
self.auth_code = auth_code
self.grant_type = grant_type
self.resource = resource
self.scope = scope
self.redirect_uri = redirect_uri
self.auth_type = SELF_DEPLOYED_AUTH_TYPE if self_deployed else OPROXY_AUTH_TYPE
self.verify = verify
self.azure_ad_endpoint = azure_ad_endpoint
self.multi_resource = multi_resource
if self.multi_resource:
self.resources = resources if resources else []
self.resource_to_access_token: Dict[str, str] = {}
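# Illustrative construction only (the argument values are assumptions): a self-deployed
# client typically passes the Azure app registration details directly, e.g.
#   MicrosoftClient(self_deployed=True, tenant_id='<tenant>', auth_id='<client id>',
#                   enc_key='<client secret>', scope=Scopes.graph,
#                   base_url='https://graph.microsoft.com', verify=True)
# while oproxy mode instead passes the oproxy registration id, encryption key and token.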
def http_request(
self, *args, resp_type='json', headers=None,
return_empty_response=False, scope: Optional[str] = None,
resource: str = '', **kwargs):
"""
Overrides the base client's request function: retrieves an access token and adds it to the request headers before sending.
Args:
resp_type: Type of response to return. Ignored if `return_empty_response` is True.
headers: Headers to add to the request.
return_empty_response: Return the response itself if the return_code is 206.
scope: A scope to request. Currently will work only with self-deployed app.
resource (str): The resource identifier that the generated token will have access to.
Returns:
Response from api according to resp_type. The default is `json` (dict or list).
"""
if 'ok_codes' not in kwargs:
kwargs['ok_codes'] = (200, 201, 202, 204, 206, 404)
token = self.get_access_token(resource=resource, scope=scope)
default_headers = {
'Authorization': f'Bearer {token}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
if headers:
default_headers.update(headers)
response = super()._http_request( # type: ignore[misc]
*args, resp_type="response", headers=default_headers, **kwargs)
# 206 indicates Partial Content, reason will be in the warning header.
# In that case, logs with the warning header will be written.
if response.status_code == 206:
demisto.debug(str(response.headers))
is_response_empty_and_successful = (response.status_code == 204)
if is_response_empty_and_successful and return_empty_response:
return response
# 404 responses are accepted via the default ok_codes above; convert them here into a dedicated NotFoundError:
if response.status_code == 404:
try:
error_message = response.json()
except Exception:
error_message = 'Not Found - 404 Response'
raise NotFoundError(error_message)
try:
if resp_type == 'json':
return response.json()
if resp_type == 'text':
return response.text
if resp_type == 'content':
return response.content
if resp_type == 'xml':
ET.parse(response.text)
return response
except ValueError as exception:
raise DemistoException('Failed to parse json object from response: {}'.format(response.content), exception)
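# Illustrative use only; the subclass name and endpoint below are assumptions and not
# part of this module. A typical integration subclasses MicrosoftClient and funnels
# every API call through http_request so that token acquisition and refresh stay transparent:
#   class ExampleGraphClient(MicrosoftClient):
#       def list_users(self):
#           return self.http_request(method='GET', url_suffix='/v1.0/users')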
def get_access_token(self, resource: str = '', scope: Optional[str] = None) -> str:
"""
Obtains access and refresh token from oproxy server or just a token from a self deployed app.
Access token is used and stored in the integration context
until expiration time. After expiration, new refresh token and access token are obtained and stored in the
integration context.
Args:
resource (str): The resource identifier that the generated token will have access to.
scope (str): A scope to get instead of the default on the API.
Returns:
str: Access token that will be added to authorization header.
"""
integration_context = get_integration_context()
refresh_token = integration_context.get('current_refresh_token', '')
# Set keywords. Default without the scope prefix.
access_token_keyword = f'{scope}_access_token' if scope else 'access_token'
valid_until_keyword = f'{scope}_valid_until' if scope else 'valid_until'
if self.multi_resource:
access_token = integration_context.get(resource)
else:
access_token = integration_context.get(access_token_keyword)
valid_until = integration_context.get(valid_until_keyword)
if access_token and valid_until:
if self.epoch_seconds() < valid_until:
return access_token
auth_type = self.auth_type
if auth_type == OPROXY_AUTH_TYPE:
if self.multi_resource:
for resource_str in self.resources:
access_token, expires_in, refresh_token = self._oproxy_authorize(resource_str)
self.resource_to_access_token[resource_str] = access_token
self.refresh_token = refresh_token
else:
access_token, expires_in, refresh_token = self._oproxy_authorize(scope=scope)
else:
access_token, expires_in, refresh_token = self._get_self_deployed_token(
refresh_token, scope, integration_context)
time_now = self.epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
# err on the side of caution with a slightly shorter access token validity period
expires_in = expires_in - time_buffer
valid_until = time_now + expires_in
integration_context.update({
access_token_keyword: access_token,
valid_until_keyword: valid_until,
'current_refresh_token': refresh_token
})
# Add resource access token mapping
if self.multi_resource:
integration_context.update(self.resource_to_access_token)
set_integration_context(integration_context)
if self.multi_resource:
return self.resource_to_access_token[resource]
return access_token
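# For reference, the cache written above is kept in the integration context and, in the
# default (non-scoped, single-resource) case, looks roughly like:
#   {
#       'access_token': '<token>',
#       'valid_until': <epoch seconds, shortened by the 5 second buffer>,
#       'current_refresh_token': '<refresh token or empty string>'
#   }
# Scoped requests use '<scope>_access_token' / '<scope>_valid_until' keys instead, and
# multi-resource mode additionally stores one access token per resource identifier.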
def _oproxy_authorize(self, resource: str = '', scope: Optional[str] = None) -> Tuple[str, int, str]:
"""
Gets a token by authorizing with oproxy.
Args:
scope: A scope to add to the request. Do not use it.
resource: Resource to get.
Returns:
tuple: An access token, its expiry and refresh token.
"""
content = self.refresh_token or self.tenant_id
headers = self._add_info_headers()
oproxy_response = requests.post(
self.token_retrieval_url,
headers=headers,
json={
'app_name': self.app_name,
'registration_id': self.auth_id,
'encrypted_token': self.get_encrypted(content, self.enc_key),
'scope': scope
},
verify=self.verify
)
if not oproxy_response.ok:
msg = 'Error in authentication. Try checking the credentials you entered.'
try:
demisto.info('Authentication failure from server: {} {} {}'.format(
oproxy_response.status_code, oproxy_response.reason, oproxy_response.text))
err_response = oproxy_response.json()
server_msg = err_response.get('message')
if not server_msg:
title = err_response.get('title')
detail = err_response.get('detail')
if title:
server_msg = f'{title}. {detail}'
elif detail:
server_msg = detail
if server_msg:
msg += ' Server message: {}'.format(server_msg)
except Exception as ex:
demisto.error('Failed parsing error response - Exception: {}'.format(ex))
raise Exception(msg)
try:
gcloud_function_exec_id = oproxy_response.headers.get('Function-Execution-Id')
demisto.info(f'Google Cloud Function Execution ID: {gcloud_function_exec_id}')
parsed_response = oproxy_response.json()
except ValueError:
raise Exception(
'There was a problem in retrieving an updated access token.\n'
'The response from the Oproxy server did not contain the expected content.'
)
return (parsed_response.get('access_token', ''), parsed_response.get('expires_in', 3595),
parsed_response.get('refresh_token', ''))
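# For reference, the oproxy exchange above is a POST to self.token_retrieval_url with
#   {"app_name": ..., "registration_id": ..., "encrypted_token": ..., "scope": ...}
# answered by JSON of the form
#   {"access_token": "...", "expires_in": 3595, "refresh_token": "..."}
# (field names are taken from the code above; the concrete values are illustrative).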
def _get_self_deployed_token(self,
refresh_token: str = '',
scope: Optional[str] = None,
integration_context: Optional[dict] = None
) -> Tuple[str, int, str]:
if self.grant_type == AUTHORIZATION_CODE:
if not self.multi_resource:
return self._get_self_deployed_token_auth_code(refresh_token, scope=scope)
else:
expires_in = -1 # init variable as an int
for resource in self.resources:
access_token, expires_in, refresh_token = self._get_self_deployed_token_auth_code(refresh_token,
resource)
self.resource_to_access_token[resource] = access_token
return '', expires_in, refresh_token
elif self.grant_type == DEVICE_CODE:
return self._get_token_device_code(refresh_token, scope, integration_context)
else:
# by default, grant_type is CLIENT_CREDENTIALS
return self._get_self_deployed_token_client_credentials(scope=scope)
def _get_self_deployed_token_client_credentials(self, scope: Optional[str] = None) -> Tuple[str, int, str]:
"""
Gets a token by authorizing a self deployed Azure application in client credentials grant type.
Args:
            scope: A scope to add to the request body. If not provided, self.scope is used.
Returns:
tuple: An access token and its expiry.
"""
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': CLIENT_CREDENTIALS
}
# Set scope.
if self.scope or scope:
data['scope'] = scope if scope else self.scope
if self.resource:
data['resource'] = self.resource
response_json: dict = {}
try:
response = requests.post(self.token_retrieval_url, data, verify=self.verify)
if response.status_code not in {200, 201}:
return_error(f'Error in Microsoft authorization. Status: {response.status_code},'
f' body: {self.error_parser(response)}')
response_json = response.json()
except Exception as e:
return_error(f'Error in Microsoft authorization: {str(e)}')
access_token = response_json.get('access_token', '')
expires_in = int(response_json.get('expires_in', 3595))
return access_token, expires_in, ''
def _get_self_deployed_token_auth_code(
self, refresh_token: str = '', resource: str = '', scope: Optional[str] = None) -> Tuple[str, int, str]:
"""
Gets a token by authorizing a self deployed Azure application.
Returns:
tuple: An access token, its expiry and refresh token.
"""
data = assign_params(
client_id=self.client_id,
client_secret=self.client_secret,
resource=self.resource if not resource else resource,
redirect_uri=self.redirect_uri
)
if scope:
data['scope'] = scope
refresh_token = refresh_token or self._get_refresh_token_from_auth_code_param()
if refresh_token:
data['grant_type'] = REFRESH_TOKEN
data['refresh_token'] = refresh_token
else:
if SESSION_STATE in self.auth_code:
raise ValueError('Malformed auth_code parameter: Please copy the auth code from the redirected uri '
'without any additional info and without the "session_state" query parameter.')
data['grant_type'] = AUTHORIZATION_CODE
data['code'] = self.auth_code
response_json: dict = {}
try:
response = requests.post(self.token_retrieval_url, data, verify=self.verify)
if response.status_code not in {200, 201}:
return_error(f'Error in Microsoft authorization. Status: {response.status_code},'
f' body: {self.error_parser(response)}')
response_json = response.json()
except Exception as e:
return_error(f'Error in Microsoft authorization: {str(e)}')
access_token = response_json.get('access_token', '')
expires_in = int(response_json.get('expires_in', 3595))
refresh_token = response_json.get('refresh_token', '')
return access_token, expires_in, refresh_token
def _get_token_device_code(
self, refresh_token: str = '', scope: Optional[str] = None, integration_context: Optional[dict] = None
) -> Tuple[str, int, str]:
"""
Gets a token by authorizing a self deployed Azure application.
Returns:
tuple: An access token, its expiry and refresh token.
"""
data = {
'client_id': self.client_id,
'scope': scope
}
if refresh_token:
data['grant_type'] = REFRESH_TOKEN
data['refresh_token'] = refresh_token
else:
data['grant_type'] = DEVICE_CODE
if integration_context:
data['code'] = integration_context.get('device_code')
response_json: dict = {}
try:
response = requests.post(self.token_retrieval_url, data, verify=self.verify)
if response.status_code not in {200, 201}:
return_error(f'Error in Microsoft authorization. Status: {response.status_code},'
f' body: {self.error_parser(response)}')
response_json = response.json()
except Exception as e:
return_error(f'Error in Microsoft authorization: {str(e)}')
access_token = response_json.get('access_token', '')
expires_in = int(response_json.get('expires_in', 3595))
refresh_token = response_json.get('refresh_token', '')
return access_token, expires_in, refresh_token
def _get_refresh_token_from_auth_code_param(self) -> str:
refresh_prefix = "refresh_token:"
if self.auth_code.startswith(refresh_prefix): # for testing we allow setting the refresh token directly
demisto.debug("Using refresh token set as auth_code")
return self.auth_code[len(refresh_prefix):]
return ''
@staticmethod
def error_parser(error: requests.Response) -> str:
"""
Args:
error (requests.Response): response with error
Returns:
str: string of error
"""
try:
response = error.json()
demisto.error(str(response))
inner_error = response.get('error', {})
if isinstance(inner_error, dict):
err_str = f"{inner_error.get('code')}: {inner_error.get('message')}"
else:
err_str = inner_error
if err_str:
return err_str
# If no error message
raise ValueError
except ValueError:
return error.text
@staticmethod
def epoch_seconds(d: datetime = None) -> int:
"""
Return the number of seconds for given date. If no date, return current.
Args:
d (datetime): timestamp
Returns:
int: timestamp in epoch
"""
if not d:
d = MicrosoftClient._get_utcnow()
return int((d - MicrosoftClient._get_utcfromtimestamp(0)).total_seconds())
@staticmethod
def _get_utcnow() -> datetime:
return datetime.utcnow()
@staticmethod
def _get_utcfromtimestamp(_time) -> datetime:
return datetime.utcfromtimestamp(_time)
@staticmethod
def get_encrypted(content: str, key: str) -> str:
"""
Encrypts content with encryption key.
Args:
content: Content to encrypt
key: encryption key from oproxy
        Returns:
            str: The encrypted content, base64-encoded.
"""
def create_nonce():
return os.urandom(12)
def encrypt(string, enc_key):
"""
Encrypts string input with encryption key.
Args:
string: String to encrypt
enc_key: Encryption key
Returns:
bytes: Encrypted value
"""
# String to bytes
try:
enc_key = base64.b64decode(enc_key)
except Exception as err:
return_error(f"Error in Microsoft authorization: {str(err)}"
f" Please check authentication related parameters.", error=traceback.format_exc())
# Create key
aes_gcm = AESGCM(enc_key)
# Create nonce
nonce = create_nonce()
# Create ciphered data
data = string.encode()
ct = aes_gcm.encrypt(nonce, data, None)
return base64.b64encode(nonce + ct)
now = MicrosoftClient.epoch_seconds()
encrypted = encrypt(f'{now}:{content}', key).decode('utf-8')
return encrypted
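    # --- Illustrative sketch, not part of the original module ---------------
    # Shows how a payload produced by get_encrypted above could be reversed,
    # assuming the same base64-encoded AES-GCM key and the 12-byte nonce that
    # create_nonce() prepends. The method name is hypothetical.
    @staticmethod
    def get_decrypted(payload: str, key: str) -> str:
        enc_key = base64.b64decode(key)
        aes_gcm = AESGCM(enc_key)
        raw = base64.b64decode(payload)
        nonce, ct = raw[:12], raw[12:]  # split off the prepended nonce
        # plaintext has the form '<epoch seconds>:<content>'
        return aes_gcm.decrypt(nonce, ct, None).decode('utf-8')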
@staticmethod
def _add_info_headers() -> Dict[str, str]:
# pylint: disable=no-member
headers = {}
try:
headers = get_x_content_info_headers()
except Exception as e:
demisto.error('Failed getting integration info: {}'.format(str(e)))
return headers
def device_auth_request(self) -> dict:
response_json = {}
try:
response = requests.post(
url=f'{self.azure_ad_endpoint}/organizations/oauth2/v2.0/devicecode',
data={
'client_id': self.client_id,
'scope': self.scope
},
verify=self.verify
)
if not response.ok:
return_error(f'Error in Microsoft authorization. Status: {response.status_code},'
f' body: {self.error_parser(response)}')
response_json = response.json()
except Exception as e:
return_error(f'Error in Microsoft authorization: {str(e)}')
set_integration_context({'device_code': response_json.get('device_code')})
return response_json
def start_auth(self, complete_command: str) -> str:
response = self.device_auth_request()
message = response.get('message', '')
re_search = re.search(REGEX_SEARCH_URL, message)
url = re_search.group('url') if re_search else None
user_code = response.get('user_code')
return f"""### Authorization instructions
1. To sign in, use a web browser to open the page [{url}]({url})
and enter the code **{user_code}** to authenticate.
2. Run the **{complete_command}** command in the War Room."""
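# --- Illustrative sketch, not part of the original module -------------------
# Rough shape of the device-code flow exposed above. `client` stands for an
# already-constructed MicrosoftClient (its constructor arguments are defined
# elsewhere and omitted here); the command name is a placeholder. Defined but
# never called.
def _example_device_code_flow(client, complete_command='!auth-complete'):
    # Step 1: request a device code and show the sign-in instructions to the user.
    instructions = client.start_auth(complete_command)
    # Step 2 happens after the user enters the code in the browser: the
    # device_code stored in the integration context lets the client's
    # token-acquisition path finish the flow.
    return instructions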
class NotFoundError(Exception):
"""Exception raised for 404 - Not Found errors.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
| mit | 5,221,532,628,645,901,000 | 40.163763 | 119 | 0.577874 | false |
kopringo/Scarky2 | Scarky2/builder/models.py | 1 | 3520 | #-*- coding: utf-8 -*-
from django.db import models, IntegrityError
from django.contrib.auth.models import User
#from sphere_engine import ProblemsClientV3
from django.conf import settings
from django.utils import timezone
import json
import uuid
import code
from logging import Logger
logger = Logger(__file__)
# Create your models here.
class Language(models.Model):
label = models.CharField(max_length=32)
version = models.CharField(max_length=32)
remote_id = models.IntegerField()
visible = models.BooleanField(default=True)
def __unicode__(self):
return u'%s' % self.label
@staticmethod
def sync_languages():
        # Requires the Sphere Engine client; imported locally so the module
        # still imports when the optional dependency is absent.
        from sphere_engine import ProblemsClientV3
        client = ProblemsClientV3(settings.SPHERE_ENGINE_TOKEN)
        languages = client.problems.languages()
languages = json.loads(languages)
for language in languages:
l = Language()
l.label = language['name']
l.version = language['ver']
l.remote_id = language['id']
l.save()
PROBLEM_RANK = (
('bin-date', 'Binary by date'),
('bin-time', 'Binary by time'),
('bin-source', 'Binary by length of source code'),
)
class Problem(models.Model):
code = models.CharField(max_length=8, unique=True)
date = models.DateTimeField()
remote_code = models.CharField(max_length=32)
user = models.ForeignKey(User, blank=True, null=True)
secret = models.CharField(max_length=40)
saved = models.BooleanField(default=False)
name = models.CharField(max_length=128)
content = models.TextField()
input = models.FileField(upload_to='uploaded')
output = models.FileField(upload_to='uploaded')
rank = models.CharField(max_length=16, choices=PROBLEM_RANK)
languages = models.ManyToManyField('Language')
date_start = models.DateTimeField(blank=True, null=True)
date_stop = models.DateTimeField(blank=True, null=True)
website = models.URLField(blank=True)
resource = models.CharField(max_length=128, blank=True)
email = models.EmailField(blank=True)
stats_visits = models.IntegerField(default=0)
stats_submissions = models.IntegerField(default=0)
@staticmethod
def create_problem(user=None):
i = 0
while True:
code = str(uuid.uuid1())[0:8]
secret = str(uuid.uuid1())
try:
problem = Problem()
problem.code = code
problem.secret = secret
problem.date = timezone.now()
problem.user = user
problem.save()
return problem
except IntegrityError as e:
logger.exception(e)
i = i + 1
if i > 10:
raise Exception('create_problem exception')
def __unicode__(self):
return u'%s. %s' % (str(self.id), self.name)
class ProblemFile(models.Model):
name = models.CharField(max_length=128)
oname = models.CharField(max_length=128)
problem = models.ForeignKey('Problem')
class Submission(models.Model):
date = models.DateTimeField()
problem = models.ForeignKey(Problem)
language = models.ForeignKey('Language')
status = models.IntegerField(default=0)
time = models.FloatField(default=0.0)
mem = models.IntegerField(default=0)
remote_id = models.IntegerField(default=0)
def __unicode__(self):
return u'%s' % str(self.id)
#
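# --- Illustrative usage sketch, not part of the original module -------------
# How a view might create a Problem with a unique 8-character code; the field
# values below are placeholders. Defined but never called.
def _example_create_problem(user=None):
    problem = Problem.create_problem(user=user)  # retries on code collisions (IntegrityError)
    problem.name = 'Example problem'
    problem.rank = 'bin-date'
    problem.save()
    return problem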
| mit | 6,629,684,123,329,770,000 | 29.608696 | 64 | 0.618466 | false |
krishnatray/data_science_project_portfolio | galvanize/TechnicalExcercise/q1.py | 1 | 2695 | # Q1 Technical Challenge
# Author: Sushil K Sharma
# -----------------------
"""
Problem Statement:
Create a text content analyzer. This is a tool used by writers to find statistics such as word and sentence count on essays or articles they are writing. Write a Python program that analyzes input from a file and compiles statistics on it.
The program should output:
1. The total word count
2. The count of unique words
3. The number of sentences
"""
# Assumptions:
#-------------
# 1. I have assumed that sentences are ended by a period.
# 2. This program is case insensitive, i.e. it ignores case when counting words.
def content_analyzer(input_text):
    # assumptions: this program is case insensitive, i.e. "Word", "WORD", "wOrd", etc. are considered the same.
arr = input_text.lower().split()
lines=input_text.split(". ")
# dictionary to keep track of unique words
unique_words = {}
# Initialize Counters
word_count = 0; unique_word_count = 0; sentences_count =0; sentences_length_sum =0
for word in arr:
word_count +=1
if word in unique_words:
unique_words[word] += 1
else:
unique_words[word] = 1
unique_word_count += 1
for sentence in lines:
sentences_count += 1
        sentences_length_sum += len(sentence.split())  # sentence length measured in words
avg_sentence_length=0
if sentences_count > 0:
avg_sentence_length = sentences_length_sum / sentences_count
# Print Results
print ("Results:")
print ("-------")
print ("Total word count:", word_count)
print ("Unique Words:", unique_word_count)
print ("Sentences:",sentences_count)
# Brownie points
# --------------
# 1. The ability to calculate the average sentence length in words
print ("Avg. Sentence Length:",sentences_count)
# 2. A list of words used, in order of descending frequency
print ("A list of words used, in order of descending frequency:")
print("--------------------------------------------------------")
unique_words_sorted = sorted(unique_words, key=unique_words.get, reverse=True)
for word in unique_words_sorted:
print(f"{word} {unique_words[word]}" )
# Brownie point
# 4 : The ability to accept input from STDIN, or from a file specified on the command line.
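# Illustrative sketch for brownie point 4, not part of the original submission:
# read the text from a file named on the command line, or from STDIN when the
# script is piped into. Defined but not called, so the interactive flow below
# is unchanged.
def read_input_from_file_or_stdin():
    import sys
    if len(sys.argv) > 1:
        with open(sys.argv[1], encoding='utf-8') as f:  # file path given on the command line
            return f.read()
    return sys.stdin.read()  # otherwise consume everything piped via STDIN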
print("**************************")
print("**** Content Analyzer ****")
print("**************************\n")
input_text= input("Please enter a few sentences: ")
content_analyzer(input_text)
print("*************************************")
print("**** Completed: Content Analyzer ****")
print("*************************************")
| mit | -2,832,477,704,902,959,000 | 33.460526 | 239 | 0.589239 | false |
zjj/trac_hack | sample-plugins/Timestamp.py | 1 | 1210 | """Inserts the current time (in seconds) into the wiki page."""
revision = "$Rev: 6326 $"
url = "$URL: https://svn.edgewall.org/repos/trac/tags/trac-0.12.2/sample-plugins/Timestamp.py $"
#
# The following shows the code for macro, old-style.
#
# The `execute` function serves no purpose other than to illustrate
# the example, it will not be used anymore.
#
# ---- (ignore in your own macro) ----
# --
import time # Trac before version 0.11 was using `time` module
def execute(hdf, txt, env):
t = time.localtime()
return "<b>%s</b>" % time.strftime('%c', t)
# --
# ---- (ignore in your own macro) ----
#
# The following is the converted new-style macro
#
# ---- (reuse for your own macro) ----
# --
from datetime import datetime
# Note: since Trac 0.11, datetime objects are used internally
from genshi.builder import tag
from trac.util.datefmt import format_datetime, utc
from trac.wiki.macros import WikiMacroBase
class TimestampMacro(WikiMacroBase):
"""Inserts the current time (in seconds) into the wiki page."""
def expand_macro(self, formatter, name, args):
t = datetime.now(utc)
return tag.b(format_datetime(t, '%c'))
# --
# ---- (reuse for your own macro) ----
| bsd-3-clause | 1,379,492,025,157,036,800 | 27.139535 | 96 | 0.670248 | false |
maltsev/LatexWebOffice | app/views/auth.py | 1 | 8497 | # -*- coding: utf-8 -*-
"""
* Purpose : managing user account registration and login
* Creation Date : 22-10-2014
* Last Modified : Mon 02 Mar 2015 15:23:28 CET
* Author : maltsev
* Coauthors : mattis, christian
* Sprintnumber : 1
* Backlog entry : RUA1, RUA4
"""
import re
import urllib
import datetime
from django.shortcuts import redirect, render_to_response
from django.contrib import messages, auth
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.template import RequestContext
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import get_user_model
User = get_user_model()
from app.common.constants import ERROR_MESSAGES
import settings
from app.models.recoverkey import RecoverKey
# see
# https://docs.djangoproject.com/en/dev/topics/auth/default/#django.contrib.auth.login
## Default handler for login requests by the client that sends the client the login page.
# If correct login details were sent with the request (over POST data), the user will be redirected to a success page.
# Otherwise an error message will be inserted into the django messages queue.
# @param request The HttpRequest Object
def login(request):
if request.user.is_authenticated():
return redirect('projekt')
email = ''
if request.session.has_key('email'):
email=request.session.get('email')
del request.session['email']
if request.method == 'POST' and 'action' in request.POST and 'email' in request.POST:
email = request.POST['email']
if request.POST['action']=='login':
password = request.POST['password']
# Email is case-insensitive, but login is case-sensitive
user = auth.authenticate(username=email.lower(), password=password)
if user is not None:
if user.is_active:
auth.login(request, user)
return redirect('projekt')
else:
messages.error(request, ERROR_MESSAGES['INACTIVEACCOUNT'] % email)
else:
messages.error(request, ERROR_MESSAGES['WRONGLOGINCREDENTIALS'])
elif request.POST['action'] == 'password-lost':
try:
user = User.objects.get(email__iexact=email)
recoverKey = RecoverKey.objects.getByUser(user)
subject="Latexweboffice Passwortreset"
url = request.build_absolute_uri(reverse('recoverpw'))+'?'+urllib.urlencode({'email': email, 'key': recoverKey.key})
body=u"""
Hallo!
Jemand hat einen Link zur Passwortwiederherstellung angefordert: %s
Falls dies nicht von Ihnen angefordert wurde, ignorieren Sie bitte diese Email.
Mit freundlichen Grüßen,
Ihr LatexWebOfficeteam
"""
emailsend=EmailMessage(subject, body % url)
emailsend.to=[email]
emailsend.send()
except ObjectDoesNotExist:
pass
messages.success(request,ERROR_MESSAGES['EMAILPWRECOVERSEND']% email)
sso_url = ''
if 'SSO_URL' in dir(settings):
sso_url = settings.SSO_URL
params = {'email': email, 'IS_SSO_ENABLED': settings.IS_SSO_ENABLED, 'SSO_URL': sso_url}
return render_to_response('login.html', params, context_instance=RequestContext(request))
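# --- Illustrative sketch, not part of the original file ---------------------
# Exercises the login view with Django's test client. The '/login/' path and the
# credentials are assumptions; only the form field names ('action', 'email',
# 'password') are taken from the view above. Defined but never called.
def _example_login_post():
    from django.test import Client
    client = Client()
    return client.post('/login/', {
        'action': 'login',
        'email': '[email protected]',
        'password': 'secret',
    })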
def lostPwHandler(request):
if request.method == 'GET' and 'email' in request.GET and 'key' in request.GET:
email = request.GET['email']
key = request.GET['key']
try:
user = User.objects.get(email__iexact=email)
if RecoverKey.objects.isValid(user, key):
return render_to_response('passwordrecover.html', {'email':email,'key':key}, context_instance=RequestContext(request))
except ObjectDoesNotExist:
pass
elif request.method == 'POST' and 'email' in request.POST and 'key' in request.POST and 'password1' in request.POST:
email=request.POST['email']
key=request.POST['key']
try:
user=User.objects.get(email__iexact=email)
if RecoverKey.objects.isValid(user, key):
user.set_password(request.POST['password1'])
RecoverKey.objects.get(key=key).delete()
user.save()
messages.success(request,ERROR_MESSAGES['PASSWORDCHANGED'])
request.session['email'] = email
return redirect('login')
except ObjectDoesNotExist:
pass
return render_to_response('passwordrecoverwrong.html',context_instance=RequestContext(request))
## Logout
# @param request The HttpRequest Object
@login_required
def logout(request):
auth.logout(request)
if 'SSO_LOGOUT_URL' in dir(settings) and request.build_absolute_uri().find('https://sso.') == 0:
return redirect(settings.SSO_LOGOUT_URL)
else:
return redirect('login')
## Default handler for registration requests by the client that sends the user the registration page.
# If correct registration details were sent with the request (over POST data), the user will be logged in
# and redirected to the start page
# Otherwise an error message will be inserted into the django messages queue.
# @param request The HttpRequest Object
def registration(request):
if request.user.is_authenticated():
return redirect('projekt')
email = ''
first_name = ''
if request.method == 'POST':
first_name = request.POST['first_name']
email = request.POST['email'].lower()
password1 = request.POST['password1']
password2 = request.POST['password2']
# boolean, true if there are errors in the user data
foundErrors = False
# validation checks
# no empty fields
if first_name == '' or email == '' or password1 == '':
messages.error(request, ERROR_MESSAGES['NOEMPTYFIELDS'])
foundErrors = True
# email already registered
if User.objects.filter(username__iexact=email).count() != 0:
messages.error(request, ERROR_MESSAGES['EMAILALREADYEXISTS'])
foundErrors = True
# no valid email format
if not validEmail(email):
messages.error(request, ERROR_MESSAGES['INVALIDEMAIL'])
foundErrors = True
# passwords may not contain any spaces
if ' ' in password1:
messages.error((request), ERROR_MESSAGES['NOSPACESINPASSWORDS'])
foundErrors = True
# passwords do not match
if password1 != password2:
messages.error(request, ERROR_MESSAGES['PASSWORDSDONTMATCH'])
foundErrors = True
# if all validation checks pass, create new user
if not foundErrors:
new_user = User.objects.create_user(email, email, password=password1)
new_user.first_name = first_name
new_user.save()
# user login and redirection to start page
user = auth.authenticate(username=email, password=password1)
if user is not None:
if user.is_active:
auth.login(request, user)
return redirect('projekt')
else:
messages.error(request, ERROR_MESSAGES['LOGINORREGFAILED'])
sso_url = ''
if 'SSO_URL' in dir(settings):
sso_url = settings.SSO_URL
return render_to_response('registration.html',
{'first_name': first_name, 'IS_SSO_ENABLED': settings.IS_SSO_ENABLED, 'SSO_URL': sso_url, 'email': email},
context_instance=RequestContext(request))
@csrf_exempt
# Checks whether an email address is already registered. If it is registered, "false" is returned; otherwise "true".
def userexists(request):
from django.http import HttpResponse
if request.method=='POST' and request.POST.get('email'):
if User.objects.filter(username=request.POST.get('email')).exists():
return HttpResponse("false")
return HttpResponse('true')
# Helper function to check if a email address is valid
def validEmail(email):
regex_email=re.compile("^[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
return regex_email.match(email)
| gpl-3.0 | 1,439,265,107,998,456,800 | 35.286325 | 163 | 0.641032 | false |
ContinuumIO/dask | dask/array/random.py | 2 | 19970 | import numbers
import warnings
from itertools import product
from numbers import Integral
from operator import getitem
import numpy as np
from .core import (
normalize_chunks,
Array,
slices_from_chunks,
asarray,
broadcast_shapes,
broadcast_to,
)
from .creation import arange
from ..base import tokenize
from ..highlevelgraph import HighLevelGraph
from ..utils import ignoring, random_state_data, derived_from, skip_doctest
def doc_wraps(func):
""" Copy docstring from one function to another """
warnings.warn(
"dask.array.random.doc_wraps is deprecated and will be removed in a future version",
FutureWarning,
)
def _(func2):
if func.__doc__ is not None:
func2.__doc__ = skip_doctest(func.__doc__)
return func2
return _
class RandomState(object):
"""
Mersenne Twister pseudo-random number generator
This object contains state to deterministically generate pseudo-random
numbers from a variety of probability distributions. It is identical to
``np.random.RandomState`` except that all functions also take a ``chunks=``
keyword argument.
Parameters
----------
seed: Number
Object to pass to RandomState to serve as deterministic seed
RandomState: Callable[seed] -> RandomState
A callable that, when provided with a ``seed`` keyword provides an
object that operates identically to ``np.random.RandomState`` (the
default). This might also be a function that returns a
``randomgen.RandomState``, ``mkl_random``, or
``cupy.random.RandomState`` object.
Examples
--------
>>> import dask.array as da
>>> state = da.random.RandomState(1234) # a seed
>>> x = state.normal(10, 0.1, size=3, chunks=(2,))
>>> x.compute()
array([10.01867852, 10.04812289, 9.89649746])
See Also
--------
np.random.RandomState
"""
def __init__(self, seed=None, RandomState=None):
self._numpy_state = np.random.RandomState(seed)
self._RandomState = RandomState
def seed(self, seed=None):
self._numpy_state.seed(seed)
def _wrap(
self, funcname, *args, size=None, chunks="auto", extra_chunks=(), **kwargs
):
""" Wrap numpy random function to produce dask.array random function
extra_chunks should be a chunks tuple to append to the end of chunks
"""
if size is not None and not isinstance(size, (tuple, list)):
size = (size,)
args_shapes = {ar.shape for ar in args if isinstance(ar, (Array, np.ndarray))}
args_shapes.union(
{ar.shape for ar in kwargs.values() if isinstance(ar, (Array, np.ndarray))}
)
shapes = list(args_shapes)
if size is not None:
shapes.extend([size])
# broadcast to the final size(shape)
size = broadcast_shapes(*shapes)
chunks = normalize_chunks(
chunks,
size, # ideally would use dtype here
dtype=kwargs.get("dtype", np.float64),
)
slices = slices_from_chunks(chunks)
def _broadcast_any(ar, shape, chunks):
if isinstance(ar, Array):
return broadcast_to(ar, shape).rechunk(chunks)
if isinstance(ar, np.ndarray):
return np.ascontiguousarray(np.broadcast_to(ar, shape))
# Broadcast all arguments, get tiny versions as well
# Start adding the relevant bits to the graph
dsk = {}
dsks = []
lookup = {}
small_args = []
dependencies = []
for i, ar in enumerate(args):
if isinstance(ar, (np.ndarray, Array)):
res = _broadcast_any(ar, size, chunks)
if isinstance(res, Array):
dependencies.append(res)
dsks.append(res.dask)
lookup[i] = res.name
elif isinstance(res, np.ndarray):
name = "array-{}".format(tokenize(res))
lookup[i] = name
dsk[name] = res
small_args.append(ar[tuple(0 for _ in ar.shape)])
else:
small_args.append(ar)
small_kwargs = {}
for key, ar in kwargs.items():
if isinstance(ar, (np.ndarray, Array)):
res = _broadcast_any(ar, size, chunks)
if isinstance(res, Array):
dependencies.append(res)
dsks.append(res.dask)
lookup[key] = res.name
elif isinstance(res, np.ndarray):
name = "array-{}".format(tokenize(res))
lookup[key] = name
dsk[name] = res
small_kwargs[key] = ar[tuple(0 for _ in ar.shape)]
else:
small_kwargs[key] = ar
sizes = list(product(*chunks))
seeds = random_state_data(len(sizes), self._numpy_state)
token = tokenize(seeds, size, chunks, args, kwargs)
name = "{0}-{1}".format(funcname, token)
keys = product(
[name], *([range(len(bd)) for bd in chunks] + [[0]] * len(extra_chunks))
)
blocks = product(*[range(len(bd)) for bd in chunks])
vals = []
for seed, size, slc, block in zip(seeds, sizes, slices, blocks):
arg = []
for i, ar in enumerate(args):
if i not in lookup:
arg.append(ar)
else:
if isinstance(ar, Array):
dependencies.append(ar)
arg.append((lookup[i],) + block)
else: # np.ndarray
arg.append((getitem, lookup[i], slc))
kwrg = {}
for k, ar in kwargs.items():
if k not in lookup:
kwrg[k] = ar
else:
if isinstance(ar, Array):
dependencies.append(ar)
kwrg[k] = (lookup[k],) + block
else: # np.ndarray
kwrg[k] = (getitem, lookup[k], slc)
vals.append(
(_apply_random, self._RandomState, funcname, seed, size, arg, kwrg)
)
meta = _apply_random(
self._RandomState,
funcname,
seed,
(0,) * len(size),
small_args,
small_kwargs,
)
dsk.update(dict(zip(keys, vals)))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
return Array(graph, name, chunks + extra_chunks, meta=meta)
@derived_from(np.random.RandomState, skipblocks=1)
def beta(self, a, b, size=None, chunks="auto", **kwargs):
return self._wrap("beta", a, b, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def binomial(self, n, p, size=None, chunks="auto", **kwargs):
return self._wrap("binomial", n, p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def chisquare(self, df, size=None, chunks="auto", **kwargs):
return self._wrap("chisquare", df, size=size, chunks=chunks, **kwargs)
with ignoring(AttributeError):
@derived_from(np.random.RandomState, skipblocks=1)
def choice(self, a, size=None, replace=True, p=None, chunks="auto"):
dependencies = []
# Normalize and validate `a`
if isinstance(a, Integral):
# On windows the output dtype differs if p is provided or
# absent, see https://github.com/numpy/numpy/issues/9867
dummy_p = np.array([1]) if p is not None else p
dtype = np.random.choice(1, size=(), p=dummy_p).dtype
len_a = a
if a < 0:
raise ValueError("a must be greater than 0")
else:
a = asarray(a)
a = a.rechunk(a.shape)
dtype = a.dtype
if a.ndim != 1:
raise ValueError("a must be one dimensional")
len_a = len(a)
dependencies.append(a)
a = a.__dask_keys__()[0]
# Normalize and validate `p`
if p is not None:
if not isinstance(p, Array):
# If p is not a dask array, first check the sum is close
# to 1 before converting.
p = np.asarray(p)
if not np.isclose(p.sum(), 1, rtol=1e-7, atol=0):
raise ValueError("probabilities do not sum to 1")
p = asarray(p)
else:
p = p.rechunk(p.shape)
if p.ndim != 1:
raise ValueError("p must be one dimensional")
if len(p) != len_a:
raise ValueError("a and p must have the same size")
dependencies.append(p)
p = p.__dask_keys__()[0]
if size is None:
size = ()
elif not isinstance(size, (tuple, list)):
size = (size,)
chunks = normalize_chunks(chunks, size, dtype=np.float64)
if not replace and len(chunks[0]) > 1:
err_msg = (
"replace=False is not currently supported for "
"dask.array.choice with multi-chunk output "
"arrays"
)
raise NotImplementedError(err_msg)
sizes = list(product(*chunks))
state_data = random_state_data(len(sizes), self._numpy_state)
name = "da.random.choice-%s" % tokenize(
state_data, size, chunks, a, replace, p
)
keys = product([name], *(range(len(bd)) for bd in chunks))
dsk = {
k: (_choice, state, a, size, replace, p)
for k, state, size in zip(keys, state_data, sizes)
}
graph = HighLevelGraph.from_collections(
name, dsk, dependencies=dependencies
)
return Array(graph, name, chunks, dtype=dtype)
# @derived_from(np.random.RandomState, skipblocks=1)
# def dirichlet(self, alpha, size=None, chunks="auto"):
@derived_from(np.random.RandomState, skipblocks=1)
def exponential(self, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("exponential", scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def f(self, dfnum, dfden, size=None, chunks="auto", **kwargs):
return self._wrap("f", dfnum, dfden, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def gamma(self, shape, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("gamma", shape, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def geometric(self, p, size=None, chunks="auto", **kwargs):
return self._wrap("geometric", p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def gumbel(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("gumbel", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def hypergeometric(self, ngood, nbad, nsample, size=None, chunks="auto", **kwargs):
return self._wrap(
"hypergeometric", ngood, nbad, nsample, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def laplace(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("laplace", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def logistic(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("logistic", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def lognormal(self, mean=0.0, sigma=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("lognormal", mean, sigma, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def logseries(self, p, size=None, chunks="auto", **kwargs):
return self._wrap("logseries", p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def multinomial(self, n, pvals, size=None, chunks="auto", **kwargs):
return self._wrap(
"multinomial",
n,
pvals,
size=size,
chunks=chunks,
extra_chunks=((len(pvals),),),
)
@derived_from(np.random.RandomState, skipblocks=1)
def negative_binomial(self, n, p, size=None, chunks="auto", **kwargs):
return self._wrap("negative_binomial", n, p, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def noncentral_chisquare(self, df, nonc, size=None, chunks="auto", **kwargs):
return self._wrap(
"noncentral_chisquare", df, nonc, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def noncentral_f(self, dfnum, dfden, nonc, size=None, chunks="auto", **kwargs):
return self._wrap(
"noncentral_f", dfnum, dfden, nonc, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def normal(self, loc=0.0, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("normal", loc, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def pareto(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("pareto", a, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def permutation(self, x):
from .slicing import shuffle_slice
if isinstance(x, numbers.Number):
x = arange(x, chunks="auto")
index = np.arange(len(x))
self._numpy_state.shuffle(index)
return shuffle_slice(x, index)
@derived_from(np.random.RandomState, skipblocks=1)
def poisson(self, lam=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("poisson", lam, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def power(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("power", a, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def randint(self, low, high=None, size=None, chunks="auto", dtype="l", **kwargs):
return self._wrap(
"randint", low, high, size=size, chunks=chunks, dtype=dtype, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def random_integers(self, low, high=None, size=None, chunks="auto", **kwargs):
return self._wrap(
"random_integers", low, high, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def random_sample(self, size=None, chunks="auto", **kwargs):
return self._wrap("random_sample", size=size, chunks=chunks, **kwargs)
random = random_sample
@derived_from(np.random.RandomState, skipblocks=1)
def rayleigh(self, scale=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("rayleigh", scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_cauchy(self, size=None, chunks="auto", **kwargs):
return self._wrap("standard_cauchy", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_exponential(self, size=None, chunks="auto", **kwargs):
return self._wrap("standard_exponential", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_gamma(self, shape, size=None, chunks="auto", **kwargs):
return self._wrap("standard_gamma", shape, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_normal(self, size=None, chunks="auto", **kwargs):
return self._wrap("standard_normal", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def standard_t(self, df, size=None, chunks="auto", **kwargs):
return self._wrap("standard_t", df, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def tomaxint(self, size=None, chunks="auto", **kwargs):
return self._wrap("tomaxint", size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def triangular(self, left, mode, right, size=None, chunks="auto", **kwargs):
return self._wrap(
"triangular", left, mode, right, size=size, chunks=chunks, **kwargs
)
@derived_from(np.random.RandomState, skipblocks=1)
def uniform(self, low=0.0, high=1.0, size=None, chunks="auto", **kwargs):
return self._wrap("uniform", low, high, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def vonmises(self, mu, kappa, size=None, chunks="auto", **kwargs):
return self._wrap("vonmises", mu, kappa, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def wald(self, mean, scale, size=None, chunks="auto", **kwargs):
return self._wrap("wald", mean, scale, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def weibull(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("weibull", a, size=size, chunks=chunks, **kwargs)
@derived_from(np.random.RandomState, skipblocks=1)
def zipf(self, a, size=None, chunks="auto", **kwargs):
return self._wrap("zipf", a, size=size, chunks=chunks, **kwargs)
def _choice(state_data, a, size, replace, p):
state = np.random.RandomState(state_data)
return state.choice(a, size=size, replace=replace, p=p)
def _apply_random(RandomState, funcname, state_data, size, args, kwargs):
"""Apply RandomState method with seed"""
if RandomState is None:
RandomState = np.random.RandomState
state = RandomState(state_data)
func = getattr(state, funcname)
return func(*args, size=size, **kwargs)
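# --- Illustrative usage sketch, not part of the original module -------------
# A seeded RandomState builds a lazy, chunked dask array; .compute() materializes
# it with NumPy, and the result is reproducible for a fixed seed and chunking.
# Defined but never called, so importing this module is unchanged.
def _example_seeded_normal():
    state = RandomState(1234)
    x = state.normal(10, 0.1, size=(4, 4), chunks=(2, 2))
    return x.compute()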
_state = RandomState()
seed = _state.seed
beta = _state.beta
binomial = _state.binomial
chisquare = _state.chisquare
if hasattr(_state, "choice"):
choice = _state.choice
exponential = _state.exponential
f = _state.f
gamma = _state.gamma
geometric = _state.geometric
gumbel = _state.gumbel
hypergeometric = _state.hypergeometric
laplace = _state.laplace
logistic = _state.logistic
lognormal = _state.lognormal
logseries = _state.logseries
multinomial = _state.multinomial
negative_binomial = _state.negative_binomial
noncentral_chisquare = _state.noncentral_chisquare
noncentral_f = _state.noncentral_f
normal = _state.normal
pareto = _state.pareto
permutation = _state.permutation
poisson = _state.poisson
power = _state.power
rayleigh = _state.rayleigh
random_sample = _state.random_sample
random = random_sample
randint = _state.randint
random_integers = _state.random_integers
triangular = _state.triangular
uniform = _state.uniform
vonmises = _state.vonmises
wald = _state.wald
weibull = _state.weibull
zipf = _state.zipf
"""
Standard distributions
"""
standard_cauchy = _state.standard_cauchy
standard_exponential = _state.standard_exponential
standard_gamma = _state.standard_gamma
standard_normal = _state.standard_normal
standard_t = _state.standard_t
| bsd-3-clause | 2,665,391,172,766,181,000 | 37.330134 | 92 | 0.595143 | false |
odahoda/noisicaa | noisicaa/builtin_nodes/sample_track/node_description.py | 1 | 1608 | #!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
from noisicaa import node_db
SampleTrackDescription = node_db.NodeDescription(
uri='builtin://sample-track',
display_name='Sample Track',
type=node_db.NodeDescription.PROCESSOR,
node_ui=node_db.NodeUIDescription(
type='builtin://sample-track',
),
processor=node_db.ProcessorDescription(
type='builtin://sample-script',
),
builtin_icon='track-type-sample',
ports=[
node_db.PortDescription(
name='out:left',
direction=node_db.PortDescription.OUTPUT,
types=[node_db.PortDescription.AUDIO],
),
node_db.PortDescription(
name='out:right',
direction=node_db.PortDescription.OUTPUT,
types=[node_db.PortDescription.AUDIO],
),
]
)
| gpl-2.0 | 120,325,731,447,951,020 | 31.816327 | 73 | 0.688433 | false |
SqueezeStudioAnimation/omtk | python/omtk/modules/rigLimb.py | 1 | 14788 | import pymel.core as pymel
import collections
from omtk import constants
from omtk.core.classModule import Module
from omtk.core.classCtrl import BaseCtrl
from omtk.core.utils import decorator_uiexpose
from omtk.modules import rigIK
from omtk.modules import rigFK
from omtk.modules import rigTwistbone
from omtk.libs import libRigging
from omtk.libs import libCtrlShapes
from omtk.libs import libAttr
from omtk.libs import libPython
class BaseAttHolder(BaseCtrl):
def __createNode__(self, size=None, refs=None, **kwargs):
# Resolve size automatically if refs are provided.
ref = next(iter(refs), None) if isinstance(refs, collections.Iterable) else refs
if size is None and ref is not None:
size = libRigging.get_recommended_ctrl_size(ref)
else:
size = 1.0
node = libCtrlShapes.create_shape_attrholder(size=size, **kwargs)
# Hide default keyable attributes
node.t.set(channelBox=False)
node.r.set(channelBox=False)
node.s.set(channelBox=False)
return node
class CtrlElbow(BaseCtrl):
def __createNode__(self, size=None, refs=None, *args, **kwargs):
# Resolve size automatically if refs are provided
ref = next(iter(refs), None) if isinstance(refs, collections.Iterable) else refs
if size is None and ref is not None:
size = libRigging.get_recommended_ctrl_size(ref) * 1.25
else:
size = 1.0
return libCtrlShapes.create_shape_cross(size=size, **kwargs)
class Limb(Module):
"""
Generic IK/FK setup. Twistbones are included.
"""
kAttrName_State = 'fkIk' # The name of the IK/FK attribute
_CLASS_SYS_IK = rigIK.IK
_CLASS_SYS_FK = rigFK.FK
_CLASS_CTRL_ATTR = BaseAttHolder
_CLASS_CTRL_ELBOW = CtrlElbow
_CLASS_SYS_TWIST = rigTwistbone.Twistbone
def __init__(self, *args, **kwargs):
super(Limb, self).__init__(*args, **kwargs)
self.sysIK = None
self.sysFK = None
self.sys_twist = []
self.create_twist = True
self.ctrl_elbow = None
self.attState = None
self.offset_ctrl_ik = None
self.ctrl_attrs = None
self.STATE_IK = 1.0
self.STATE_FK = 0.0
def build(self, *args, **kwargs):
super(Limb, self).build(*args, **kwargs)
nomenclature_anm = self.get_nomenclature_anm()
nomenclature_rig = self.get_nomenclature_rig()
# Resolve IK system name
# Create IK system
self.sysIK = self.init_module(
self._CLASS_SYS_IK,
self.sysIK,
inputs=self.chain_jnt,
suffix='ik',
)
self.sysIK.build(constraint=False, **kwargs)
# Create FK system
self.sysFK = self.init_module(
self._CLASS_SYS_FK,
self.sysFK,
inputs=self.chain_jnt,
suffix='fk',
)
# We want to keep the name of the input on the fk
self.sysFK._FORCE_INPUT_NAME = True
self.sysFK.build(constraint=False, **kwargs)
# Create twistbone system if needed
if self.create_twist:
num_twist_sys = self.sysIK.iCtrlIndex
# Ensure the twistbone list have the proper size
libPython.resize_list(self.sys_twist, num_twist_sys)
# If the IK system is a quad, we need to have two twist system
for i, sys_twist in enumerate(self.sys_twist):
# Resolve module name
# todo: validate name
twist_nomenclature = self.get_nomenclature().copy()
twist_nomenclature.add_tokens('bend')
twist_nomenclature += self.rig.nomenclature(self.chain_jnt[i].stripNamespace().nodeName())
# twist_nomenclature = self.get_nomenclature() + self.rig.nomenclature(self.chain_jnt[i].name())
sys_twist = self.init_module(
self._CLASS_SYS_TWIST,
sys_twist,
inputs=self.chain_jnt[i:(i + 2)],
# suffix='bend'
)
self.sys_twist[i] = sys_twist
sys_twist.name = twist_nomenclature.resolve()
sys_twist.build(num_twist=3, create_bend=True, **kwargs)
# Lock X and Y axis on the elbow/knee ctrl
if self.rig.DEFAULT_UPP_AXIS == constants.Axis.y:
libAttr.lock_hide_rotation(self.sysFK.ctrls[1], z=False)
elif self.rig.DEFAULT_UPP_AXIS == constants.Axis.z:
libAttr.lock_hide_rotation(self.sysFK.ctrls[1], y=False)
# Store the offset between the ik ctrl and it's joint equivalent.
# Useful when they don't match for example on a leg setup.
self.offset_ctrl_ik = self.sysIK.ctrl_ik.getMatrix(worldSpace=True) * self.chain_jnt[self.iCtrlIndex].getMatrix(
worldSpace=True).inverse()
# Add attributes to the attribute holder.
# Add ikFk state attribute on the grp_rig.
# This is currently controlled by self.ctrl_attrs.
pymel.addAttr(self.grp_rig, longName=self.kAttrName_State, hasMinValue=True, hasMaxValue=True, minValue=0,
maxValue=1, defaultValue=1, k=True)
attr_ik_weight = self.grp_rig.attr(self.kAttrName_State)
attr_fk_weight = libRigging.create_utility_node('reverse', inputX=attr_ik_weight).outputX
# Create attribute holder (this is where the IK/FK attribute will be stored)
# Note that this is production specific and should be defined in a sub-class implementation.
jnt_hand = self.chain_jnt[self.sysIK.iCtrlIndex]
ctrl_attrs_name = nomenclature_anm.resolve('atts')
if not isinstance(self.ctrl_attrs, self._CLASS_CTRL_ATTR):
self.ctrl_attrs = self._CLASS_CTRL_ATTR()
self.ctrl_attrs.build(name=ctrl_attrs_name, refs=jnt_hand)
self.ctrl_attrs.setParent(self.grp_anm)
pymel.parentConstraint(jnt_hand, self.ctrl_attrs.offset)
pymel.addAttr(self.ctrl_attrs, longName=self.kAttrName_State, hasMinValue=True, hasMaxValue=True, minValue=0,
maxValue=1, defaultValue=1, k=True)
pymel.connectAttr(self.ctrl_attrs.attr(self.kAttrName_State), self.grp_rig.attr(self.kAttrName_State))
# Create a chain for blending ikChain and fkChain
chain_blend = pymel.duplicate(list(self.chain_jnt), renameChildren=True, parentOnly=True)
for input_, node in zip(self.chain_jnt, chain_blend):
blend_nomenclature = nomenclature_rig.rebuild(input_.stripNamespace().nodeName())
node.rename(blend_nomenclature.resolve('blend'))
# Blend ikChain with fkChain
constraint_ik_chain = self.sysIK._chain_ik
if getattr(self.sysIK, '_chain_quad_ik', None):
constraint_ik_chain = self.sysIK._chain_quad_ik
        # Note: We need to set the parent of the chain_blend BEFORE creating the constraint.
        # Otherwise we might expose ourselves to evaluation issues (happened on maya 2018.2).
        # The symptom is the chain_blend rotation being aligned to the world and the rig being built on top.
        # At first the scene would seem OK; however, doing a dgdirty or reloading the scene would introduce flipping.
chain_blend[0].setParent(self.grp_rig)
for blend, oIk, oFk in zip(chain_blend, constraint_ik_chain, self.sysFK.ctrls):
            # Note that maintainOffset should not be necessary; however, the rigLegQuad IK can flip in some
            # rare cases. For now, since production needs it, we activate the flag (see Task #70938), but it would
            # be appreciated if the ugliness of the rigLegQuad module didn't bleed into the rigLimb module.
constraint = pymel.parentConstraint(oIk, oFk, blend, maintainOffset=True)
attr_weight_ik, attr_weight_fk = constraint.getWeightAliasList()
pymel.connectAttr(attr_ik_weight, attr_weight_ik)
pymel.connectAttr(attr_fk_weight, attr_weight_fk)
#
# Create elbow chain
        # This provides the elbow ctrl, an animator-friendly way of cheating the elbow on top of the blend chain.
        #
        # Create a chain that provides the elbow controller and overrides the blend chain
        # (which should only be nodes already)
chain_elbow = pymel.duplicate(self.chain_jnt[:self.sysIK.iCtrlIndex + 1], renameChildren=True, parentOnly=True)
for input_, node in zip(self.chain_jnt, chain_elbow):
nomenclature_elbow = nomenclature_rig.rebuild(input_.stripNamespace().nodeName())
node.rename(nomenclature_elbow.resolve('elbow')) # todo: find a better name???
chain_elbow[0].setParent(self.grp_rig)
# Create elbow ctrl
# Note that this only affect the chain until @iCtrlIndex
for i in range(1, self.sysIK.iCtrlIndex):
ctrl_elbow_name = nomenclature_anm.resolve('elbow{:02}'.format(i))
ctrl_elbow_parent = chain_blend[i]
if not isinstance(self.ctrl_elbow, self._CLASS_CTRL_ELBOW):
self.ctrl_elbow = self._CLASS_CTRL_ELBOW(create_offset=True)
ctrl_elbow_ref = self.chain_jnt[i] # jnt_elbow
self.ctrl_elbow.build(refs=ctrl_elbow_ref)
self.ctrl_elbow.rename(ctrl_elbow_name)
self.ctrl_elbow.setParent(self.grp_anm)
pymel.parentConstraint(ctrl_elbow_parent, self.ctrl_elbow.offset, maintainOffset=False)
pymel.pointConstraint(chain_blend[0], chain_elbow[0], maintainOffset=False)
pymel.aimConstraint(self.ctrl_elbow, chain_elbow[i - 1], worldUpType=2,
worldUpObject=chain_blend[i - 1]) # Object Rotation Up
pymel.aimConstraint(chain_blend[i + 1], chain_elbow[i], worldUpType=2,
worldUpObject=chain_blend[i]) # Object Rotation Up
pymel.pointConstraint(self.ctrl_elbow, chain_elbow[i], maintainOffset=False)
        # Constrain the last elbow joint to the blend joint at the ctrl index
        pymel.parentConstraint(chain_blend[self.sysIK.iCtrlIndex], chain_elbow[self.sysIK.iCtrlIndex])
        # Constrain the input chain
        # Note that we only constrain to the elbow chain until @iCtrlIndex.
        # Afterward we constrain to the blend chain.
for i in range(self.sysIK.iCtrlIndex):
inn = self.chain_jnt[i]
ref = chain_elbow[i]
pymel.parentConstraint(ref, inn, maintainOffset=True) # todo: set to maintainOffset=False?
for i in range(self.sysIK.iCtrlIndex, len(self.chain_jnt)):
inn = self.chain_jnt[i]
ref = chain_blend[i]
pymel.parentConstraint(ref, inn, maintainOffset=True) # todo: set to maintainOffset=False?
# Connect visibility
pymel.connectAttr(attr_ik_weight, self.sysIK.grp_anm.visibility)
pymel.connectAttr(attr_fk_weight, self.sysFK.grp_anm.visibility)
# Connect globalScale
pymel.connectAttr(self.grp_rig.globalScale, self.sysIK.grp_rig.globalScale, force=True)
        self.globalScale = self.grp_rig.globalScale  # Expose the attribute; the rig will recognize it.
# Parent sub-modules so they are affected by displayLayer assignment and such.
self.sysIK.grp_anm.setParent(self.grp_anm)
self.sysIK.grp_rig.setParent(self.grp_rig)
self.sysFK.grp_anm.setParent(self.grp_anm)
# Patch in case twist network exist, but twist are set to false
if self.create_twist:
for sys_twist in self.sys_twist:
if sys_twist.create_bend:
sys_twist.grp_anm.setParent(self.grp_anm)
sys_twist.grp_rig.setParent(self.grp_rig)
self.attState = attr_ik_weight # Expose state
def unbuild(self):
for twist_sys in self.sys_twist:
twist_sys.unbuild()
if self.sysIK and self.sysIK.is_built():
self.sysIK.unbuild()
if self.sysFK and self.sysFK.is_built():
self.sysFK.unbuild()
super(Limb, self).unbuild()
self.attState = None
def parent_to(self, parent):
# Do nothing as everything is handled by the sysIK and sysFK modules.
pass
#
# Functions called for IK/FK switch (animation tools)
#
def snap_ik_to_fk(self):
# Position ikCtrl
ctrl_ik_tm = self.chain_jnt[self.sysIK.iCtrlIndex].getMatrix(worldSpace=True)
self.sysIK.ctrl_ik.node.setMatrix(self.offset_ctrl_ik * ctrl_ik_tm, worldSpace=True)
# Position swivel
# pos_ref = self.sysFK.ctrls[self.sysIK.iCtrlIndex - 1].getTranslation(space='world')
pos_s = self.sysFK.ctrls[0].getTranslation(space='world')
pos_m = self.sysFK.ctrls[self.sysIK.iCtrlIndex - 1].getTranslation(space='world')
pos_e = self.sysFK.ctrls[self.sysIK.iCtrlIndex].getTranslation(space='world')
length_start = pos_m.distanceTo(pos_s)
length_end = pos_m.distanceTo(pos_e)
length_ratio = length_start / (length_start + length_end)
pos_middle = (pos_e - pos_s) * length_ratio + pos_s
dir_swivel = pos_m - pos_middle
dir_swivel.normalize()
pos_swivel = (dir_swivel * self.sysIK.swivelDistance) + pos_middle
self.sysIK.ctrl_swivel.node.setTranslation(pos_swivel, space='world')
def snap_fk_to_ik(self):
for ctrl, jnt in zip(self.sysFK.ctrls, self.chain_jnt):
ctrl.node.setMatrix(jnt.getMatrix(worldSpace=True), worldSpace=True)
def switch_to_ik(self):
self.snap_ik_to_fk()
attr_state = libAttr.get_settable_attr(self.attState)
if attr_state:
attr_state.set(self.STATE_IK)
def switch_to_fk(self):
self.snap_fk_to_ik()
attr_state = libAttr.get_settable_attr(self.attState)
if attr_state:
attr_state.set(self.STATE_FK)
def iter_ctrls(self):
for ctrl in super(Limb, self).iter_ctrls():
yield ctrl
if self.sysIK:
for ctrl in self.sysIK.iter_ctrls():
yield ctrl
if self.sysFK:
for ctrl in self.sysFK.iter_ctrls():
yield ctrl
yield self.ctrl_attrs
yield self.ctrl_elbow
@decorator_uiexpose()
def assign_twist_weights(self):
for module in self.sys_twist:
if module.__class__.__name__ == self._CLASS_SYS_TWIST.__name__:
module.assign_twist_weights()
@decorator_uiexpose()
def unassign_twist_weights(self):
for module in self.sys_twist:
if module.__class__.__name__ == self._CLASS_SYS_TWIST.__name__:
module.unassign_twist_weights()
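# --- Illustrative helper, not part of the original module -------------------
# The swivel-position math used in Limb.snap_ik_to_fk, isolated so the geometry
# is easier to follow. Inputs are pymel world-space position vectors and the IK
# swivel distance; the function name is hypothetical.
def _example_swivel_position(pos_s, pos_m, pos_e, swivel_distance):
    length_start = pos_m.distanceTo(pos_s)
    length_end = pos_m.distanceTo(pos_e)
    ratio = length_start / (length_start + length_end)
    pos_middle = (pos_e - pos_s) * ratio + pos_s  # point on the start-end segment
    dir_swivel = pos_m - pos_middle               # push the swivel away from that point
    dir_swivel.normalize()
    return dir_swivel * swivel_distance + pos_middle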
def register_plugin():
return Limb
| mit | -8,440,778,741,539,320,000 | 43.275449 | 120 | 0.632675 | false |
dangall/Kaggle-MobileODT-Cancer-Screening | modules/path_munging.py | 1 | 4547 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 27 11:26:31 2017
@author: daniele
"""
import os
import pandas as pd
import numpy as np
def all_image_paths(folderpath):
"""
Returns a list of filenames containing 'jpg'. The returned list has
sublists with filenames, where each sublist is a different folder.
"""
image_pathnames = [[folderandfiles[0]+"/"+imname
for imname in folderandfiles[2] if "jpg" in imname]
for folderandfiles in os.walk(folderpath)
if folderandfiles[2] != []]
image_pathnames = [folder for folder in image_pathnames if folder != []]
return image_pathnames
def batch_list(inputlist, batch_size):
"""
Returns the inputlist split into batches of maximal length batch_size.
Each element in the returned list (i.e. each batch) is itself a list.
"""
list_of_batches = [inputlist[ii: ii+batch_size]
for ii in range(0, len(inputlist), batch_size)]
return list_of_batches
def create_folder(path_to_folder):
"""
If the path_to_folder does not point to an existing folder, this function
creates such a folder.
"""
if not os.path.exists(path_to_folder):
os.makedirs(path_to_folder)
def count_batches(folderpath):
"""
Returns the number of training data batches in the input folder.
Input: string specifying path
Returns: int specifying the number of batches
"""
return sum(["training_images_batch" in filename
for filename in list(os.walk(folderpath))[0][2]])
def get_next_epoch(savedmodel_path):
"""
Read the path to the saved model and returns the next eopch to train on.
Input: string specifying path
Returns: int specifying the next epoch
"""
if savedmodel_path == "":
next_epoch = 1
else:
next_epoch = int(savedmodel_path[savedmodel_path.rfind("-")+1:]) + 1
return next_epoch
def get_modelpath_and_name(savedmodel_path):
"""
Help function which returns the full path to a model, excluding the epoch
number at the end, e.g. "./mybestmodel-40" returns "./mybestmodel".
"""
return savedmodel_path[:savedmodel_path.rfind("-")]
def image_index(pathname):
"""
Helper function for submission. Takes the path to an image, e.g.
"./image_folder/15.jpg", and returns the image name, in this example
"15.jpg".
"""
return pathname[pathname.rfind("/")+1:]
def get_date_string():
"""
Returns the current date and time in the format "yyyy-mm-dd_hh-mm".
For example, 30 April 2017 at 16:40 is returned as '2017-04-30_16-40'.
"""
currentime = pd.datetime.now().isoformat()
# Format the time
dateandminute = currentime[:currentime.rfind(":")
].replace("T", "_").replace(":", "-")
return dateandminute
def submission(probabilities, testing_folder, submission_folder):
"""
Creates a csv submission file from predicted probabilities, compatible with
Kaggle's submission guidelines. The file has the name submissions followed
by the date and time of the file creating, i.e.
"submissions_yyyy-mm-dd_hh-mm.csv".
Parameters:
probabilities: array of predicted probabilities for each image
testing_folder: string specifying the folder containing the
testing-input data. From this folder we fetch the image
name labels (e.g. "15.jpg") and the name of the
classification cateogries.
submission_folder: string specifying the folder into which we save the
submission csv.
Returns: string specifying the full path of the csv file we have saved.
"""
create_folder(submission_folder)
# Get the list of image names ["15.jpg", "42.jpg", "1.jpg", ...]
image_names = np.load(testing_folder + "/testing_namelabels.npy")
image_names = [image_index(path) for path in image_names]
# Get the order of the catogeries ["Type_1", "Type_2", "Type_3"]
categories = np.load(testing_folder + "/type123_order.npy")
# Make a dataframe containing the information
submission_df = pd.DataFrame(probabilities, columns=categories)
submission_df["image_name"] = image_names
submission_df.set_index("image_name", inplace=True)
filename = submission_folder + "/submissions_" + get_date_string() + ".csv"
# Now save to csv
submission_df.to_csv(filename)
return filename
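# --- Illustrative usage sketch, not part of the original module -------------
# Folder paths and the probabilities array are placeholders; `probabilities`
# needs one row per test image and one column per category. Defined but never
# called.
def _example_workflow():
    batches = batch_list(list(range(10)), batch_size=4)      # [[0..3], [4..7], [8, 9]]
    next_epoch = get_next_epoch("./checkpoints/mymodel-40")  # -> 41
    probabilities = np.full((512, 3), 1.0 / 3)               # dummy uniform predictions
    csv_path = submission(probabilities, "./test_data", "./submissions")
    return batches, next_epoch, csv_path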
| mit | 3,039,591,322,353,015,000 | 33.976923 | 79 | 0.64614 | false |
ficoos/godot | platform/iphone/detect.py | 1 | 9752 | import os
import sys
def is_active():
return True
def get_name():
return "iOS"
def can_build():
import sys
import os
if sys.platform == 'darwin' or os.environ.has_key("OSXCROSS_IOS"):
return True
return False
def get_opts():
return [
('IPHONEPLATFORM', 'name of the iphone platform', 'iPhoneOS'),
('IPHONEPATH', 'the path to iphone toolchain', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain'),
('IPHONESDK', 'path to the iphone SDK', '/Applications/Xcode.app/Contents/Developer/Platforms/${IPHONEPLATFORM}.platform/Developer/SDKs/${IPHONEPLATFORM}.sdk/'),
('game_center', 'Support for game center', 'yes'),
('store_kit', 'Support for in-app store', 'yes'),
('icloud', 'Support for iCloud', 'yes'),
('ios_gles22_override', 'Force GLES2.0 on iOS', 'yes'),
('ios_exceptions', 'Enable exceptions', 'no'),
('ios_triple', 'Triple for ios toolchain', ''),
('ios_sim', 'Build simulator binary', 'no'),
]
def get_flags():
return [
('tools', 'no'),
]
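# Hedged usage note (not part of the original file): an iOS export template is typically
# built from the repository root with something like
#   scons platform=iphone target=release arch=arm64 IPHONESDK="/path/to/iPhoneOS.sdk/"
# The SDK path above is a placeholder and depends on the local Xcode installation.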
def configure(env):
env.Append(CPPPATH=['#platform/iphone'])
env['ENV']['PATH'] = env['IPHONEPATH'] + "/Developer/usr/bin/:" + env['ENV']['PATH']
env['CC'] = '$IPHONEPATH/usr/bin/${ios_triple}clang'
env['CXX'] = '$IPHONEPATH/usr/bin/${ios_triple}clang++'
env['AR'] = '$IPHONEPATH/usr/bin/${ios_triple}ar'
env['RANLIB'] = '$IPHONEPATH/usr/bin/${ios_triple}ranlib'
import string
if (env["ios_sim"] == "yes" or env["arch"] == "x86"): # i386, simulator
env["arch"] = "x86"
env["bits"] = "32"
env.Append(CCFLAGS=string.split('-arch i386 -fobjc-abi-version=2 -fobjc-legacy-dispatch -fmessage-length=0 -fpascal-strings -fasm-blocks -D__IPHONE_OS_VERSION_MIN_REQUIRED=40100 -isysroot $IPHONESDK -mios-simulator-version-min=4.3 -DCUSTOM_MATRIX_TRANSFORM_H=\\\"build/iphone/matrix4_iphone.h\\\" -DCUSTOM_VECTOR3_TRANSFORM_H=\\\"build/iphone/vector3_iphone.h\\\"'))
elif (env["arch"] == "arm" or env["arch"] == "arm32" or env["arch"] == "armv7" or env["bits"] == "32"): # arm
env["arch"] = "arm"
env["bits"] = "32"
env.Append(CCFLAGS=string.split('-fno-objc-arc -arch armv7 -fmessage-length=0 -fno-strict-aliasing -fdiagnostics-print-source-range-info -fdiagnostics-show-category=id -fdiagnostics-parseable-fixits -fpascal-strings -isysroot $IPHONESDK -fvisibility=hidden -mthumb "-DIBOutlet=__attribute__((iboutlet))" "-DIBOutletCollection(ClassName)=__attribute__((iboutletcollection(ClassName)))" "-DIBAction=void)__attribute__((ibaction)" -miphoneos-version-min=9.0 -MMD -MT dependencies -isysroot $IPHONESDK'))
else: # armv64
env["arch"] = "arm64"
env["bits"] = "64"
env.Append(CCFLAGS=string.split('-fno-objc-arc -arch arm64 -fmessage-length=0 -fno-strict-aliasing -fdiagnostics-print-source-range-info -fdiagnostics-show-category=id -fdiagnostics-parseable-fixits -fpascal-strings -fvisibility=hidden -MMD -MT dependencies -miphoneos-version-min=9.0 -isysroot $IPHONESDK'))
env.Append(CPPFLAGS=['-DNEED_LONG_INT'])
env.Append(CPPFLAGS=['-DLIBYUV_DISABLE_NEON'])
if (env["arch"] == "x86"):
env['IPHONEPLATFORM'] = 'iPhoneSimulator'
env.Append(LINKFLAGS=['-arch', 'i386', '-mios-simulator-version-min=4.3',
'-isysroot', '$IPHONESDK',
#'-mmacosx-version-min=10.6',
'-Xlinker',
'-objc_abi_version',
'-Xlinker', '2',
'-framework', 'AudioToolbox',
'-framework', 'AVFoundation',
'-framework', 'CoreAudio',
'-framework', 'CoreGraphics',
'-framework', 'CoreMedia',
'-framework', 'CoreMotion',
'-framework', 'Foundation',
'-framework', 'Security',
'-framework', 'UIKit',
'-framework', 'MediaPlayer',
'-framework', 'OpenGLES',
'-framework', 'QuartzCore',
'-framework', 'SystemConfiguration',
'-F$IPHONESDK',
])
elif (env["arch"] == "arm64"):
env.Append(LINKFLAGS=['-arch', 'arm64', '-Wl,-dead_strip', '-miphoneos-version-min=9.0',
'-isysroot', '$IPHONESDK',
#'-stdlib=libc++',
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'CoreGraphics',
'-framework', 'OpenGLES',
'-framework', 'QuartzCore',
'-framework', 'CoreAudio',
'-framework', 'AudioToolbox',
'-framework', 'SystemConfiguration',
'-framework', 'Security',
#'-framework', 'AdSupport',
'-framework', 'MediaPlayer',
'-framework', 'AVFoundation',
'-framework', 'CoreMedia',
'-framework', 'CoreMotion',
])
else:
env.Append(LINKFLAGS=['-arch', 'armv7', '-Wl,-dead_strip', '-miphoneos-version-min=9.0',
'-isysroot', '$IPHONESDK',
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'CoreGraphics',
'-framework', 'OpenGLES',
'-framework', 'QuartzCore',
'-framework', 'CoreAudio',
'-framework', 'AudioToolbox',
'-framework', 'SystemConfiguration',
'-framework', 'Security',
#'-framework', 'AdSupport',
'-framework', 'MediaPlayer',
'-framework', 'AVFoundation',
'-framework', 'CoreMedia',
'-framework', 'CoreMotion',
])
if env['game_center'] == 'yes':
env.Append(CPPFLAGS=['-fblocks', '-DGAME_CENTER_ENABLED'])
env.Append(LINKFLAGS=['-framework', 'GameKit'])
if env['store_kit'] == 'yes':
env.Append(CPPFLAGS=['-DSTOREKIT_ENABLED'])
env.Append(LINKFLAGS=['-framework', 'StoreKit'])
if env['icloud'] == 'yes':
env.Append(CPPFLAGS=['-DICLOUD_ENABLED'])
env.Append(CPPPATH=['$IPHONESDK/usr/include', '$IPHONESDK/System/Library/Frameworks/OpenGLES.framework/Headers', '$IPHONESDK/System/Library/Frameworks/AudioUnit.framework/Headers'])
if (env["target"] == "release"):
env.Append(CCFLAGS=['-O3', '-DNS_BLOCK_ASSERTIONS=1', '-gdwarf-2']) # removed -ffast-math
env.Append(LINKFLAGS=['-O3'])
elif env["target"] == "release_debug":
env.Append(CCFLAGS=['-Os', '-DNS_BLOCK_ASSERTIONS=1', '-DDEBUG_ENABLED'])
env.Append(LINKFLAGS=['-Os'])
env.Append(CPPFLAGS=['-DDEBUG_MEMORY_ENABLED'])
elif (env["target"] == "debug"):
env.Append(CCFLAGS=['-D_DEBUG', '-DDEBUG=1', '-gdwarf-2', '-O0', '-DDEBUG_ENABLED'])
env.Append(CPPFLAGS=['-DDEBUG_MEMORY_ENABLED'])
elif (env["target"] == "profile"):
env.Append(CCFLAGS=['-g', '-pg', '-Os'])
env.Append(LINKFLAGS=['-pg'])
if (env["ios_sim"] == "yes"): # TODO: Check if needed?
env['ENV']['MACOSX_DEPLOYMENT_TARGET'] = '10.6'
env['ENV']['CODESIGN_ALLOCATE'] = '/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/codesign_allocate'
env.Append(CPPFLAGS=['-DIPHONE_ENABLED', '-DUNIX_ENABLED', '-DGLES2_ENABLED', '-DMPC_FIXED_POINT'])
# TODO: Move that to opus module's config
if("module_opus_enabled" in env and env["module_opus_enabled"] != "no"):
env.opus_fixed_point = "yes"
if env["arch"] == "x86":
pass
elif(env["arch"] == "arm64"):
env.Append(CFLAGS=["-DOPUS_ARM64_OPT"])
else:
env.Append(CFLAGS=["-DOPUS_ARM_OPT"])
if env['ios_exceptions'] == 'yes':
env.Append(CPPFLAGS=['-fexceptions'])
else:
env.Append(CPPFLAGS=['-fno-exceptions'])
# env['neon_enabled']=True
env['S_compiler'] = '$IPHONEPATH/Developer/usr/bin/gcc'
import methods
env.Append(BUILDERS={'GLSL120': env.Builder(action=methods.build_legacygl_headers, suffix='glsl.h', src_suffix='.glsl')})
env.Append(BUILDERS={'GLSL': env.Builder(action=methods.build_glsl_headers, suffix='glsl.h', src_suffix='.glsl')})
env.Append(BUILDERS={'GLSL120GLES': env.Builder(action=methods.build_gles2_headers, suffix='glsl.h', src_suffix='.glsl')})
| mit | -4,415,321,914,416,388,600 | 49.791667 | 508 | 0.494155 | false |
alphagov/notifications-api | migrations/versions/0252_letter_branding_table.py | 1 | 1503 | """
Revision ID: 0252_letter_branding_table
Revises: 0251_another_letter_org
Create Date: 2019-01-17 15:45:33.242955
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0252_letter_branding_table'
down_revision = '0251_another_letter_org'
def upgrade():
op.create_table('letter_branding',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('filename', sa.String(length=255), nullable=False),
sa.Column('domain', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('domain'),
sa.UniqueConstraint('filename'),
sa.UniqueConstraint('name')
)
op.create_table('service_letter_branding',
sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('letter_branding_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.ForeignKeyConstraint(['letter_branding_id'], ['letter_branding.id'], ),
sa.ForeignKeyConstraint(['service_id'], ['services.id'], ),
sa.PrimaryKeyConstraint('service_id')
)
op.get_bind()
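# Hedged usage note (illustrative, not part of the original migration): with Alembic's CLI
# (or the project's wrapper around it) this revision is applied with
#   alembic upgrade 0252_letter_branding_table
# and reverted with
#   alembic downgrade 0251_another_letter_org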
def downgrade():
op.drop_table('service_letter_branding')
op.drop_table('letter_branding')
| mit | 9,020,807,077,260,492,000 | 35.658537 | 99 | 0.587492 | false |
dana-at-cp/cpauto | tests/objects/test_group.py | 1 | 3664 | # -*- coding: utf-8 -*-
"""Tests for cpauto.objects.group module."""
import pytest
import responses
import cpauto
@pytest.mark.parametrize("name,params", [
("grp_basic", {}),
("grp_with_comment", {"comments": "ow now brown cow"}),
("grp_with_tags", {"tags": ["servers", "web", "dns"]}),
])
def test_add(core_client, mgmt_server_base_uri, name, params):
endpoint = mgmt_server_base_uri + 'add-group'
with responses.RequestsMock() as rsps:
resp_body = {'foo': 'bar', 'message': 'OK'}
rsps.add(responses.POST, endpoint,
json=resp_body, status=200,
content_type='application/json')
c = cpauto.Group(core_client)
r = c.add(name=name, params=params)
assert r.status_code == 200
assert r.json() == resp_body
@pytest.mark.parametrize("name,uid,details_level", [
("grp_name", "", ""),
("", "grpuid", ""),
("grp_name", "", "uid"),
("", "grpuid", "full"),
])
def test_show(core_client, mgmt_server_base_uri, name, uid, details_level):
endpoint = mgmt_server_base_uri + 'show-group'
with responses.RequestsMock() as rsps:
resp_body = {'foo': 'bar', 'message': 'OK'}
rsps.add(responses.POST, endpoint,
json=resp_body, status=200,
content_type='application/json')
c = cpauto.Group(core_client)
r = c.show(name=name, uid=uid, details_level=details_level)
assert r.status_code == 200
assert r.json() == resp_body
@pytest.mark.parametrize("name,uid,params", [
("grp_name", "", {"new-name": "grp_name_new"}),
("", "srvuid", {"ignore-errors": True}),
])
def test_set(core_client, mgmt_server_base_uri, name, uid, params):
endpoint = mgmt_server_base_uri + 'set-group'
with responses.RequestsMock() as rsps:
resp_body = {'foo': 'bar', 'message': 'OK'}
rsps.add(responses.POST, endpoint,
json=resp_body, status=200,
content_type='application/json')
c = cpauto.Group(core_client)
r = c.set(name=name, uid=uid, params=params)
assert r.status_code == 200
assert r.json() == resp_body
@pytest.mark.parametrize("name,uid,params", [
("grp_name", "", {}),
("", "grpuid", {}),
("grp_some_other", "", {'details-level': 'full'}),
("", "srvuid", {'ignore-errors': True}),
])
def test_delete(core_client, mgmt_server_base_uri, name, uid, params):
endpoint = mgmt_server_base_uri + 'delete-group'
with responses.RequestsMock() as rsps:
resp_body = {'foo': 'bar', 'message': 'OK'}
rsps.add(responses.POST, endpoint,
json=resp_body, status=200,
content_type='application/json')
c = cpauto.Group(core_client)
r = c.delete(name=name, uid=uid, params=params)
assert r.status_code == 200
assert r.json() == resp_body
@pytest.mark.parametrize("limit,offset,order,details_level", [
(50, 0, [], ''),
(50, 0, [{'ASC': 'foo'}], ''),
(64, 32, [{'DESC': 'bar'}], 'uid'),
])
def test_show_all(core_client, mgmt_server_base_uri,
limit, offset, order, details_level):
endpoint = mgmt_server_base_uri + 'show-groups'
with responses.RequestsMock() as rsps:
resp_body = {'foo': 'bar', 'message': 'OK'}
rsps.add(responses.POST, endpoint,
json=resp_body, status=200,
content_type='application/json')
c = cpauto.Group(core_client)
r = c.show_all(limit=limit, offset=offset,
order=order, details_level=details_level)
assert r.status_code == 200
assert r.json() == resp_body
| apache-2.0 | -7,694,709,256,478,565,000 | 33.895238 | 75 | 0.574236 | false |
dschuff/WAOT | src/wac.py | 1 | 2361 | #!/usr/bin/env python
import argparse
import os
import subprocess
import sys
RUNTIME_LIB = 'wart'
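# Hedged usage note (not part of the original script): based on the argparse flags defined
# below, a typical invocation looks like
#   ./wac.py -o out/test test.wast
# or, for spec test scripts,
#   ./wac.py -s -o out/spec_test spec_test.wast
# The input/output file names are placeholders.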
def find_runtime_dir(start_dir):
lib_name = 'lib' + RUNTIME_LIB + '.a'
if os.path.exists(os.path.join(start_dir, lib_name)):
return os.path.abspath(start_dir)
for d in [os.path.join(start_dir, x) for x in os.listdir(start_dir)
if os.path.isdir(os.path.join(start_dir, x))]:
f = find_runtime_dir(d)
if f:
return f
return None
def log_call_internal(verbose, args):
if verbose:
print >> sys.stderr, ' '.join(args)
try:
subprocess.check_call(args)
except subprocess.CalledProcessError:
print >> sys.stderr, 'Command Failed:'
print >> sys.stderr, ' '.join(args)
sys.exit(1)
def Main(argv):
parser = argparse.ArgumentParser(
description="End-to-end compiler driver for waot tests")
parser.add_argument('-o', '--output', help='Output file', default='a.out')
parser.add_argument('-s', '--spec-test-script',
help='Run translator in spec test script mode',
action='store_true')
parser.add_argument('-v', '--verbose', help='Log calls',
action='store_true')
parser.add_argument('inputs', metavar='INPUT', type=str, nargs='+',
help='input file')
options = parser.parse_args(argv)
def log_call(args):
return log_call_internal(options.verbose, args)
file_dir = os.path.dirname(os.path.abspath(__file__))
runtime_libdir = (
find_runtime_dir(file_dir) or
find_runtime_dir(os.path.join(os.path.dirname(file_dir), 'out')))
if not runtime_libdir:
print 'Could not locate', 'lib' + RUNTIME_LIB + '.a'
return 1
outdir = os.path.dirname(os.path.abspath(options.output))
objs = []
for input in options.inputs:
ll_temp = os.path.join(outdir, os.path.basename(input)) + '.ll'
o_temp = os.path.join(outdir, os.path.basename(input)) + '.o'
wat_flags = ['-o', ll_temp, input]
if options.spec_test_script:
wat_flags.append('-spec-test-script')
log_call([os.path.join(runtime_libdir, 'wat')] + wat_flags)
log_call(['llc', ll_temp, '-filetype=obj', '-o', o_temp])
objs.append(o_temp)
log_call(['gcc', '-o', options.output] + objs +
['-L'+runtime_libdir, '-l'+RUNTIME_LIB])
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| apache-2.0 | -8,867,012,722,067,558,000 | 32.728571 | 76 | 0.6205 | false |
edx-solutions/edx-platform | cms/djangoapps/contentstore/tests/utils.py | 1 | 18287 | '''
Utilities for contentstore tests
'''
import json
import textwrap
import six
from django.conf import settings
from django.contrib.auth.models import User
from django.test.client import Client
from mock import Mock
from opaque_keys.edx.keys import AssetKey, CourseKey
from contentstore.utils import reverse_url
from edx_notifications import apps
from student.models import Registration
from xmodule.contentstore.django import contentstore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.split_mongo.split import SplitMongoModuleStore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.utils import ProceduralCourseTestMixin
from xmodule.modulestore.xml_importer import import_course_from_xml
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
def parse_json(response):
"""Parse response, which is assumed to be json"""
return json.loads(response.content.decode('utf-8'))
def user(email):
"""look up a user by email"""
return User.objects.get(email=email)
def registration(email):
"""look up registration object by email"""
return Registration.objects.get(user__email=email)
class AjaxEnabledTestClient(Client):
"""
Convenience class to make testing easier.
"""
def ajax_post(self, path, data=None, content_type="application/json", **kwargs):
"""
Convenience method for client post which serializes the data into json and sets the accept type
to json
"""
if not isinstance(data, six.string_types):
data = json.dumps(data or {})
kwargs.setdefault("HTTP_X_REQUESTED_WITH", "XMLHttpRequest")
kwargs.setdefault("HTTP_ACCEPT", "application/json")
return self.post(path=path, data=data, content_type=content_type, **kwargs)
def get_html(self, path, data=None, follow=False, **extra):
"""
Convenience method for client.get which sets the accept type to html
"""
return self.get(path, data or {}, follow, HTTP_ACCEPT="text/html", **extra)
def get_json(self, path, data=None, follow=False, **extra):
"""
Convenience method for client.get which sets the accept type to json
"""
return self.get(path, data or {}, follow, HTTP_ACCEPT="application/json", **extra)
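# Hedged usage sketch (illustrative only; the endpoint, username and password below are
# placeholders, not values used elsewhere in this module):
#
#   client = AjaxEnabledTestClient()
#   client.login(username="staff", password="secret")
#   resp = client.ajax_post("/xblock/", data={"category": "html"})
#   assert resp.status_code == 200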
class CourseTestCase(ProceduralCourseTestMixin, ModuleStoreTestCase):
"""
Base class for Studio tests that require a logged in user and a course.
Also provides helper methods for manipulating and verifying the course.
"""
def setUp(self):
"""
These tests need a user in the DB so that the django Test Client can log them in.
The test user is created in the ModuleStoreTestCase setUp method.
They inherit from the ModuleStoreTestCase class so that the mongodb collection
will be cleared out before each test case execution and deleted
afterwards.
"""
super(CourseTestCase, self).setUp()
self.client = AjaxEnabledTestClient()
self.client.login(username=self.user.username, password=self.user_password)
self.course = CourseFactory.create()
# initialize the Notification subsystem
apps.startup_notification_subsystem()
def create_non_staff_authed_user_client(self, authenticate=True):
"""
Create a non-staff user, log them in (if authenticate=True), and return the client, user to use for testing.
"""
nonstaff, password = self.create_non_staff_user()
client = AjaxEnabledTestClient()
if authenticate:
client.login(username=nonstaff.username, password=password)
return client, nonstaff
def reload_course(self):
"""
Reloads the course object from the database
"""
self.course = self.store.get_course(self.course.id)
def save_course(self):
"""
Updates the course object in the database
"""
self.course.save()
self.store.update_item(self.course, self.user.id)
TEST_VERTICAL = 'vertical_test'
ORPHAN_DRAFT_VERTICAL = 'orphan_draft_vertical'
ORPHAN_DRAFT_HTML = 'orphan_draft_html'
PRIVATE_VERTICAL = 'a_private_vertical'
PUBLISHED_VERTICAL = 'a_published_vertical'
SEQUENTIAL = 'vertical_sequential'
DRAFT_HTML = 'draft_html'
DRAFT_VIDEO = 'draft_video'
LOCKED_ASSET_KEY = AssetKey.from_string('/c4x/edX/toy/asset/sample_static.html')
def import_and_populate_course(self):
"""
Imports the test toy course and populates it with additional test data
"""
content_store = contentstore()
import_course_from_xml(self.store, self.user.id, TEST_DATA_DIR, ['toy'], static_content_store=content_store)
course_id = CourseKey.from_string('/'.join(['edX', 'toy', '2012_Fall']))
# create an Orphan
# We had a bug where orphaned draft nodes caused export to fail. This is here to cover that case.
vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL), depth=1)
vertical.location = vertical.location.replace(name='no_references')
self.store.update_item(vertical, self.user.id, allow_not_found=True)
orphan_vertical = self.store.get_item(vertical.location)
self.assertEqual(orphan_vertical.location.block_id, 'no_references')
self.assertEqual(len(orphan_vertical.children), len(vertical.children))
# create an orphan vertical and html; we already don't try to import
# the orphaned vertical, but we should make sure we don't import
# the orphaned vertical's child html, too
orphan_draft_vertical = self.store.create_item(
self.user.id, course_id, 'vertical', self.ORPHAN_DRAFT_VERTICAL
)
orphan_draft_html = self.store.create_item(
self.user.id, course_id, 'html', self.ORPHAN_DRAFT_HTML
)
orphan_draft_vertical.children.append(orphan_draft_html.location)
self.store.update_item(orphan_draft_vertical, self.user.id)
# create a Draft vertical
vertical = self.store.get_item(course_id.make_usage_key('vertical', self.TEST_VERTICAL), depth=1)
draft_vertical = self.store.convert_to_draft(vertical.location, self.user.id)
self.assertTrue(self.store.has_published_version(draft_vertical))
# create a Private (draft only) vertical
private_vertical = self.store.create_item(self.user.id, course_id, 'vertical', self.PRIVATE_VERTICAL)
self.assertFalse(self.store.has_published_version(private_vertical))
# create a Published (no draft) vertical
public_vertical = self.store.create_item(self.user.id, course_id, 'vertical', self.PUBLISHED_VERTICAL)
public_vertical = self.store.publish(public_vertical.location, self.user.id)
self.assertTrue(self.store.has_published_version(public_vertical))
# add the new private and new public as children of the sequential
sequential = self.store.get_item(course_id.make_usage_key('sequential', self.SEQUENTIAL))
sequential.children.append(private_vertical.location)
sequential.children.append(public_vertical.location)
self.store.update_item(sequential, self.user.id)
# create an html and video component to make drafts:
draft_html = self.store.create_item(self.user.id, course_id, 'html', self.DRAFT_HTML)
draft_video = self.store.create_item(self.user.id, course_id, 'video', self.DRAFT_VIDEO)
# add them as children to the public_vertical
public_vertical.children.append(draft_html.location)
public_vertical.children.append(draft_video.location)
self.store.update_item(public_vertical, self.user.id)
# publish changes to vertical
self.store.publish(public_vertical.location, self.user.id)
# convert html/video to draft
self.store.convert_to_draft(draft_html.location, self.user.id)
self.store.convert_to_draft(draft_video.location, self.user.id)
# lock an asset
content_store.set_attr(self.LOCKED_ASSET_KEY, 'locked', True)
# create a non-portable link - should be rewritten in new courses
html_module = self.store.get_item(course_id.make_usage_key('html', 'nonportable'))
new_data = html_module.data = html_module.data.replace(
'/static/',
'/c4x/{0}/{1}/asset/'.format(course_id.org, course_id.course)
)
self.store.update_item(html_module, self.user.id)
html_module = self.store.get_item(html_module.location)
self.assertEqual(new_data, html_module.data)
return course_id
def check_populated_course(self, course_id):
"""
Verifies the content of the given course, per data that was populated in import_and_populate_course
"""
items = self.store.get_items(
course_id,
qualifiers={'category': 'vertical'},
revision=ModuleStoreEnum.RevisionOption.published_only
)
self.check_verticals(items)
def verify_item_publish_state(item, publish_state):
"""Verifies the publish state of the item is as expected."""
self.assertEqual(self.store.has_published_version(item), publish_state)
def get_and_verify_publish_state(item_type, item_name, publish_state):
"""
Gets the given item from the store and verifies the publish state
of the item is as expected.
"""
item = self.store.get_item(course_id.make_usage_key(item_type, item_name))
verify_item_publish_state(item, publish_state)
return item
# verify draft vertical has a published version with published children
vertical = get_and_verify_publish_state('vertical', self.TEST_VERTICAL, True)
for child in vertical.get_children():
verify_item_publish_state(child, True)
# verify that it has a draft too
self.assertTrue(getattr(vertical, "is_draft", False))
# make sure that we don't have a sequential that is in draft mode
sequential = get_and_verify_publish_state('sequential', self.SEQUENTIAL, True)
self.assertFalse(getattr(sequential, "is_draft", False))
# verify that we have the private vertical
private_vertical = get_and_verify_publish_state('vertical', self.PRIVATE_VERTICAL, False)
# verify that we have the public vertical
public_vertical = get_and_verify_publish_state('vertical', self.PUBLISHED_VERTICAL, True)
# verify that we have the draft html
draft_html = self.store.get_item(course_id.make_usage_key('html', self.DRAFT_HTML))
self.assertTrue(getattr(draft_html, 'is_draft', False))
# verify that we have the draft video
draft_video = self.store.get_item(course_id.make_usage_key('video', self.DRAFT_VIDEO))
self.assertTrue(getattr(draft_video, 'is_draft', False))
# verify verticals are children of sequential
for vert in [vertical, private_vertical, public_vertical]:
self.assertIn(vert.location, sequential.children)
# verify draft html is the child of the public vertical
self.assertIn(draft_html.location, public_vertical.children)
# verify draft video is the child of the public vertical
self.assertIn(draft_video.location, public_vertical.children)
# verify textbook exists
course = self.store.get_course(course_id)
self.assertGreater(len(course.textbooks), 0)
# verify asset attributes of locked asset key
self.assertAssetsEqual(self.LOCKED_ASSET_KEY, self.LOCKED_ASSET_KEY.course_key, course_id)
# verify non-portable links are rewritten
html_module = self.store.get_item(course_id.make_usage_key('html', 'nonportable'))
self.assertIn('/static/foo.jpg', html_module.data)
return course
def assertCoursesEqual(self, course1_id, course2_id):
"""
Verifies the content of the two given courses are equal
"""
course1_items = self.store.get_items(course1_id)
course2_items = self.store.get_items(course2_id)
self.assertGreater(len(course1_items), 0) # ensure it found content instead of [] == []
if len(course1_items) != len(course2_items):
course1_block_ids = set([item.location.block_id for item in course1_items])
course2_block_ids = set([item.location.block_id for item in course2_items])
raise AssertionError(
u"Course1 extra blocks: {}; course2 extra blocks: {}".format(
course1_block_ids - course2_block_ids, course2_block_ids - course1_block_ids
)
)
for course1_item in course1_items:
course1_item_loc = course1_item.location
course2_item_loc = course2_id.make_usage_key(course1_item_loc.block_type, course1_item_loc.block_id)
if course1_item_loc.block_type == 'course':
# mongo uses the run as the name, split uses 'course'
store = self.store._get_modulestore_for_courselike(course2_id) # pylint: disable=protected-access
new_name = 'course' if isinstance(store, SplitMongoModuleStore) else course2_item_loc.run
course2_item_loc = course2_item_loc.replace(name=new_name)
course2_item = self.store.get_item(course2_item_loc)
# compare published state
self.assertEqual(
self.store.has_published_version(course1_item),
self.store.has_published_version(course2_item)
)
# compare data
self.assertEqual(hasattr(course1_item, 'data'), hasattr(course2_item, 'data'))
if hasattr(course1_item, 'data'):
self.assertEqual(course1_item.data, course2_item.data)
# compare meta-data
course1_metadata = own_metadata(course1_item)
course2_metadata = own_metadata(course2_item)
            # Omit edx_video_id as it can be different in case of external video imports.
course1_metadata.pop('edx_video_id', None)
course2_metadata.pop('edx_video_id', None)
self.assertEqual(course1_metadata, course2_metadata)
# compare children
self.assertEqual(course1_item.has_children, course2_item.has_children)
if course1_item.has_children:
expected_children = []
for course1_item_child in course1_item.children:
expected_children.append(
course2_id.make_usage_key(course1_item_child.block_type, course1_item_child.block_id)
)
self.assertEqual(expected_children, course2_item.children)
# compare assets
content_store = self.store.contentstore
course1_assets, count_course1_assets = content_store.get_all_content_for_course(course1_id)
_, count_course2_assets = content_store.get_all_content_for_course(course2_id)
self.assertEqual(count_course1_assets, count_course2_assets)
for asset in course1_assets:
asset_son = asset.get('content_son', asset['_id'])
self.assertAssetsEqual(asset_son, course1_id, course2_id)
def check_verticals(self, items):
""" Test getting the editing HTML for each vertical. """
# assert is here to make sure that the course being tested actually has verticals (units) to check.
self.assertGreater(len(items), 0, "Course has no verticals (units) to check")
for descriptor in items:
resp = self.client.get_html(get_url('container_handler', descriptor.location))
self.assertEqual(resp.status_code, 200)
def assertAssetsEqual(self, asset_son, course1_id, course2_id):
"""Verifies the asset of the given key has the same attributes in both given courses."""
content_store = contentstore()
category = asset_son.block_type if hasattr(asset_son, 'block_type') else asset_son['category']
filename = asset_son.block_id if hasattr(asset_son, 'block_id') else asset_son['name']
course1_asset_attrs = content_store.get_attrs(course1_id.make_asset_key(category, filename))
course2_asset_attrs = content_store.get_attrs(course2_id.make_asset_key(category, filename))
self.assertEqual(len(course1_asset_attrs), len(course2_asset_attrs))
for key, value in six.iteritems(course1_asset_attrs):
if key in ['_id', 'filename', 'uploadDate', 'content_son', 'thumbnail_location']:
pass
else:
self.assertEqual(value, course2_asset_attrs[key])
def mock_requests_get(*args, **kwargs):
"""
Returns mock responses for the youtube API.
"""
# pylint: disable=unused-argument
response_transcript_list = """
<transcript_list>
<track id="1" name="Custom" lang_code="en" />
<track id="0" name="Custom1" lang_code="en-GB"/>
</transcript_list>
"""
response_transcript = textwrap.dedent("""
<transcript>
<text start="100" dur="100">subs #1</text>
<text start="200" dur="40">subs #2</text>
<text start="240" dur="140">subs #3</text>
</transcript>
""")
if kwargs == {'params': {'lang': 'en', 'v': 'good_id_2'}}:
return Mock(status_code=200, text='')
elif kwargs == {'params': {'type': 'list', 'v': 'good_id_2'}}:
return Mock(status_code=200, text=response_transcript_list, content=response_transcript_list)
elif kwargs == {'params': {'lang': 'en', 'v': 'good_id_2', 'name': 'Custom'}}:
return Mock(status_code=200, text=response_transcript, content=response_transcript)
return Mock(status_code=404, text='')
def get_url(handler_name, key_value, key_name='usage_key_string', kwargs=None):
"""
Helper function for getting HTML for a page in Studio and checking that it does not error.
"""
return reverse_url(handler_name, key_name, key_value, kwargs)
| agpl-3.0 | -5,645,957,303,986,287,000 | 44.264851 | 116 | 0.659922 | false |
mrroach/CentralServer | csrv/model/cards/runner/card01037.py | 1 | 2754 | from csrv.model import actions
from csrv.model import events
from csrv.model import timing_phases
from csrv.model.cards import card_info
from csrv.model.cards import event
class ChooseIce(timing_phases.BasePhase):
"""Choose a piece of ice for card01037."""
def __init__(self, game, player):
timing_phases.BasePhase.__init__(self, game, player, both_players=False)
def resolve(self, choice, response):
timing_phases.BasePhase.resolve(self, choice, response)
if choice:
self.end_phase()
class Card01037Action(actions.Action):
def __init__(self, game, player, card=None, card01037=None):
actions.Action.__init__(self, game, player, card=card)
self.card01037 = card01037
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
actions.Action.resolve(
self, response,
ignore_clicks=ignore_clicks,
ignore_all_costs=ignore_all_costs)
self.card01037.modify(self.card)
@property
def description(self):
if self.card.is_faceup:
return 'Add sentry, code gate, and barrier to %s' % self.card
else:
return 'Add sentry, code gate, and barrier to ice'
class Card01037(event.Event):
NAME = u'Card01037'
SET = card_info.CORE
NUMBER = 37
SIDE = card_info.RUNNER
FACTION = card_info.SHAPER
INFLUENCE = 4
UNIQUE = False
KEYWORDS = set([
card_info.MOD,
])
COST = 0
IMAGE_SRC = '01037.png'
def __init__(self, game, player):
event.Event.__init__(self, game, player)
self.ice = None
self.added = None
def build_actions(self):
event.Event.build_actions(self)
def play(self):
event.Event.play(self)
self.game.register_choice_provider(ChooseIce, self, 'modify_ice_actions')
self.game.insert_next_phase(ChooseIce(self.game, self.player))
def is_playable(self):
return event.Event.is_playable(self) and bool(self.modify_ice_actions())
def modify(self, ice):
self.game.deregister_choice_provider(ChooseIce, self, 'modify_ice_actions')
self.game.register_listener(events.RunnerTurnEnd, self)
self.ice = ice
types = set([card_info.BARRIER, card_info.SENTRY, card_info.CODE_GATE])
to_add = types - self.ice.KEYWORDS
self.ice.KEYWORDS.update(to_add)
self.added = to_add
def on_runner_turn_end(self, sender, event):
self.game.deregister_listener(events.RunnerTurnEnd, self)
if self.ice:
self.ice.KEYWORDS.difference_update(self.added)
self.ice = None
self.added = None
def modify_ice_actions(self):
actions = []
for server in self.game.corp.servers:
for ice in server.ice.cards:
actions.append(Card01037Action(
self.game, self.player, card=ice,
card01037=self))
return actions
| apache-2.0 | -3,939,459,415,255,302,000 | 27.6875 | 80 | 0.681917 | false |
retrogradeorbit/Pigo | pigo/gfx/TextureEngine.py | 1 | 2910 |
from Texture import Texture
from Tex import Tex
#from weakref import *
from PIL import Image
##
## @brief stores and keeps track of on card textures
##
## stores and keeps track of on card textures.
## TODO: resizes and caches to disk texture shrinking
HORIZONTAL=0
VERTICAL=1
import weakref
class TextureEngine:
def __init__(self):
# cache is a simple dictionary, with label as the keys and textures as the values
self.cache={}
self.textures=[]
def LoadImage(self, file, framesize=None, label=None, cache=True, sequence=HORIZONTAL):
# we call this and it loads the relevant image of disk. We can choose to cache it or not.
# it sets up for us a texture that is not resident, and a series of Tex's that are the frames
# of the animation, if such a value is passed in
print "LoadImage(",file,")"
if cache:
if not label:
label=file
if self.IsCached(label):
return self.GetCached(label)
im=Image.open(file)
texture=Texture(im,filename=file)
size=im.size
if framesize==None:
framesize=size
# generate tex frames
if sequence==HORIZONTAL:
for outer in range(size[1]/framesize[1]):
for inner in range(size[0]/framesize[0]):
#print inner,outer
#print ( texture, inner*framesize[0],outer*framesize[1],framesize[0],framesize[1])
texture.AddTexFrame(Tex( texture, inner*framesize[0],outer*framesize[1],framesize[0],framesize[1]))
elif sequence==VERTICAL:
for outer in range(size[0]/framesize[0]):
for inner in range(size[1]/framesize[1]):
texture.AddTexFrame(Tex( texture, outer*framesize[0],inner*framesize[1],framesize[0],framesize[1]))
# cache it if we are asked to
if cache:
self.Cache(texture, label)
return texture
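    # Hedged usage sketch (illustrative; "player.png", the 32x32 frame size and the
    # "player" label are made-up values):
    #
    #   engine = TextureEngine()
    #   texture = engine.LoadImage("player.png", framesize=(32, 32), label="player")
    #   cached = engine.GetCached("player")   # same Texture object, served from the cache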
def Cache(self, texture, label):
assert(label not in self.cache.keys()) # should not already have this label
self.cache[label]=texture # TODO: should this be a weak reference instead of a hard reference?
def Uncache(self, label):
del self.cache[label]
def IsCached(self,label):
return label in self.cache.keys()
def GetCached(self,label):
return self.cache[label]
def AddTexture(self,texture):
# weakref list
self.textures.append(weakref.ref(texture,lambda ref: self.textures.remove(ref)))
def GetTextureSizes(self):
# return a lits of texture sizes
return [text().texturesize for text in self.textures]
def GetSize(self):
        return sum(self.GetTextureSizes())
| gpl-3.0 | 8,635,336,365,835,592,000 | 32.837209 | 119 | 0.594845 | false |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/tests/test_reproducibility.py | 1 | 3864 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from io import StringIO
import json
import os
import tempfile
import unittest
import torch
from . import test_binaries
class TestReproducibility(unittest.TestCase):
def _test_reproducibility(
self,
name,
extra_flags=None,
delta=0.0001,
resume_checkpoint='checkpoint1.pt',
max_epoch=3,
):
def get_last_log_stats_containing_string(log_records, search_string):
            for log_record in log_records[::-1]:
if search_string in log_record.msg:
return json.loads(log_record.msg)
if extra_flags is None:
extra_flags = []
with tempfile.TemporaryDirectory(name) as data_dir:
with self.assertLogs() as logs:
test_binaries.create_dummy_data(data_dir)
test_binaries.preprocess_translation_data(data_dir)
# train epochs 1 and 2 together
with self.assertLogs() as logs:
test_binaries.train_translation_model(
data_dir, 'fconv_iwslt_de_en', [
'--dropout', '0.0',
'--log-format', 'json',
'--log-interval', '1',
'--max-epoch', str(max_epoch),
] + extra_flags,
)
train_log = get_last_log_stats_containing_string(logs.records, 'train_loss')
valid_log = get_last_log_stats_containing_string(logs.records, 'valid_loss')
# train epoch 2, resuming from previous checkpoint 1
os.rename(
os.path.join(data_dir, resume_checkpoint),
os.path.join(data_dir, 'checkpoint_last.pt'),
)
with self.assertLogs() as logs:
test_binaries.train_translation_model(
data_dir, 'fconv_iwslt_de_en', [
'--dropout', '0.0',
'--log-format', 'json',
'--log-interval', '1',
'--max-epoch', str(max_epoch),
] + extra_flags,
)
train_res_log = get_last_log_stats_containing_string(logs.records, 'train_loss')
valid_res_log = get_last_log_stats_containing_string(logs.records, 'valid_loss')
for k in ['train_loss', 'train_ppl', 'train_num_updates', 'train_gnorm']:
self.assertAlmostEqual(float(train_log[k]), float(train_res_log[k]), delta=delta)
for k in ['valid_loss', 'valid_ppl', 'valid_num_updates', 'valid_best_loss']:
self.assertAlmostEqual(float(valid_log[k]), float(valid_res_log[k]), delta=delta)
def test_reproducibility(self):
self._test_reproducibility('test_reproducibility')
@unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
def test_reproducibility_fp16(self):
self._test_reproducibility('test_reproducibility_fp16', [
'--fp16',
'--fp16-init-scale', '4096',
], delta=0.011)
@unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
def test_reproducibility_memory_efficient_fp16(self):
self._test_reproducibility('test_reproducibility_memory_efficient_fp16', [
'--memory-efficient-fp16',
'--fp16-init-scale', '4096',
])
def test_mid_epoch_reproducibility(self):
self._test_reproducibility(
'test_mid_epoch_reproducibility',
['--save-interval-updates', '3'],
resume_checkpoint='checkpoint_1_3.pt',
max_epoch=1,
)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 3,292,364,400,086,513,000 | 36.514563 | 97 | 0.5603 | false |
drvinceknight/Axelrod | axelrod/tests/test_tournament_manager.py | 1 | 2683 | import unittest
import axelrod
class TestTournamentManager(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.test_output_directory = './assets/'
cls.test_with_ecological = True
cls.test_tournament_name = 'test_tournament'
cls.test_file_name = 'test_file_name'
cls.test_file_extenstion = 'png'
cls.test_strategies = [axelrod.Defector, axelrod.Cooperator]
cls.test_players = [axelrod.Defector(), axelrod.Cooperator()]
cls.expected_output_file_path = './assets/test_file_name.png'
def test_init(self):
mgr = axelrod.TournamentManager(
self.test_output_directory,
self.test_with_ecological)
self.assertEqual(mgr.output_directory, self.test_output_directory)
self.assertEqual(mgr.tournaments, [])
self.assertEqual(mgr.with_ecological, self.test_with_ecological)
self.assertTrue(mgr.pass_cache)
def test_one_player_per_strategy(self):
mgr = axelrod.TournamentManager(
self.test_output_directory,
self.test_with_ecological)
players = mgr.one_player_per_strategy(self.test_strategies)
self.assertIsInstance(players[0], axelrod.Defector)
self.assertIsInstance(players[1], axelrod.Cooperator)
def test_output_file_path(self):
mgr = axelrod.TournamentManager(
self.test_output_directory,
self.test_with_ecological)
output_file_path = mgr.output_file_path(
self.test_file_name, self.test_file_extenstion)
self.assertEqual(output_file_path, self.expected_output_file_path)
def test_add_tournament(self):
mgr = axelrod.TournamentManager(
self.test_output_directory,
self.test_with_ecological)
mgr.add_tournament(
players=self.test_players, name=self.test_tournament_name)
self.assertEqual(len(mgr.tournaments), 1)
self.assertIsInstance(mgr.tournaments[0], axelrod.Tournament)
self.assertEqual(mgr.tournaments[0].name, self.test_tournament_name)
def test_valid_cache(self):
mgr = axelrod.TournamentManager(
output_directory=self.test_output_directory,
with_ecological=self.test_with_ecological, load_cache=False)
mgr.add_tournament(
players=self.test_players, name=self.test_tournament_name)
self.assertTrue(mgr.valid_cache(200))
mgr.deterministic_cache['test_key'] = 'test_value'
self.assertFalse(mgr.valid_cache(200))
mgr.cache_valid_for_turns = 500
self.assertFalse(mgr.valid_cache(200))
self.assertTrue(mgr.valid_cache(500))
| mit | 1,240,633,106,179,890,700 | 40.276923 | 76 | 0.660455 | false |
piqueserver/piqueserver | piqueserver/bansubscribe.py | 1 | 2912 | # Copyright (c) Mathias Kaerlev 2011-2012.
# This file is part of pyspades.
# pyspades is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyspades is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyspades. If not, see <http://www.gnu.org/licenses/>.
import json
from twisted.internet.task import LoopingCall
from twisted.internet.defer import DeferredList
from twisted.web.client import getPage
from twisted.logger import Logger
from piqueserver.networkdict import NetworkDict
from piqueserver.config import config, cast_duration
log = Logger()
# format is [{"ip": "1.1.1.1", "reason": "blah"}, ...]
def validate_bansub_config(c):
if not isinstance(c, list):
return False
for item in c:
if not item.get('url') or not isinstance(item.get('whitelist'), list):
return False
return True
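# Hedged illustration (derived from validate_bansub_config above and got_bans below; the
# values are made up): each configured entry is expected to look like
#   {"url": "https://example.com/bans.json", "whitelist": ["trusted_player"]}
# and the JSON document fetched from that URL is a list such as
#   [{"ip": "1.1.1.1", "reason": "griefing", "name": "someone"}]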
bans_config = config.section('bans')
bans_config_urls = bans_config.option('bansubscribe', default=[], validate=validate_bansub_config)
bans_config_interval = bans_config.option('bansubscribe_interval', default="5min",
cast=cast_duration)
class BanManager:
bans = None
new_bans = None
def __init__(self, protocol):
self.protocol = protocol
self.urls = [(entry.get('url'), entry.get('whitelist')) for entry in
bans_config_urls.get()]
self.loop = LoopingCall(self.update_bans)
self.loop.start(bans_config_interval.get(), now=True)
def update_bans(self):
self.new_bans = NetworkDict()
defers = []
for url, url_filter in self.urls:
defers.append(getPage(url.encode('utf8')).addCallback(self.got_bans,
url_filter))
DeferredList(defers).addCallback(self.bans_finished)
def got_bans(self, data, name_filter):
bans = json.loads(data)
for entry in bans:
name = entry.get('name', None)
if name is not None and name in name_filter:
continue
self.new_bans[str(entry['ip'])] = str(entry['reason'])
def bans_finished(self, _result):
self.bans = self.new_bans
self.new_bans = None
log.info("successfully updated bans from bansubscribe urls")
def get_ban(self, ip):
if self.bans is None:
return None
try:
return self.bans[ip]
except KeyError:
return None
| gpl-3.0 | 8,214,664,796,275,352,000 | 32.860465 | 98 | 0.640797 | false |
xu6148152/Binea_Python_Project | python_practice/batteries_included/collections_test.py | 1 | 1464 | # coding=utf-8
from collections import namedtuple, Counter
from collections import deque
from collections import defaultdict
import os
import sys
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
__author__ = 'xubinggui'
#namedtuple
Point = namedtuple('Point', ['x', 'y'])
p = Point(1, 2)
print p
print isinstance(p, tuple)
#deque
q = deque(['a', 'b', 'c'])
q.append('x')
q.appendleft('y')
print q
#defaultdict
# a defaultdict returns its default value when the key is missing
dd = defaultdict(lambda: 'N/A')
print dd['key']
#OrderedDict
d = dict([('a', 1), ('b', 2), ('c', 3)])
print d
od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
print od
#FIFO dict
class LastUpdatedOrderedDict(OrderedDict):
def __init__(self, capacity):
super(LastUpdatedOrderedDict, self).__init__()
self._capacity = capacity
def __setitem__(self, key, value):
containsKey = 1 if key in self else 0
if len(self) - containsKey >= self._capacity:
last = self.popitem(last=False)
print 'remove:', last
if containsKey:
del self[key]
print 'set:', (key, value)
else:
print 'add:', (key, value)
OrderedDict.__setitem__(self, key, value)
#Counter
c = Counter()
for ch in 'programming':
c[ch] = c[ch] + 1
print c
# print sys.version
# the collections module provides several useful collection classes
| mit | -5,807,403,507,421,724,000 | 19.257143 | 54 | 0.620592 | false |
mikemike/SkypeBot | custom/server-old.py | 1 | 2002 | #!/sevabot
# -*- coding: utf-8 -*-
"""
Shows what server a site is on
"""
from __future__ import unicode_literals
import re
import os
import Skype4Py
import urllib2
import socket
from sevabot.bot.stateful import StatefulSkypeHandler
from sevabot.utils import ensure_unicode, get_chat_id
class ServerHandler(StatefulSkypeHandler):
"""
Skype message handler class for the conference call hosting.
"""
def __init__(self):
"""
Use `init` method to initialize a handler.
"""
def init(self, sevabot):
"""
Set-up our state. This is called every time module is (re)loaded.
:param skype: Handle to Skype4Py instance
"""
self.sevabot = sevabot
def handle_message(self, msg, status):
"""
Override this method to customize a handler.
"""
# If you are talking to yourself when testing
# Ignore non-sent messages (you get both SENDING and SENT events)
if status == "SENDING" or status == "SENT":
return
if status != "RECEIVED":
return False
body = ensure_unicode(msg.Body)
if len(body) == 0:
return False
# Check if we match any of our commands
a = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', body)
for text in a:
            # strip the scheme, path and port so gethostbyname() receives a bare hostname
            host = text.split('://', 1)[1].split('/', 1)[0].split(':', 1)[0]
            ip = socket.gethostbyname(host)
msg.Chat.SendMessage('IP is: ' + ip)
return True
return False
    def shutdown(self):
"""
Called when the module is reloaded.
"""
def find_title(self, url):
"""
This finds the title when provided with a string of a URL.
"""
        from mechanize import Browser  # local import: mechanize is only needed by this helper
        br = Browser()
br.open(url)
title = br.title()
if(title):
return title
else:
return False
# Export the instance to Sevabot
sevabot_handler = ServerHandler()
__all__ = ['sevabot_handler']
| gpl-2.0 | -3,084,206,064,392,355,300 | 23.120482 | 110 | 0.554945 | false |
stubbfel/fakeservices | src/fake_services/service/webservice/fake_http_request_handler.py | 1 | 2003 | __author__ = 'dev'
import re
from http.server import SimpleHTTPRequestHandler
HEADERS_HOST_PARAMETER_KEY_NAME = "Host"
REQUEST_LINE_ENCODING = "iso-8859-1"
HOST_PATTERN_KEY_NAME = "host_pattern"
RESPONSE_CONTENT_PATH_KEY_NAME = "response_content_path"
class FakeHTTPRequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
self.__set_path_setting()
SimpleHTTPRequestHandler.do_GET(self)
def do_HEAD(self):
self.__set_path_setting()
SimpleHTTPRequestHandler.do_HEAD(self)
def do_POST(self):
self.command = "GET"
self.do_GET()
def __set_path_setting(self):
response_content_path = None
if self.server.requests_config is not None:
server_path = self.__get_server_path()
if server_path in self.server.requests_config:
request_config = self.server.requests_config[server_path]
response_content_path = self.__get_response_content_path(request_config)
if response_content_path is not None:
self.path = response_content_path
else:
self.path = "/404"
def __get_response_content_path(self, request_config):
sorted_configs = sorted(request_config, key=len, reverse=True)
server_host = self.__get_server_host()
for config in sorted_configs:
if HOST_PATTERN_KEY_NAME in config:
result = re.search(config[HOST_PATTERN_KEY_NAME], server_host)
if result is None:
continue
if RESPONSE_CONTENT_PATH_KEY_NAME in config:
return config[RESPONSE_CONTENT_PATH_KEY_NAME]
return None
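    # Hedged illustration of the server.requests_config shape this handler expects
    # (inferred from the lookups above; the path, pattern and file below are made-up):
    #
    #   {
    #       "/index.html": [
    #           {"host_pattern": r"example\.com", "response_content_path": "/pages/example.html"},
    #       ],
    #   }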
def __get_server_path(self):
request_line = str(self.raw_requestline, REQUEST_LINE_ENCODING).rstrip('\r\n')
words = request_line.split()
if len(words) < 2:
return ""
return words[1]
def __get_server_host(self):
return self.headers[HEADERS_HOST_PARAMETER_KEY_NAME] | mit | 5,295,484,699,419,269,000 | 31.852459 | 88 | 0.622067 | false |
juhgiyo/pyserver | pyserver/network/async_tcp_client.py | 1 | 6653 | #!/usr/bin/python
"""
@file async_tcp_client.py
@author Woong Gyu La a.k.a Chris. <[email protected]>
<http://github.com/juhgiyo/pyserver>
@date March 10, 2016
@brief AsyncTcpClient Interface
@version 0.1
@section LICENSE
The MIT License (MIT)
Copyright (c) 2016 Woong Gyu La <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@section DESCRIPTION
AsyncTcpClient Class.
"""
import asyncore
import socket
from collections import deque
import threading
from .async_controller import AsyncController
from .callback_interface import *
from .server_conf import *
# noinspection PyDeprecation
from .preamble import *
import traceback
'''
Interfaces
variables
- hostname
- port
- addr = (hostname,port)
- callback
functions
- def send(data)
- def close() # close the socket
'''
class AsyncTcpClient(asyncore.dispatcher):
def __init__(self, hostname, port, callback, no_delay=True):
asyncore.dispatcher.__init__(self)
self.is_closing = False
self.callback = None
if callback is not None and isinstance(callback, ITcpSocketCallback):
self.callback = callback
else:
raise Exception('callback is None or not an instance of ITcpSocketCallback class')
self.hostname = hostname
self.port = port
self.addr = (hostname, port)
self.send_queue = deque() # thread-safe dequeue
self.transport = {'packet': None, 'type': PacketType.SIZE, 'size': SIZE_PACKET_LENGTH, 'offset': 0}
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
if no_delay:
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.set_reuse_addr()
err = None
try:
self.connect((hostname, port))
AsyncController.instance().add(self)
except Exception as e:
err = e
finally:
def callback_connection():
if self.callback is not None:
self.callback.on_newconnection(self, err)
thread = threading.Thread(target=callback_connection)
thread.start()
def handle_connect(self):
pass
def handle_read(self):
try:
data = self.recv(self.transport['size'])
if data is None or len(data) == 0:
return
if self.transport['packet'] is None:
self.transport['packet'] = data
else:
self.transport['packet'] += data
read_size = len(data)
if read_size < self.transport['size']:
self.transport['offset'] += read_size
self.transport['size'] -= read_size
else:
if self.transport['type'] == PacketType.SIZE:
should_receive = Preamble.to_should_receive(self.transport['packet'])
if should_receive < 0:
preamble_offset = Preamble.check_preamble(self.transport['packet'])
self.transport['offset'] = len(self.transport['packet']) - preamble_offset
self.transport['size'] = preamble_offset
# self.transport['packet'] = self.transport['packet'][
# len(self.transport['packet']) - preamble_offset:]
self.transport['packet'] = self.transport['packet'][preamble_offset:]
return
self.transport = {'packet': None, 'type': PacketType.DATA, 'size': should_receive, 'offset': 0}
else:
receive_packet = self.transport
self.transport = {'packet': None, 'type': PacketType.SIZE, 'size': SIZE_PACKET_LENGTH, 'offset': 0}
self.callback.on_received(self, receive_packet['packet'])
except Exception as e:
print(e)
traceback.print_exc()
# def writable(self):
# return len(self.send_queue) != 0
def handle_write(self):
if len(self.send_queue) != 0:
send_obj = self.send_queue.popleft()
state = State.SUCCESS
try:
sent = asyncore.dispatcher.send(self, send_obj['data'][send_obj['offset']:])
if sent < len(send_obj['data']):
send_obj['offset'] = send_obj['offset'] + sent
self.send_queue.appendleft(send_obj)
return
except Exception as e:
print(e)
traceback.print_exc()
state = State.FAIL_SOCKET_ERROR
try:
if self.callback is not None:
self.callback.on_sent(self, state, send_obj['data'][SIZE_PACKET_LENGTH:])
except Exception as e:
print(e)
traceback.print_exc()
def close(self):
if not self.is_closing:
self.handle_close()
def handle_error(self):
if not self.is_closing:
self.handle_close()
def handle_close(self):
try:
self.is_closing = True
asyncore.dispatcher.close(self)
AsyncController.instance().discard(self)
if self.callback is not None:
self.callback.on_disconnect(self)
except Exception as e:
print(e)
traceback.print_exc()
def send(self, data):
self.send_queue.append({'data': Preamble.to_preamble_packet(len(data)) + data, 'offset': 0})
def gethostbyname(self, arg):
return self.socket.gethostbyname(arg)
def gethostname(self):
return self.socket.gethostname()
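# Hedged usage sketch (illustrative only; the address is a placeholder and EchoCallback is
# a hypothetical ITcpSocketCallback implementation, not part of this module):
#
#   class EchoCallback(ITcpSocketCallback):
#       def on_newconnection(self, sock, err): print('connected', err)
#       def on_received(self, sock, data): sock.send(data)
#       def on_sent(self, sock, state, data): pass
#       def on_disconnect(self, sock): print('disconnected')
#
#   client = AsyncTcpClient('127.0.0.1', 8080, EchoCallback())
#   client.send(b'hello')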
| mit | 631,969,208,172,865,900 | 35.756906 | 119 | 0.601383 | false |
CroissanceCommune/autonomie | autonomie/forms/training/trainer.py | 1 | 1970 | # -*- coding: utf-8 -*-
# * Authors:
# * TJEBBES Gaston <[email protected]>
# * Arezki Feth <[email protected]>;
# * Miotte Julien <[email protected]>;
import functools
import deform
from colanderalchemy import SQLAlchemySchemaNode
from autonomie import forms
from autonomie.forms.user.user import get_list_schema as get_user_list_schema
from autonomie.models.training.trainer import TrainerDatas
FORM_GRID = {
u"Profil Professionnel": (
(('specialty', 6),),
(('linkedin', 3), ('viadeo', 3)),
(('career', 6),),
(('qualifications', 6),),
(('background', 6),),
(('references', 6),),
),
u"Concernant votre activité de formation": (
(('motivation', 6), ),
(('approach', 6), ),
),
u"Un petit peu de vous": (
(('temperament', 6),),
(('indulgence', 6),),
(('sound', 6), ),
(('object_', 6),),
),
}
def customize_schema(schema):
"""
Customize the given TrainerDatas schema to setup specific widgets ...
"""
customize = functools.partial(forms.customize_field, schema)
for field in (
"specialty",
"career",
"qualifications",
"references",
"motivation",
"approach",
"temperament",
"indulgence",
"sound",
"object_",
):
customize(field, widget=deform.widget.TextAreaWidget())
return schema
def get_add_edit_trainerdatas_schema():
"""
Build the form schemas for adding/modifying a TrainerDatas entry
:returns: a colanderalchemy.SQLAlchemySchemaNode
"""
schema = SQLAlchemySchemaNode(
TrainerDatas,
excludes=('name', '_acl', 'user_id', 'active')
)
customize_schema(schema)
return schema
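# Hedged usage note (illustrative, not part of the original module): the schema is
# typically rendered through deform, e.g.
#   form = deform.Form(get_add_edit_trainerdatas_schema(), buttons=("submit",))
# The exact view wiring is application specific and not shown here.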
def get_list_schema():
"""
Build the form schema for trainers listing
:returns: a colanderalchemy.SQLAlchemySchemaNode
"""
schema = get_user_list_schema()
return schema
| gpl-3.0 | -7,347,418,329,904,043,000 | 23.924051 | 77 | 0.583037 | false |
eunchong/build | scripts/slave/recipe_modules/webrtc/gclient_config.py | 1 | 1707 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import DEPS
CONFIG_CTX = DEPS['gclient'].CONFIG_CTX
ChromeInternalGitURL = DEPS['gclient'].config.ChromeInternalGitURL
ChromiumGitURL = DEPS['gclient'].config.ChromiumGitURL
@CONFIG_CTX(includes=['_webrtc', '_webrtc_limited'])
def webrtc(c):
pass
@CONFIG_CTX(includes=['webrtc'])
def webrtc_ios(c):
# WebRTC for iOS depends on the src/third_party/openmax_dl in Chromium, which
# is set to None for iOS. Because of this, sync Mac as well to get it.
c.target_os.add('mac')
c.target_os.add('ios')
@CONFIG_CTX(includes=['webrtc'])
def webrtc_valgrind(c):
"""Add Valgrind binaries to the gclient solution."""
c.solutions[0].custom_deps['src/chromium/src/third_party/valgrind'] = \
ChromiumGitURL(c, 'chromium', 'deps', 'valgrind', 'binaries')
@CONFIG_CTX()
def _webrtc(c):
"""Add the main solution for WebRTC standalone builds.
  This needs to be in its own configuration that is added first in the
dependency chain. Otherwise the webrtc-limited solution will end up as the
first solution in the gclient spec, which doesn't work.
"""
s = c.solutions.add()
s.name = 'src'
s.url = ChromiumGitURL(c, 'external', 'webrtc')
s.deps_file = 'DEPS'
c.got_revision_mapping['src'] = 'got_revision'
@CONFIG_CTX()
def _webrtc_limited(c):
"""Helper config for loading the webrtc-limited solution.
The webrtc-limited solution contains non-redistributable code.
"""
s = c.solutions.add()
s.name = 'webrtc-limited'
s.url = ChromeInternalGitURL(c, 'chrome', 'deps', 'webrtc-limited')
s.deps_file = 'DEPS'
| bsd-3-clause | -4,658,722,570,066,493,000 | 31.826923 | 79 | 0.710018 | false |
stevemarple/cameralogger | cameralogger/zwo.py | 1 | 6348 | # Cameralogger - record and decorate camera images for timelapses etc.
# Copyright (C) 2017 Steve Marple.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from atomiccreate import smart_open
import logging
from PIL import Image
import threading
import time
import zwoasi
from cameralogger import get_config_option
__author__ = 'Steve Marple'
__version__ = '0.2.1'
__license__ = 'MIT'
_settings_map = {
'auto_exp_max_brightness': ('AutoExpMaxBrightness', 'ASI_AUTO_MAX_BRIGHTNESS'),
'auto_exp_max_exp': ('AutoExpMaxExp', 'ASI_AUTO_MAX_EXP'),
'auto_exp_max_exp_us': ('AutoExpMaxExpMS', 'ASI_AUTO_MAX_EXP_MS'), # microsecond
'auto_exp_max_gain': ('AutoExpMaxGain', 'ASI_AUTO_MAX_GAIN'),
'bandwidth': ('BandWidth', 'ASI_BANDWIDTHOVERLOAD'),
'brightness': ('Brightness', 'ASI_BRIGHTNESS'),
'exposure': ('Exposure', 'ASI_EXPOSURE'),
'flip': ('Flip', 'ASI_FLIP'),
'gain': ('Gain', 'ASI_GAIN'),
'gamma': ('Gamma', 'ASI_GAMMA'),
'hardware_bin': ('HardwareBin', 'ASI_HARDWARE_BIN'),
'high_speed_mode': ('HighSpeedMode', 'ASI_HIGH_SPEED_MODE'),
    'mono_bin': ('Mono bin', 'ASI_MONO_BIN'), # Yes, really with a space
'temperature': ('Temperature', 'ASI_TEMPERATURE'),
'wb_blue': ('WB_B', 'ASI_WB_B'),
'wb_red': ('WB_R', 'ASI_WB_R'),
}
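# Illustrative sketch (not part of the original module): the keys above are
# read by Camera.apply_settings() from whatever config section it is given, so
# such a section could look roughly like
#
#     exposure = auto
#     gain = 150
#     image_type = raw8
#
# The literal values are assumptions; 'auto' keeps the control's default value
# with auto mode enabled, as handled in apply_settings().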
class Camera(object):
"""ZwO ASI camera.
Uses the :class:`zwoasi.Camera` class."""
def __init__(self, config):
if zwoasi.zwolib is None:
# Must initialise the library
if config.has_option('camera', 'sdk'):
sdk_filename = config.get('camera', 'sdk')
else:
# Hope the user has set LD_LIBRARY_PATH or similar..
sdk_filename = None
zwoasi.init(sdk_filename)
num_cameras = zwoasi.get_num_cameras()
if num_cameras == 0:
raise Exception('no camera present')
if config.has_option('camera', 'model'):
id_ = config.get('camera', 'model')
try:
# Assume it is an integer
id_ = int(id_)
except ValueError:
# No it wasn't, must be the model name then
pass
else:
id_ = 0
self.camera = zwoasi.Camera(id_)
self.config = config
self.capture_image_lock = threading.Lock()
def __del__(self):
self.camera.stop_video_capture()
self.camera.close()
def apply_settings(self, section):
# Initialise
controls = self.camera.get_controls()
self.camera.start_video_capture()
# Read all camera controls defined in the config file
for setting in _settings_map:
value = get_config_option(self.config, section, setting)
if value is not None:
asi_control_name, asi_setting_name = _settings_map[setting]
default_value = controls[asi_control_name]['DefaultValue']
control_type = getattr(zwoasi, asi_setting_name)
logger.debug('set control value %s (%s) to %s', setting, asi_setting_name, value)
if value == 'auto':
self.camera.set_control_value(control_type, default_value, auto=True)
else:
# Cast value to same type as default_value
self.camera.set_control_value(control_type, type(default_value)(value), auto=False)
image_type = get_config_option(self.config, section, 'image_type')
if image_type is not None:
logger.debug('set image type to %s', image_type)
self.camera.set_image_type(getattr(zwoasi, 'ASI_IMG_' + image_type.upper()))
def capture_image(self, _):
logger.debug('capture_image: acquiring lock')
if self.capture_image_lock.acquire(False):
try:
logger.debug('capture_image: acquired lock')
t = time.time()
img_info = self.get_control_values()
img = Image.fromarray(self.camera.capture_video_frame()[:, :, ::-1]) # Swap from BGR order
img_info['DateTime'] = time.strftime('%Y-%m-%d %H:%M:%S+00:00', time.gmtime(t))
# Take CPU temperature as system temperature
img_info['SystemTemperature'] = float('NaN')
with open('/sys/class/thermal/thermal_zone0/temp') as f:
img_info['SystemTemperature'] = float(f.read().strip()) / 1000
if self.config.has_option('camera', 'sensor_temperature_file'):
with smart_open(self.config.get('camera', 'sensor_temperature_file'), 'w') as fh:
if 'SensorTemperature' in img_info:
fh.write(str(img_info['SensorTemperature']))
fh.write('\n')
else:
fh.write('NaN\n')
return img, img_info, t
finally:
                logger.debug('capture_image: releasing lock')
self.capture_image_lock.release()
else:
logger.warning('capture_image: could not acquire lock')
raise Exception('could not acquire lock')
def get_control_values(self):
controls = self.camera.get_controls()
r = {}
for k in controls:
r[k] = self.camera.get_control_value(controls[k]['ControlType'])[0]
# Fix up certain keys
r['Exposure_s'] = r['Exposure'] / 1000000.0
if 'Temperature' in r:
r['SensorTemperature'] = r['Temperature'] / 10.0
if 'Flip' in r:
r['Flip'] = {0: 'None', 1: 'Horizontal', 2: 'Vertical', 3: 'Both'}[r['Flip']]
return r
logger = logging.getLogger(__name__)
| gpl-3.0 | -2,069,602,927,163,728,600 | 38.924528 | 107 | 0.57908 | false |
gopythongo/aptly-api-client | aptly_api/tests/client.py | 1 | 4713 | # -* encoding: utf-8 *-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from typing import Any, cast
from unittest.case import TestCase
import requests
import requests_mock
from aptly_api import Client as AptlyClient
# as we're testing the individual parts, this is rather simple
from aptly_api.base import AptlyAPIException
class ClientTests(TestCase):
def __init__(self, *args: Any) -> None:
super().__init__(*args)
self.client = AptlyClient("http://test/")
def test_instantiate(self) -> None:
cl = AptlyClient("http://test/")
self.assertEqual(
str(cl),
"Client (Aptly API Client) <http://test/>"
)
@requests_mock.Mocker(kw='rmock')
def test_api_subdir_get(self, *, rmock: requests_mock.Mocker) -> None:
# register mock:// scheme with urllib.parse
import urllib.parse
urllib.parse.uses_netloc += ['mock']
urllib.parse.uses_relative += ['mock']
urllib.parse.uses_fragment += ['mock']
urllib.parse.uses_params += ['mock']
cl = AptlyClient("mock://test/basedir/")
rmock.get("mock://test/basedir/api/test", status_code=200, text='')
cl.files.do_get("api/test")
self.assertTrue(rmock.called)
def test_error_no_error(self) -> None:
class MockResponse:
def __init__(self, status_code: int = 200) -> None:
self.status_code = status_code
self.assertEqual(
self.client.files._error_from_response(cast(requests.Response, MockResponse())),
"no error (status 200)"
)
def test_error_no_json(self) -> None:
adapter = requests_mock.Adapter()
adapter.register_uri("GET", "mock://test/api", status_code=400, text="this is not json", reason="test")
session = requests.session()
session.mount("mock", adapter)
resp = session.get("mock://test/api")
self.assertEqual(
self.client.files._error_from_response(resp),
"400 test this is not json"
)
def test_error_dict(self) -> None:
adapter = requests_mock.Adapter()
adapter.register_uri("GET", "mock://test/api", status_code=400, text='{"error": "error", "meta": "meta"}',
reason="test")
session = requests.session()
session.mount("mock", adapter)
resp = session.get("mock://test/api")
self.assertEqual(
self.client.files._error_from_response(resp),
"400 - test - error (meta)"
)
def test_error_list(self) -> None:
adapter = requests_mock.Adapter()
adapter.register_uri("GET", "mock://test/api", status_code=400, text='[{"error": "error", "meta": "meta"}]',
reason="test")
session = requests.session()
session.mount("mock", adapter)
resp = session.get("mock://test/api")
self.assertEqual(
self.client.files._error_from_response(resp),
"400 - test - error (meta)"
)
@requests_mock.Mocker(kw='rmock')
def test_error_get(self, *, rmock: requests_mock.Mocker) -> None:
rmock.register_uri("GET", "mock://test/api", status_code=400, text='[{"error": "error", "meta": "meta"}]',
reason="test")
with self.assertRaises(AptlyAPIException):
self.client.files.do_get("mock://test/api")
@requests_mock.Mocker(kw='rmock')
def test_error_post(self, *, rmock: requests_mock.Mocker) -> None:
rmock.register_uri("POST", "mock://test/api", status_code=400, text='[{"error": "error", "meta": "meta"}]',
reason="test")
with self.assertRaises(AptlyAPIException):
self.client.files.do_post("mock://test/api")
@requests_mock.Mocker(kw='rmock')
def test_error_put(self, *, rmock: requests_mock.Mocker) -> None:
rmock.register_uri("PUT", "mock://test/api", status_code=400, text='[{"error": "error", "meta": "meta"}]',
reason="test")
with self.assertRaises(AptlyAPIException):
self.client.files.do_put("mock://test/api")
@requests_mock.Mocker(kw='rmock')
def test_error_delete(self, *, rmock: requests_mock.Mocker) -> None:
rmock.register_uri("DELETE", "mock://test/api", status_code=400, text='[{"error": "error", "meta": "meta"}]',
reason="test")
with self.assertRaises(AptlyAPIException):
self.client.files.do_delete("mock://test/api")
| bsd-3-clause | 3,883,082,745,389,953,000 | 39.282051 | 117 | 0.584129 | false |
jlinn/pylastica | tests/query/test_querystring.py | 1 | 2345 | __author__ = 'Joe Linn'
import unittest
import hashlib
import random
import pylastica
from tests.base import Base
class QueryStringTest(unittest.TestCase, Base):
def test_search_multiple_fields(self):
string = hashlib.md5(str(random.random())).hexdigest()
query = pylastica.query.QueryString(string)
expected = {'query': string}
self.assertEqual({'query_string': expected}, query.to_dict())
fields = [hashlib.md5(str(random.random())).hexdigest() for i in range(random.randint(0, 12) + 1)]
query.set_fields(fields)
expected['fields'] = fields
self.assertEqual({'query_string': expected}, query.to_dict())
for val in [True, False]:
query.set_use_dis_max(val)
expected['use_dis_max'] = val
self.assertEqual({'query_string': expected}, query.to_dict())
def test_search(self):
client = self._get_client()
index = client.get_index('test')
index.create(options=True)
index.settings.set_number_of_replicas(0)
doc_type = index.get_doc_type('helloworld')
doc_type.add_document(pylastica.Document(1, {'email': '[email protected]', 'username': 'bobloblaw', 'test': ['2', '3', '5']}))
index.refresh()
query = pylastica.query.QueryString()
query.set_query('test*')
result_set = doc_type.search(query)
self.assertEqual(1, len(result_set))
index.delete()
def test_search_fields(self):
index = self._create_index()
doc_type = index.get_doc_type('test')
doc_type.add_document(pylastica.Document(1, {'title': 'hello world', 'firstname': 'Joe', 'lastname': 'Linn', 'price': '102', 'year': '2013'}))
index.refresh()
query = pylastica.query.QueryString()
query.set_query('lin*').set_default_field('title')
query.set_fields(['title', 'firstname', 'lastname', 'price', 'year'])
result_set = doc_type.search(query)
self.assertEqual(1, len(result_set))
index.delete()
def test_set_default_operator(self):
operator = 'AND'
query = pylastica.query.QueryString('test')
query.set_default_operator(operator)
self.assertEqual(query.to_dict()['query_string']['default_operator'], operator)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -1,380,882,597,778,277,600 | 36.222222 | 150 | 0.613646 | false |
EUDAT-B2SHARE/invenio-old | modules/bibcirculation/lib/bibcirculation_model.py | 1 | 12269 | # -*- coding: utf-8 -*-
#
## This file is part of Invenio.
## Copyright (C) 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
bibcirculation database models.
"""
# General imports.
from invenio.sqlalchemyutils import db
# Create your models here.
from invenio.bibedit_model import Bibrec
from invenio.bibcirculation_receivers import \
post_handler_demosite_populate
from invenio.demosite_manager import populate as demosite_populate
from invenio.signalutils import post_command
post_command.connect(post_handler_demosite_populate, sender=demosite_populate)
class CrcBORROWER(db.Model):
"""Represents a CrcBORROWER record."""
def __init__(self):
pass
__tablename__ = 'crcBORROWER'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True,
autoincrement=True)
ccid = db.Column(db.Integer(15, unsigned=True), nullable=True,
unique=True, server_default=None)
name = db.Column(db.String(255), nullable=False,
server_default='', index=True)
email = db.Column(db.String(255), nullable=False,
server_default='', index=True)
phone = db.Column(db.String(60), nullable=True)
address = db.Column(db.String(60), nullable=True)
mailbox = db.Column(db.String(30), nullable=True)
borrower_since = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
borrower_until = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
notes = db.Column(db.Text, nullable=True)
class CrcLIBRARY(db.Model):
"""Represents a CrcLIBRARY record."""
def __init__(self):
pass
__tablename__ = 'crcLIBRARY'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True,
autoincrement=True)
name = db.Column(db.String(80), nullable=False,
server_default='')
address = db.Column(db.String(255), nullable=False,
server_default='')
email = db.Column(db.String(255), nullable=False,
server_default='')
phone = db.Column(db.String(30), nullable=False,
server_default='')
type = db.Column(db.String(30), nullable=False,
server_default='main')
notes = db.Column(db.Text, nullable=True)
class CrcITEM(db.Model):
"""Represents a CrcITEM record."""
def __init__(self):
pass
__tablename__ = 'crcITEM'
barcode = db.Column(db.String(30), nullable=False,
server_default='',
primary_key=True)
id_bibrec = db.Column(db.MediumInteger(8, unsigned=True),
db.ForeignKey(Bibrec.id), nullable=False,
server_default='0')
id_crcLIBRARY = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(CrcLIBRARY.id), nullable=False,
server_default='0')
collection = db.Column(db.String(60), nullable=True)
location = db.Column(db.String(60), nullable=True)
description = db.Column(db.String(60), nullable=True)
loan_period = db.Column(db.String(30), nullable=False,
server_default='')
status = db.Column(db.String(20), nullable=False,
server_default='')
expected_arrival_date = db.Column(db.String(60), nullable=False,
server_default='')
creation_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
modification_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
number_of_requests = db.Column(db.Integer(3, unsigned=True),
                                   nullable=False, server_default='0')
class CrcILLREQUEST(db.Model):
"""Represents a CrcILLREQUEST record."""
def __init__(self):
pass
__tablename__ = 'crcILLREQUEST'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True,
autoincrement=True)
id_crcBORROWER = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(CrcBORROWER.id),
nullable=False,
server_default='0')
barcode = db.Column(db.String(30), db.ForeignKey(CrcITEM.barcode),
nullable=False,
server_default='')
period_of_interest_from = db.Column(db.DateTime,
nullable=False,
server_default='1900-01-01 00:00:00')
period_of_interest_to = db.Column(db.DateTime,
nullable=False,
server_default='1900-01-01 00:00:00')
id_crcLIBRARY = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(CrcLIBRARY.id), nullable=False,
server_default='0')
request_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
expected_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
arrival_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
due_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
return_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
status = db.Column(db.String(20), nullable=False,
server_default='')
cost = db.Column(db.String(30), nullable=False,
server_default='')
budget_code = db.Column(db.String(60), nullable=False,
server_default='')
item_info = db.Column(db.Text, nullable=True)
request_type = db.Column(db.Text, nullable=True)
borrower_comments = db.Column(db.Text, nullable=True)
only_this_edition = db.Column(db.String(10), nullable=False,
server_default='')
library_notes = db.Column(db.Text, nullable=True)
overdue_letter_number = db.Column(db.Integer(3, unsigned=True),
nullable=False, server_default='0')
overdue_letter_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
borrower = db.relationship(CrcBORROWER, backref='illrequests')
item = db.relationship(CrcITEM, backref='illrequests')
library = db.relationship(CrcLIBRARY, backref='illrequests')
class CrcLOAN(db.Model):
"""Represents a CrcLOAN record."""
def __init__(self):
pass
__tablename__ = 'crcLOAN'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True,
autoincrement=True)
id_crcBORROWER = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(CrcBORROWER.id), nullable=False, server_default='0')
id_bibrec = db.Column(db.MediumInteger(8, unsigned=True),
db.ForeignKey(Bibrec.id),
nullable=False, server_default='0')
barcode = db.Column(db.String(30), db.ForeignKey(CrcITEM.barcode), nullable=False,
server_default='')
loaned_on = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
returned_on = db.Column(db.Date, nullable=False,
server_default='0000-00-00')
due_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
number_of_renewals = db.Column(db.Integer(3, unsigned=True), nullable=False,
server_default='0')
overdue_letter_number = db.Column(db.Integer(3, unsigned=True), nullable=False,
server_default='0')
overdue_letter_date = db.Column(db.DateTime,
nullable=False,
server_default='1900-01-01 00:00:00')
status = db.Column(db.String(20), nullable=False,
server_default='')
type = db.Column(db.String(20), nullable=False,
server_default='')
notes = db.Column(db.Text, nullable=True)
borrower = db.relationship(CrcBORROWER, backref='loans')
bibrec = db.relationship(Bibrec, backref='loans')
item = db.relationship(CrcITEM, backref='loans')
class CrcLOANREQUEST(db.Model):
"""Represents a CrcLOANREQUEST record."""
def __init__(self):
pass
__tablename__ = 'crcLOANREQUEST'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True,
autoincrement=True)
id_crcBORROWER = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(CrcBORROWER.id), nullable=False, server_default='0')
id_bibrec = db.Column(db.MediumInteger(8, unsigned=True),
db.ForeignKey(Bibrec.id),
nullable=False, server_default='0')
barcode = db.Column(db.String(30), db.ForeignKey(CrcITEM.barcode), nullable=False,
server_default='')
period_of_interest_from = db.Column(db.DateTime,
nullable=False,
server_default='1900-01-01 00:00:00')
period_of_interest_to = db.Column(db.DateTime,
nullable=False,
server_default='1900-01-01 00:00:00')
status = db.Column(db.String(20), nullable=False,
server_default='')
notes = db.Column(db.Text, nullable=True)
request_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
borrower = db.relationship(CrcBORROWER, backref='loanrequests')
bibrec = db.relationship(Bibrec, backref='loanrequests')
item = db.relationship(CrcITEM, backref='loanrequests')
class CrcVENDOR(db.Model):
"""Represents a CrcVENDOR record."""
def __init__(self):
pass
__tablename__ = 'crcVENDOR'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True,
autoincrement=True)
name = db.Column(db.String(80), nullable=False,
server_default='')
address = db.Column(db.String(255), nullable=False,
server_default='')
email = db.Column(db.String(255), nullable=False,
server_default='')
phone = db.Column(db.String(30), nullable=False,
server_default='')
notes = db.Column(db.Text, nullable=True)
class CrcPURCHASE(db.Model):
"""Represents a CrcPURCHASE record."""
def __init__(self):
pass
__tablename__ = 'crcPURCHASE'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True,
autoincrement=True)
id_bibrec = db.Column(db.MediumInteger(8, unsigned=True),
db.ForeignKey(Bibrec.id),
nullable=False, server_default='0')
id_crcVENDOR = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(CrcVENDOR.id), nullable=False, server_default='0')
ordered_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
expected_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00')
price = db.Column(db.String(20), nullable=False,
server_default='0')
status = db.Column(db.String(20), nullable=False,
server_default='')
notes = db.Column(db.Text, nullable=True)
bibrec = db.relationship(Bibrec, backref='purchases')
vendor = db.relationship(CrcVENDOR, backref='purchases')
__all__ = ['CrcBORROWER',
'CrcLIBRARY',
'CrcITEM',
'CrcILLREQUEST',
'CrcLOAN',
'CrcLOANREQUEST',
'CrcVENDOR',
'CrcPURCHASE']
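# Illustrative usage sketch (not part of the original module): with these
# models mapped, related records can be reached through the backrefs declared
# above, e.g.
#
#     borrower = CrcBORROWER.query.get(borrower_id)
#     for loan in borrower.loans:
#         print(loan.barcode, loan.due_date)
#
# assuming the Flask-SQLAlchemy style `query` property provided by
# invenio.sqlalchemyutils.db.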
| gpl-2.0 | 6,992,625,803,890,977,000 | 42.049123 | 86 | 0.617247 | false |
fbradyirl/home-assistant | homeassistant/components/zwave/discovery_schemas.py | 1 | 12219 | """Z-Wave discovery schemas."""
from . import const
DEFAULT_VALUES_SCHEMA = {
"power": {
const.DISC_SCHEMAS: [
{
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SENSOR_MULTILEVEL_POWER],
},
{
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_METER],
const.DISC_INDEX: [const.INDEX_METER_POWER],
},
],
const.DISC_OPTIONAL: True,
}
}
DISCOVERY_SCHEMAS = [
{
const.DISC_COMPONENT: "binary_sensor",
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_ENTRY_CONTROL,
const.GENERIC_TYPE_SENSOR_ALARM,
const.GENERIC_TYPE_SENSOR_BINARY,
const.GENERIC_TYPE_SWITCH_BINARY,
const.GENERIC_TYPE_METER,
const.GENERIC_TYPE_SENSOR_MULTILEVEL,
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_SENSOR_NOTIFICATION,
const.GENERIC_TYPE_THERMOSTAT,
],
const.DISC_VALUES: dict(
DEFAULT_VALUES_SCHEMA,
**{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_BINARY],
const.DISC_TYPE: const.TYPE_BOOL,
const.DISC_GENRE: const.GENRE_USER,
},
"off_delay": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION],
const.DISC_INDEX: [9],
const.DISC_OPTIONAL: True,
},
},
),
},
{
const.DISC_COMPONENT: "climate",
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_THERMOSTAT,
const.GENERIC_TYPE_SENSOR_MULTILEVEL,
],
const.DISC_VALUES: dict(
DEFAULT_VALUES_SCHEMA,
**{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_SETPOINT]
},
"temperature": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SENSOR_MULTILEVEL_TEMPERATURE],
const.DISC_OPTIONAL: True,
},
"mode": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_MODE],
const.DISC_OPTIONAL: True,
},
"fan_mode": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_FAN_MODE],
const.DISC_OPTIONAL: True,
},
"operating_state": {
const.DISC_COMMAND_CLASS: [
const.COMMAND_CLASS_THERMOSTAT_OPERATING_STATE
],
const.DISC_OPTIONAL: True,
},
"fan_action": {
const.DISC_COMMAND_CLASS: [
const.COMMAND_CLASS_THERMOSTAT_FAN_ACTION
],
const.DISC_OPTIONAL: True,
},
"zxt_120_swing_mode": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION],
const.DISC_INDEX: [33],
const.DISC_OPTIONAL: True,
},
},
),
},
{
const.DISC_COMPONENT: "cover", # Rollershutter
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_ENTRY_CONTROL,
],
const.DISC_SPECIFIC_DEVICE_CLASS: [
const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL,
const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION,
const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON,
const.SPECIFIC_TYPE_SECURE_DOOR,
],
const.DISC_VALUES: dict(
DEFAULT_VALUES_SCHEMA,
**{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_GENRE: const.GENRE_USER,
},
"open": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_BRIGHT],
const.DISC_OPTIONAL: True,
},
"close": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_DIM],
const.DISC_OPTIONAL: True,
},
},
),
},
{
const.DISC_COMPONENT: "cover", # Garage Door Switch
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_ENTRY_CONTROL,
],
const.DISC_SPECIFIC_DEVICE_CLASS: [
const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL,
const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION,
const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON,
const.SPECIFIC_TYPE_SECURE_DOOR,
],
const.DISC_VALUES: dict(
DEFAULT_VALUES_SCHEMA,
**{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY],
const.DISC_GENRE: const.GENRE_USER,
}
},
),
},
{
const.DISC_COMPONENT: "cover", # Garage Door Barrier
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_ENTRY_CONTROL,
],
const.DISC_SPECIFIC_DEVICE_CLASS: [
const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL,
const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION,
const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON,
const.SPECIFIC_TYPE_SECURE_DOOR,
],
const.DISC_VALUES: dict(
DEFAULT_VALUES_SCHEMA,
**{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_BARRIER_OPERATOR],
const.DISC_INDEX: [const.INDEX_BARRIER_OPERATOR_LABEL],
}
},
),
},
{
const.DISC_COMPONENT: "fan",
const.DISC_GENERIC_DEVICE_CLASS: [const.GENERIC_TYPE_SWITCH_MULTILEVEL],
const.DISC_SPECIFIC_DEVICE_CLASS: [const.SPECIFIC_TYPE_FAN_SWITCH],
const.DISC_VALUES: dict(
DEFAULT_VALUES_SCHEMA,
**{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_LEVEL],
const.DISC_TYPE: const.TYPE_BYTE,
}
},
),
},
{
const.DISC_COMPONENT: "light",
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_SWITCH_REMOTE,
],
const.DISC_SPECIFIC_DEVICE_CLASS: [
const.SPECIFIC_TYPE_POWER_SWITCH_MULTILEVEL,
const.SPECIFIC_TYPE_SCENE_SWITCH_MULTILEVEL,
const.SPECIFIC_TYPE_NOT_USED,
],
const.DISC_VALUES: dict(
DEFAULT_VALUES_SCHEMA,
**{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_LEVEL],
const.DISC_TYPE: const.TYPE_BYTE,
},
"dimming_duration": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_DURATION],
const.DISC_OPTIONAL: True,
},
"color": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_COLOR],
const.DISC_INDEX: [const.INDEX_SWITCH_COLOR_COLOR],
const.DISC_OPTIONAL: True,
},
"color_channels": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_COLOR],
const.DISC_INDEX: [const.INDEX_SWITCH_COLOR_CHANNELS],
const.DISC_OPTIONAL: True,
},
},
),
},
{
const.DISC_COMPONENT: "lock",
const.DISC_GENERIC_DEVICE_CLASS: [const.GENERIC_TYPE_ENTRY_CONTROL],
const.DISC_SPECIFIC_DEVICE_CLASS: [
const.SPECIFIC_TYPE_DOOR_LOCK,
const.SPECIFIC_TYPE_ADVANCED_DOOR_LOCK,
const.SPECIFIC_TYPE_SECURE_KEYPAD_DOOR_LOCK,
const.SPECIFIC_TYPE_SECURE_LOCKBOX,
],
const.DISC_VALUES: dict(
DEFAULT_VALUES_SCHEMA,
**{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_DOOR_LOCK],
const.DISC_INDEX: [const.INDEX_DOOR_LOCK_LOCK],
},
"access_control": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM],
const.DISC_INDEX: [const.INDEX_ALARM_ACCESS_CONTROL],
const.DISC_OPTIONAL: True,
},
"alarm_type": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM],
const.DISC_INDEX: [const.INDEX_ALARM_TYPE],
const.DISC_OPTIONAL: True,
},
"alarm_level": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM],
const.DISC_INDEX: [const.INDEX_ALARM_LEVEL],
const.DISC_OPTIONAL: True,
},
"v2btze_advanced": {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION],
const.DISC_INDEX: [12],
const.DISC_OPTIONAL: True,
},
},
),
},
{
const.DISC_COMPONENT: "sensor",
const.DISC_VALUES: dict(
DEFAULT_VALUES_SCHEMA,
**{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [
const.COMMAND_CLASS_SENSOR_MULTILEVEL,
const.COMMAND_CLASS_METER,
const.COMMAND_CLASS_ALARM,
const.COMMAND_CLASS_SENSOR_ALARM,
const.COMMAND_CLASS_INDICATOR,
],
const.DISC_GENRE: const.GENRE_USER,
}
},
),
},
{
const.DISC_COMPONENT: "switch",
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_METER,
const.GENERIC_TYPE_SENSOR_ALARM,
const.GENERIC_TYPE_SENSOR_BINARY,
const.GENERIC_TYPE_SWITCH_BINARY,
const.GENERIC_TYPE_ENTRY_CONTROL,
const.GENERIC_TYPE_SENSOR_MULTILEVEL,
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_SENSOR_NOTIFICATION,
const.GENERIC_TYPE_GENERIC_CONTROLLER,
const.GENERIC_TYPE_SWITCH_REMOTE,
const.GENERIC_TYPE_REPEATER_SLAVE,
const.GENERIC_TYPE_THERMOSTAT,
const.GENERIC_TYPE_WALL_CONTROLLER,
],
const.DISC_VALUES: dict(
DEFAULT_VALUES_SCHEMA,
**{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY],
const.DISC_TYPE: const.TYPE_BOOL,
const.DISC_GENRE: const.GENRE_USER,
}
},
),
},
]
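# Note (added comment, presumably how the discovery code consumes this data):
# a node is mapped to a component when its generic/specific device classes
# appear in the schema and a value matching DISC_PRIMARY is present; value
# schemas flagged DISC_OPTIONAL only attach additional values when the node
# actually exposes them.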
| apache-2.0 | 7,222,602,960,342,554,000 | 37.914013 | 88 | 0.502905 | false |
tmenjo/cinder-2015.1.0 | cinder/db/sqlalchemy/api.py | 1 | 124586 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import datetime as dt
import functools
import sys
import threading
import time
import uuid
import warnings
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db import options
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
import osprofiler.sqlalchemy
import six
import sqlalchemy
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import joinedload, joinedload_all
from sqlalchemy.orm import RelationshipProperty
from sqlalchemy.schema import Table
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import true
from sqlalchemy.sql import func
from cinder.common import sqlalchemyutils
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _, _LW, _LE, _LI
CONF = cfg.CONF
CONF.import_group("profiler", "cinder.service")
LOG = logging.getLogger(__name__)
options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite')
_LOCK = threading.Lock()
_FACADE = None
def _create_facade_lazily():
global _LOCK
with _LOCK:
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database.iteritems())
)
if CONF.profiler.profiler_enabled:
if CONF.profiler.trace_sqlalchemy:
osprofiler.sqlalchemy.add_tracing(sqlalchemy,
_FACADE.get_engine(),
"db")
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def dispose_engine():
get_engine().dispose()
_DEFAULT_QUOTA_NAME = 'default'
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
warnings.warn(_('Use of empty request context is deprecated'),
DeprecationWarning)
raise Exception('die')
return context.is_admin
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.NotAuthorized()
elif context.project_id != project_id:
raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.NotAuthorized()
elif context.user_id != user_id:
raise exception.NotAuthorized()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.NotAuthorized()
elif context.quota_class != class_name:
raise exception.NotAuthorized()
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`authorize_project_context` and
:py:func:`authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
def require_volume_exists(f):
"""Decorator to require the specified volume to exist.
Requires the wrapped function to use context and volume_id as
their first two arguments.
"""
def wrapper(context, volume_id, *args, **kwargs):
volume_get(context, volume_id)
return f(context, volume_id, *args, **kwargs)
wrapper.__name__ = f.__name__
return wrapper
def require_snapshot_exists(f):
"""Decorator to require the specified snapshot to exist.
Requires the wrapped function to use context and snapshot_id as
their first two arguments.
"""
def wrapper(context, snapshot_id, *args, **kwargs):
snapshot_get(context, snapshot_id)
return f(context, snapshot_id, *args, **kwargs)
wrapper.__name__ = f.__name__
return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warn(_LW("Deadlock detected when running "
"'%(func_name)s': Retrying..."),
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
def model_query(context, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id.
"""
session = kwargs.get('session') or get_session()
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only')
query = session.query(*args)
if read_deleted == 'no':
query = query.filter_by(deleted=False)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter_by(deleted=True)
else:
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
if project_only and is_user_context(context):
query = query.filter_by(project_id=context.project_id)
return query
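# Illustrative usage sketch (not part of the original module):
#
#     model_query(context, models.Volume, read_deleted="no",
#                 project_only=True).filter_by(status='available').all()
#
# returns the caller's non-deleted volumes, with the deleted/project_id
# filters applied by the helper above.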
def _sync_volumes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(volumes, _gigs) = _volume_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'volumes'
if volume_type_name:
key += '_' + volume_type_name
return {key: volumes}
def _sync_snapshots(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(snapshots, _gigs) = _snapshot_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'snapshots'
if volume_type_name:
key += '_' + volume_type_name
return {key: snapshots}
def _sync_backups(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(backups, _gigs) = _backup_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'backups'
return {key: backups}
def _sync_gigabytes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(_junk, vol_gigs) = _volume_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'gigabytes'
if volume_type_name:
key += '_' + volume_type_name
if CONF.no_snapshot_gb_quota:
return {key: vol_gigs}
(_junk, snap_gigs) = _snapshot_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
return {key: vol_gigs + snap_gigs}
def _sync_consistencygroups(context, project_id, session,
volume_type_id=None,
volume_type_name=None):
(_junk, groups) = _consistencygroup_data_get_for_project(
context, project_id, session=session)
key = 'consistencygroups'
return {key: groups}
def _sync_backup_gigabytes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
key = 'backup_gigabytes'
(_junk, backup_gigs) = _backup_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
return {key: backup_gigs}
QUOTA_SYNC_FUNCTIONS = {
'_sync_volumes': _sync_volumes,
'_sync_snapshots': _sync_snapshots,
'_sync_gigabytes': _sync_gigabytes,
'_sync_consistencygroups': _sync_consistencygroups,
'_sync_backups': _sync_backups,
'_sync_backup_gigabytes': _sync_backup_gigabytes
}
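# Illustrative note (assumption about the resource definitions, which live in
# cinder.quota rather than here): a resource registered with the sync name
# '_sync_volumes', e.g. roughly
#
#     ReservableResource('volumes', '_sync_volumes', 'quota_volumes')
#
# is refreshed during quota_reserve() by looking that name up in the mapping
# above.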
###################
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id, session=session)
service_ref.delete(session=session)
@require_admin_context
def _service_get(context, service_id, session=None):
result = model_query(
context,
models.Service,
session=session).\
filter_by(id=service_id).\
first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def service_get(context, service_id):
return _service_get(context, service_id)
@require_admin_context
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_all_by_topic(context, topic, disabled=None):
query = model_query(
context, models.Service, read_deleted="no").\
filter_by(topic=topic)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
result = model_query(
context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
if not result:
raise exception.ServiceNotFound(service_id=None)
return result
@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
return model_query(context, models.Service,
func.coalesce(sort_value, 0),
session=session, read_deleted="no").\
filter_by(topic=topic).\
filter_by(disabled=False).\
outerjoin((subq, models.Service.host == subq.c.host)).\
order_by(sort_value).\
all()
@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
session = get_session()
with session.begin():
service_ref.save(session)
return service_ref
@require_admin_context
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id, session=session)
if ('disabled' in values):
service_ref['modified_at'] = timeutils.utcnow()
service_ref['updated_at'] = literal_column('updated_at')
service_ref.update(values)
return service_ref
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
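# Illustrative note (not part of the original module): for example,
# _metadata_refs({'k1': 'v1'}, models.VolumeMetadata) returns a single-element
# list whose VolumeMetadata row has key 'k1' and value 'v1'.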
def _dict_with_extra_specs(inst_type_query):
"""Convert type query result to dict with extra_spec and rate_limit.
Takes a volume type query returned by sqlalchemy and returns it
as a dictionary, converting the extra_specs entry from a list
of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = dict([(x['key'], x['value'])
for x in inst_type_query['extra_specs']])
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
###################
@require_admin_context
def iscsi_target_count_by_host(context, host):
return model_query(context, models.IscsiTarget).\
filter_by(host=host).\
count()
@require_admin_context
def iscsi_target_create_safe(context, values):
iscsi_target_ref = models.IscsiTarget()
for (key, value) in values.iteritems():
iscsi_target_ref[key] = value
session = get_session()
try:
with session.begin():
session.add(iscsi_target_ref)
return iscsi_target_ref
except db_exc.DBDuplicateEntry:
LOG.debug("Can not add duplicate IscsiTarget.")
return None
###################
@require_context
def _quota_get(context, project_id, resource, session=None):
result = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
if not result:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
def quota_get(context, project_id, resource):
return _quota_get(context, project_id, resource)
@require_context
def quota_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_create(context, project_id, resource, limit):
quota_ref = models.Quota()
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
session = get_session()
with session.begin():
quota_ref.save(session)
return quota_ref
@require_admin_context
def quota_update(context, project_id, resource, limit):
session = get_session()
with session.begin():
quota_ref = _quota_get(context, project_id, resource, session=session)
quota_ref.hard_limit = limit
return quota_ref
@require_admin_context
def quota_destroy(context, project_id, resource):
session = get_session()
with session.begin():
quota_ref = _quota_get(context, project_id, resource, session=session)
quota_ref.delete(session=session)
###################
@require_context
def _quota_class_get(context, class_name, resource, session=None):
result = model_query(context, models.QuotaClass, session=session,
read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
@require_context
def quota_class_get(context, class_name, resource):
return _quota_class_get(context, class_name, resource)
def quota_class_get_default(context):
rows = model_query(context, models.QuotaClass,
read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_class_get_all_by_name(context, class_name):
authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
session = get_session()
with session.begin():
quota_class_ref.save(session)
return quota_class_ref
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
session = get_session()
with session.begin():
quota_class_ref = _quota_class_get(context, class_name, resource,
session=session)
quota_class_ref.hard_limit = limit
return quota_class_ref
@require_admin_context
def quota_class_destroy(context, class_name, resource):
session = get_session()
with session.begin():
quota_class_ref = _quota_class_get(context, class_name, resource,
session=session)
quota_class_ref.delete(session=session)
@require_admin_context
def quota_class_destroy_all_by_name(context, class_name):
session = get_session()
with session.begin():
quota_classes = model_query(context, models.QuotaClass,
session=session, read_deleted="no").\
filter_by(class_name=class_name).\
all()
for quota_class_ref in quota_classes:
quota_class_ref.delete(session=session)
###################
@require_context
def quota_usage_get(context, project_id, resource):
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
@require_context
def quota_usage_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)
return result
@require_admin_context
def _quota_usage_create(context, project_id, resource, in_use, reserved,
until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
quota_usage_ref.save(session=session)
return quota_usage_ref
###################
def _reservation_create(context, uuid, usage, project_id, resource, delta,
expire, session=None):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
reservation_ref.project_id = project_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.save(session=session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_quota_usages(context, session, project_id):
# Broken out for testability
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
with_lockmode('update').\
all()
return dict((row.resource, row) for row in rows)
@require_context
@_retry_on_deadlock
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None):
elevated = context.elevated()
session = get_session()
with session.begin():
if project_id is None:
project_id = context.project_id
# Get the current usages
usages = _get_quota_usages(context, session, project_id)
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
refresh = False
if resource not in usages:
usages[resource] = _quota_usage_create(elevated,
project_id,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif usages[resource].in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
refresh = True
elif usages[resource].until_refresh is not None:
usages[resource].until_refresh -= 1
if usages[resource].until_refresh <= 0:
refresh = True
elif max_age and usages[resource].updated_at is not None and (
(usages[resource].updated_at -
timeutils.utcnow()).seconds >= max_age):
refresh = True
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
volume_type_id = getattr(resources[resource],
'volume_type_id', None)
volume_type_name = getattr(resources[resource],
'volume_type_name', None)
updates = sync(elevated, project_id,
volume_type_id=volume_type_id,
volume_type_name=volume_type_name,
session=session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if res not in usages:
usages[res] = _quota_usage_create(
elevated,
project_id,
res,
0, 0,
until_refresh or None,
session=session
)
# Update the usage
usages[res].in_use = in_use
usages[res].until_refresh = until_refresh or None
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
unders = [r for r, delta in deltas.items()
if delta < 0 and delta + usages[r].in_use < 0]
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
overs = [r for r, delta in deltas.items()
if quotas[r] >= 0 and delta >= 0 and
quotas[r] < delta + usages[r].total]
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for resource, delta in deltas.items():
reservation = _reservation_create(elevated,
str(uuid.uuid4()),
usages[resource],
project_id,
resource, delta, expire,
session=session)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0:
usages[resource].reserved += delta
if unders:
LOG.warning(_LW("Change will make usage less than 0 for the following "
"resources: %s") % unders)
if overs:
usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
for k, v in usages.items())
raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
usages=usages)
return reservations
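# Illustrative usage sketch (not part of the original module): callers
# normally reserve, then commit or roll back, e.g.
#
#     reservations = quota_reserve(ctxt, resources, quotas,
#                                  {'volumes': 1, 'gigabytes': 10},
#                                  expire, until_refresh=None, max_age=0)
#     try:
#         # ... create the volume ...
#         reservation_commit(ctxt, reservations)
#     except Exception:
#         reservation_rollback(ctxt, reservations)
#
# The resources/quotas mappings are normally supplied by cinder.quota.QUOTAS,
# which is an assumption here.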
def _quota_reservations(session, context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update').\
all()
@require_context
@_retry_on_deadlock
def reservation_commit(context, reservations, project_id=None):
session = get_session()
with session.begin():
usages = _get_quota_usages(context, session, project_id)
for reservation in _quota_reservations(session, context, reservations):
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation.delete(session=session)
@require_context
@_retry_on_deadlock
def reservation_rollback(context, reservations, project_id=None):
session = get_session()
with session.begin():
usages = _get_quota_usages(context, session, project_id)
for reservation in _quota_reservations(session, context, reservations):
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation.delete(session=session)
@require_admin_context
@_retry_on_deadlock
def quota_destroy_all_by_project(context, project_id):
session = get_session()
with session.begin():
quotas = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
all()
for quota_ref in quotas:
quota_ref.delete(session=session)
quota_usages = model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
all()
for quota_usage_ref in quota_usages:
quota_usage_ref.delete(session=session)
reservations = model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
all()
for reservation_ref in reservations:
reservation_ref.delete(session=session)
@require_admin_context
@_retry_on_deadlock
def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
results = model_query(context, models.Reservation, session=session,
read_deleted="no").\
filter(models.Reservation.expire < current_time).\
all()
if results:
for reservation in results:
if reservation.delta >= 0:
reservation.usage.reserved -= reservation.delta
reservation.usage.save(session=session)
reservation.delete(session=session)
###################
@require_admin_context
def volume_attach(context, values):
volume_attachment_ref = models.VolumeAttachment()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
volume_attachment_ref.update(values)
session = get_session()
with session.begin():
volume_attachment_ref.save(session=session)
return volume_attachment_get(context, values['id'],
session=session)
@require_admin_context
def volume_attached(context, attachment_id, instance_uuid, host_name,
mountpoint, attach_mode='rw'):
"""This method updates a volume attachment entry.
This function saves the information related to a particular
attachment for a volume. It also updates the volume record
to mark the volume as attached.
"""
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
volume_attachment_ref = volume_attachment_get(context, attachment_id,
session=session)
volume_attachment_ref['mountpoint'] = mountpoint
volume_attachment_ref['attach_status'] = 'attached'
volume_attachment_ref['instance_uuid'] = instance_uuid
volume_attachment_ref['attached_host'] = host_name
volume_attachment_ref['attach_time'] = timeutils.utcnow()
volume_attachment_ref['attach_mode'] = attach_mode
volume_ref = _volume_get(context, volume_attachment_ref['volume_id'],
session=session)
volume_attachment_ref.save(session=session)
volume_ref['status'] = 'in-use'
volume_ref['attach_status'] = 'attached'
volume_ref.save(session=session)
return volume_ref
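# Illustrative flow sketch (not part of the original module):
#
#     attachment = volume_attach(ctxt, {'volume_id': volume['id'],
#                                       'attach_status': 'attaching'})
#     volume_attached(ctxt, attachment['id'], instance_uuid,
#                     host_name=None, mountpoint='/dev/vdb')
#
# first records the attachment, then marks both the attachment row and the
# volume itself as attached/in-use; the literal values are assumptions.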
@require_context
def volume_create(context, values):
values['volume_metadata'] = _metadata_refs(values.get('metadata'),
models.VolumeMetadata)
if is_admin_context(context):
values['volume_admin_metadata'] = \
_metadata_refs(values.get('admin_metadata'),
models.VolumeAdminMetadata)
elif values.get('volume_admin_metadata'):
del values['volume_admin_metadata']
volume_ref = models.Volume()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
volume_ref.update(values)
session = get_session()
with session.begin():
session.add(volume_ref)
return _volume_get(context, values['id'], session=session)
@require_admin_context
def volume_data_get_for_host(context, host, count_only=False):
if count_only:
result = model_query(context,
func.count(models.Volume.id),
read_deleted="no").\
filter_by(host=host).\
first()
return result[0] or 0
else:
result = model_query(context,
func.count(models.Volume.id),
func.sum(models.Volume.size),
read_deleted="no").\
filter_by(host=host).\
first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def _volume_data_get_for_project(context, project_id, volume_type_id=None,
session=None):
query = model_query(context,
func.count(models.Volume.id),
func.sum(models.Volume.size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
if volume_type_id:
query = query.filter_by(volume_type_id=volume_type_id)
result = query.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def _backup_data_get_for_project(context, project_id, volume_type_id=None,
session=None):
query = model_query(context,
func.count(models.Backup.id),
func.sum(models.Backup.size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
if volume_type_id:
query = query.filter_by(volume_type_id=volume_type_id)
result = query.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def volume_data_get_for_project(context, project_id, volume_type_id=None):
return _volume_data_get_for_project(context, project_id, volume_type_id)
@require_admin_context
def finish_volume_migration(context, src_vol_id, dest_vol_id):
"""Copy almost all columns from dest to source."""
session = get_session()
with session.begin():
src_volume_ref = _volume_get(context, src_vol_id, session=session)
dest_volume_ref = _volume_get(context, dest_vol_id, session=session)
# NOTE(rpodolyaka): we should copy only column values, while model
# instances also have relationships attributes, which
# should be ignored
def is_column(inst, attr):
return attr in inst.__class__.__table__.columns
for key, value in dest_volume_ref.iteritems():
if key == 'id' or not is_column(dest_volume_ref, key):
continue
elif key == 'migration_status':
value = None
elif key == '_name_id':
value = dest_volume_ref['_name_id'] or dest_volume_ref['id']
setattr(src_volume_ref, key, value)
@require_admin_context
@_retry_on_deadlock
def volume_destroy(context, volume_id):
session = get_session()
now = timeutils.utcnow()
with session.begin():
model_query(context, models.Volume, session=session).\
filter_by(id=volume_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
model_query(context, models.IscsiTarget, session=session).\
filter_by(volume_id=volume_id).\
update({'volume_id': None})
model_query(context, models.VolumeMetadata, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
model_query(context, models.VolumeAdminMetadata, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
model_query(context, models.Transfer, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
@require_admin_context
def volume_detach(context, attachment_id):
session = get_session()
with session.begin():
volume_attachment_ref = volume_attachment_get(context, attachment_id,
session=session)
volume_attachment_ref['attach_status'] = 'detaching'
volume_attachment_ref.save(session=session)
@require_admin_context
def volume_detached(context, volume_id, attachment_id):
"""This updates a volume attachment and marks it as detached.
This method also ensures that the volume entry is correctly
marked as either still attached/in-use or detached/available
if this was the last detachment made.
"""
session = get_session()
with session.begin():
attachment = volume_attachment_get(context, attachment_id,
session=session)
# If this is already detached, attachment will be None
if attachment:
now = timeutils.utcnow()
attachment['attach_status'] = 'detached'
attachment['detach_time'] = now
attachment['deleted'] = True
attachment['deleted_at'] = now
attachment.save(session=session)
attachment_list = volume_attachment_get_used_by_volume_id(
context, volume_id, session=session)
remain_attachment = False
if attachment_list and len(attachment_list) > 0:
remain_attachment = True
volume_ref = _volume_get(context, volume_id, session=session)
if not remain_attachment:
# Hide status update from user if we're performing volume migration
# or uploading it to image
if (not volume_ref['migration_status'] and
not (volume_ref['status'] == 'uploading')):
volume_ref['status'] = 'available'
volume_ref['attach_status'] = 'detached'
volume_ref.save(session=session)
else:
# Volume is still attached
volume_ref['status'] = 'in-use'
volume_ref['attach_status'] = 'attached'
volume_ref.save(session=session)
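# Example usage sketch (illustrative only): detaching mirrors the attach
# workflow above -- volume_detach() flags the attachment as 'detaching'
# while the connection is torn down, then volume_detached() removes it and
# flips the volume back to 'available' only when no other attachments
# remain.  All ids are assumed to be supplied by the caller.
def _example_detach_workflow(context, volume_id, attachment_id):
    volume_detach(context, attachment_id)
    # ... backend connection teardown would happen here ...
    volume_detached(context, volume_id, attachment_id)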
@require_context
def _volume_get_query(context, session=None, project_only=False):
if is_admin_context(context):
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_admin_metadata')).\
options(joinedload('volume_type')).\
options(joinedload('volume_type.extra_specs')).\
options(joinedload('volume_attachment')).\
options(joinedload('consistencygroup'))
else:
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type')).\
options(joinedload('volume_type.extra_specs')).\
options(joinedload('volume_attachment')).\
options(joinedload('consistencygroup'))
@require_context
def _volume_get(context, volume_id, session=None):
result = _volume_get_query(context, session=session, project_only=True).\
filter_by(id=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result
@require_context
def volume_attachment_get(context, attachment_id, session=None):
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(id=attachment_id).\
first()
if not result:
raise exception.VolumeAttachmentNotFound(filter='attachment_id = %s' %
attachment_id)
return result
@require_context
def volume_attachment_get_used_by_volume_id(context, volume_id, session=None):
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(volume_id=volume_id).\
filter(models.VolumeAttachment.attach_status != 'detached').\
all()
return result
@require_context
def volume_attachment_get_by_host(context, volume_id, host):
session = get_session()
with session.begin():
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(volume_id=volume_id).\
filter_by(attached_host=host).\
filter(models.VolumeAttachment.attach_status != 'detached').\
first()
return result
@require_context
def volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid):
session = get_session()
with session.begin():
result = model_query(context, models.VolumeAttachment,
session=session).\
filter_by(volume_id=volume_id).\
filter_by(instance_uuid=instance_uuid).\
filter(models.VolumeAttachment.attach_status != 'detached').\
first()
return result
@require_context
def volume_get(context, volume_id):
return _volume_get(context, volume_id)
@require_admin_context
def volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None,
filters=None):
"""Retrieves all volumes.
If no sort parameters are specified then the returned volumes are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
session = get_session()
with session.begin():
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
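# Example usage sketch (illustrative only): page through available volumes
# 100 at a time, largest first.  The admin context is assumed to come from
# the caller; the marker is the id of the last volume of the previous page.
def _example_volume_pages(context, page_size=100):
    marker = None
    while True:
        page = volume_get_all(context, marker, page_size,
                              sort_keys=['size'], sort_dirs=['desc'],
                              filters={'status': 'available'})
        if not page:
            return
        for volume in page:
            yield volume
        marker = page[-1]['id']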
@require_admin_context
def volume_get_all_by_host(context, host, filters=None):
"""Retrieves all volumes hosted on a host.
:param context: context to query under
:param host: host for all volumes being retrieved
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
    # As a side effect of the introduction of the pool-aware scheduler,
    # newly created volumes have pool information appended to the
    # 'host' field of a volume record, so a volume record in the DB can
    # now take either form below:
# Host
# Host#Pool
if host and isinstance(host, basestring):
session = get_session()
with session.begin():
host_attr = getattr(models.Volume, 'host')
conditions = [host_attr == host,
host_attr.op('LIKE')(host + '#%')]
query = _volume_get_query(context).filter(or_(*conditions))
if filters:
query = _process_volume_filters(query, filters)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
elif not host:
return []
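# Example (illustrative only): because of the pool suffix handling above, a
# query for host 'cinder-1' matches volumes whose host column is either
# 'cinder-1' or 'cinder-1#<pool>' (e.g. 'cinder-1#lvm-pool') but not
# 'cinder-10'.  The host name and filters below are hypothetical.
def _example_volumes_on_host(context):
    return volume_get_all_by_host(context, 'cinder-1',
                                  filters={'status': 'available'})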
@require_context
def volume_get_all_by_group(context, group_id, filters=None):
"""Retrieves all volumes associated with the group_id.
:param context: context to query under
:param group_id: group ID for all volumes being retrieved
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
query = _volume_get_query(context).filter_by(consistencygroup_id=group_id)
if filters:
query = _process_volume_filters(query, filters)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
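# Example (illustrative only): list the volumes of a consistency group that
# are still being created; the list value becomes an IN clause via
# _process_volume_filters().  The group id is assumed to come from the
# caller.
def _example_pending_volumes_in_group(context, group_id):
    return volume_get_all_by_group(context, group_id,
                                   filters={'status': ['creating',
                                                       'downloading']})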
@require_context
def volume_get_all_by_project(context, project_id, marker, limit,
sort_keys=None, sort_dirs=None, filters=None):
""""Retrieves all volumes in a project.
If no sort parameters are specified then the returned volumes are sorted
first by the 'created_at' key and then by the 'id' key in descending
order.
:param context: context to query under
:param project_id: project for all volumes being retrieved
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: list of matching volumes
"""
session = get_session()
with session.begin():
authorize_project_context(context, project_id)
# Add in the project filter without modifying the given filters
filters = filters.copy() if filters else {}
filters['project_id'] = project_id
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_keys, sort_dirs, filters)
# No volumes would match, return empty list
if query is None:
return []
return query.all()
def _generate_paginate_query(context, session, marker, limit, sort_keys,
sort_dirs, filters):
"""Generate the query to include the filters and the paginate options.
Returns a query with sorting / pagination criteria added or None
if the given filters will not yield any results.
:param context: context to query under
:param session: the session to use
    :param marker: the last item of the previous page; we return the next
results after this value.
:param limit: maximum number of items to return
:param sort_keys: list of attributes by which results should be sorted,
paired with corresponding item in sort_dirs
:param sort_dirs: list of directions in which results should be sorted,
paired with corresponding item in sort_keys
:param filters: dictionary of filters; values that are in lists, tuples,
or sets cause an 'IN' operation, while exact matching
is used for other values, see _process_volume_filters
function for more information
:returns: updated query or None
"""
sort_keys, sort_dirs = process_sort_params(sort_keys,
sort_dirs,
default_dir='desc')
query = _volume_get_query(context, session=session)
if filters:
query = _process_volume_filters(query, filters)
if query is None:
return None
marker_volume = None
if marker is not None:
marker_volume = _volume_get(context, marker, session)
return sqlalchemyutils.paginate_query(query, models.Volume, limit,
sort_keys,
marker=marker_volume,
sort_dirs=sort_dirs)
def _process_volume_filters(query, filters):
"""Common filter processing for Volume queries.
Filter values that are in lists, tuples, or sets cause an 'IN' operator
to be used, while exact matching ('==' operator) is used for other values.
A filter key/value of 'no_migration_targets'=True causes volumes with
either a NULL 'migration_status' or a 'migration_status' that does not
start with 'target:' to be retrieved.
A 'metadata' filter key must correspond to a dictionary value of metadata
key-value pairs.
:param query: Model query to use
:param filters: dictionary of filters
:returns: updated query or None
"""
filters = filters.copy()
# 'no_migration_targets' is unique, must be either NULL or
# not start with 'target:'
if filters.get('no_migration_targets', False):
filters.pop('no_migration_targets')
try:
column_attr = getattr(models.Volume, 'migration_status')
conditions = [column_attr == None, # noqa
column_attr.op('NOT LIKE')('target:%')]
query = query.filter(or_(*conditions))
except AttributeError:
LOG.debug("'migration_status' column could not be found.")
return None
# Apply exact match filters for everything else, ensure that the
# filter value exists on the model
for key in filters.keys():
# metadata is unique, must be a dict
if key == 'metadata':
if not isinstance(filters[key], dict):
LOG.debug("'metadata' filter value is not valid.")
return None
continue
try:
column_attr = getattr(models.Volume, key)
# Do not allow relationship properties since those require
# schema specific knowledge
prop = getattr(column_attr, 'property')
if isinstance(prop, RelationshipProperty):
LOG.debug(("'%s' filter key is not valid, "
"it maps to a relationship."), key)
return None
except AttributeError:
LOG.debug("'%s' filter key is not valid.", key)
return None
# Holds the simple exact matches
filter_dict = {}
# Iterate over all filters, special case the filter if necessary
for key, value in filters.iteritems():
if key == 'metadata':
# model.VolumeMetadata defines the backref to Volumes as
# 'volume_metadata' or 'volume_admin_metadata', use those as
# column attribute keys
col_attr = getattr(models.Volume, 'volume_metadata')
col_ad_attr = getattr(models.Volume, 'volume_admin_metadata')
for k, v in value.iteritems():
query = query.filter(or_(col_attr.any(key=k, value=v),
col_ad_attr.any(key=k, value=v)))
elif isinstance(value, (list, tuple, set, frozenset)):
# Looking for values in a list; apply to query directly
column_attr = getattr(models.Volume, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter_by(**filter_dict)
return query
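# Example (illustrative only) of the filter semantics described above: list
# values become an IN clause, scalars become exact matches, and the
# 'metadata' dict is matched key by key against user or admin metadata.
# The filter values are hypothetical.
def _example_filtered_query(context):
    query = _volume_get_query(context)
    filters = {'status': ['available', 'in-use'],    # IN (...)
               'availability_zone': 'nova',          # exact match
               'metadata': {'tier': 'gold'}}         # metadata key/value
    return _process_volume_filters(query, filters)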
def process_sort_params(sort_keys, sort_dirs, default_keys=None,
default_dir='asc'):
"""Process the sort parameters to include default keys.
Creates a list of sort keys and a list of sort directions. Adds the default
keys to the end of the list if they are not already included.
When adding the default keys to the sort keys list, the associated
direction is:
1) The first element in the 'sort_dirs' list (if specified), else
2) 'default_dir' value (Note that 'asc' is the default value since this is
the default in sqlalchemy.utils.paginate_query)
:param sort_keys: List of sort keys to include in the processed list
:param sort_dirs: List of sort directions to include in the processed list
:param default_keys: List of sort keys that need to be included in the
processed list, they are added at the end of the list
if not already specified.
:param default_dir: Sort direction associated with each of the default
keys that are not supplied, used when they are added
to the processed list
:returns: list of sort keys, list of sort directions
:raise exception.InvalidInput: If more sort directions than sort keys
are specified or if an invalid sort
direction is specified
"""
if default_keys is None:
default_keys = ['created_at', 'id']
# Determine direction to use for when adding default keys
if sort_dirs and len(sort_dirs):
default_dir_value = sort_dirs[0]
else:
default_dir_value = default_dir
# Create list of keys (do not modify the input list)
if sort_keys:
result_keys = list(sort_keys)
else:
result_keys = []
# If a list of directions is not provided, use the default sort direction
# for all provided keys.
if sort_dirs:
result_dirs = []
# Verify sort direction
for sort_dir in sort_dirs:
if sort_dir not in ('asc', 'desc'):
msg = _LE("Unknown sort direction, must be 'desc' or 'asc'.")
raise exception.InvalidInput(reason=msg)
result_dirs.append(sort_dir)
else:
result_dirs = [default_dir_value for _sort_key in result_keys]
# Ensure that the key and direction length match
while len(result_dirs) < len(result_keys):
result_dirs.append(default_dir_value)
    # Unless more directions are specified, which is an error
if len(result_dirs) > len(result_keys):
msg = _LE("Sort direction array size exceeds sort key array size.")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
for key in default_keys:
if key not in result_keys:
result_keys.append(key)
result_dirs.append(default_dir_value)
return result_keys, result_dirs
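# Example (illustrative only): the default keys are appended using the
# first supplied direction, so process_sort_params(['size'], ['desc'])
# returns (['size', 'created_at', 'id'], ['desc', 'desc', 'desc']), while a
# call without arguments falls back to (['created_at', 'id'],
# ['asc', 'asc']).
def _example_sort_params():
    explicit = process_sort_params(['size'], ['desc'])
    defaults = process_sort_params(None, None)
    return explicit, defaults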
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
result = model_query(context, models.IscsiTarget, read_deleted="yes").\
filter_by(volume_id=volume_id).\
first()
if not result:
raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
return result.target_num
@require_context
def volume_update(context, volume_id, values):
session = get_session()
with session.begin():
metadata = values.get('metadata')
if metadata is not None:
_volume_user_metadata_update(context,
volume_id,
values.pop('metadata'),
delete=True,
session=session)
admin_metadata = values.get('admin_metadata')
if is_admin_context(context) and admin_metadata is not None:
_volume_admin_metadata_update(context,
volume_id,
values.pop('admin_metadata'),
delete=True,
session=session)
volume_ref = _volume_get(context, volume_id, session=session)
volume_ref.update(values)
return volume_ref
@require_context
def volume_attachment_update(context, attachment_id, values):
session = get_session()
with session.begin():
volume_attachment_ref = volume_attachment_get(context, attachment_id,
session=session)
volume_attachment_ref.update(values)
volume_attachment_ref.save(session=session)
return volume_attachment_ref
####################
def _volume_x_metadata_get_query(context, volume_id, model, session=None):
return model_query(context, model, session=session, read_deleted="no").\
filter_by(volume_id=volume_id)
def _volume_x_metadata_get(context, volume_id, model, session=None):
rows = _volume_x_metadata_get_query(context, volume_id, model,
session=session).all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec,
session=None):
result = _volume_x_metadata_get_query(context, volume_id,
model, session=session).\
filter_by(key=key).\
first()
if not result:
raise notfound_exec(metadata_key=key, volume_id=volume_id)
return result
def _volume_x_metadata_update(context, volume_id, metadata, delete,
model, notfound_exec, session=None):
if not session:
session = get_session()
with session.begin(subtransactions=True):
# Set existing metadata to deleted if delete argument is True
if delete:
original_metadata = _volume_x_metadata_get(context, volume_id,
model, session=session)
for meta_key, meta_value in original_metadata.iteritems():
if meta_key not in metadata:
meta_ref = _volume_x_metadata_get_item(context, volume_id,
meta_key, model,
notfound_exec,
session=session)
meta_ref.update({'deleted': True})
meta_ref.save(session=session)
meta_ref = None
# Now update all existing items with new values, or create new meta
# objects
for meta_key, meta_value in metadata.items():
# update the value whether it exists or not
item = {"value": meta_value}
try:
meta_ref = _volume_x_metadata_get_item(context, volume_id,
meta_key, model,
notfound_exec,
session=session)
except notfound_exec:
meta_ref = model()
item.update({"key": meta_key, "volume_id": volume_id})
meta_ref.update(item)
meta_ref.save(session=session)
return _volume_x_metadata_get(context, volume_id, model)
def _volume_user_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeMetadata, session=session)
@require_context
@require_volume_exists
def _volume_user_metadata_get(context, volume_id, session=None):
return _volume_x_metadata_get(context, volume_id,
models.VolumeMetadata, session=session)
@require_context
def _volume_user_metadata_get_item(context, volume_id, key, session=None):
return _volume_x_metadata_get_item(context, volume_id, key,
models.VolumeMetadata,
exception.VolumeMetadataNotFound,
session=session)
@require_context
@require_volume_exists
def _volume_user_metadata_update(context, volume_id, metadata, delete,
session=None):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeMetadata,
exception.VolumeMetadataNotFound,
session=session)
@require_context
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key):
return _volume_user_metadata_get_item(context, volume_id, key)
@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
return _volume_user_metadata_get(context, volume_id)
@require_context
@require_volume_exists
@_retry_on_deadlock
def volume_metadata_delete(context, volume_id, key):
_volume_user_metadata_get_query(context, volume_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
@require_volume_exists
@_retry_on_deadlock
def volume_metadata_update(context, volume_id, metadata, delete):
return _volume_user_metadata_update(context, volume_id, metadata, delete)
###################
def _volume_admin_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeAdminMetadata,
session=session)
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_get(context, volume_id, session=None):
return _volume_x_metadata_get(context, volume_id,
models.VolumeAdminMetadata, session=session)
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_update(context, volume_id, metadata, delete,
session=None):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeAdminMetadata,
exception.VolumeAdminMetadataNotFound,
session=session)
@require_admin_context
@require_volume_exists
def volume_admin_metadata_get(context, volume_id):
return _volume_admin_metadata_get(context, volume_id)
@require_admin_context
@require_volume_exists
@_retry_on_deadlock
def volume_admin_metadata_delete(context, volume_id, key):
_volume_admin_metadata_get_query(context, volume_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
@require_volume_exists
@_retry_on_deadlock
def volume_admin_metadata_update(context, volume_id, metadata, delete):
return _volume_admin_metadata_update(context, volume_id, metadata, delete)
###################
@require_context
def snapshot_create(context, values):
values['snapshot_metadata'] = _metadata_refs(values.get('metadata'),
models.SnapshotMetadata)
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
snapshot_ref = models.Snapshot()
snapshot_ref.update(values)
session.add(snapshot_ref)
return _snapshot_get(context, values['id'], session=session)
@require_admin_context
@_retry_on_deadlock
def snapshot_destroy(context, snapshot_id):
session = get_session()
with session.begin():
model_query(context, models.Snapshot, session=session).\
filter_by(id=snapshot_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
model_query(context, models.SnapshotMetadata, session=session).\
filter_by(snapshot_id=snapshot_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _snapshot_get(context, snapshot_id, session=None):
result = model_query(context, models.Snapshot, session=session,
project_only=True).\
options(joinedload('volume')).\
options(joinedload('snapshot_metadata')).\
filter_by(id=snapshot_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return result
@require_context
def snapshot_get(context, snapshot_id):
return _snapshot_get(context, snapshot_id)
@require_admin_context
def snapshot_get_all(context):
return model_query(context, models.Snapshot).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def snapshot_get_all_for_volume(context, volume_id):
return model_query(context, models.Snapshot, read_deleted='no',
project_only=True).\
filter_by(volume_id=volume_id).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def snapshot_get_by_host(context, host, filters=None):
query = model_query(context, models.Snapshot, read_deleted='no',
project_only=True)
if filters:
query = query.filter_by(**filters)
return query.join(models.Snapshot.volume).filter(
models.Volume.host == host).options(
joinedload('snapshot_metadata')).all()
@require_context
def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id):
return model_query(context, models.Snapshot, read_deleted='no',
project_only=True).\
filter_by(cgsnapshot_id=cgsnapshot_id).\
options(joinedload('volume')).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def snapshot_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
return model_query(context, models.Snapshot).\
filter_by(project_id=project_id).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def _snapshot_data_get_for_project(context, project_id, volume_type_id=None,
session=None):
authorize_project_context(context, project_id)
query = model_query(context,
func.count(models.Snapshot.id),
func.sum(models.Snapshot.volume_size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
if volume_type_id:
query = query.join('volume').filter_by(volume_type_id=volume_type_id)
result = query.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_context
def snapshot_data_get_for_project(context, project_id, volume_type_id=None):
return _snapshot_data_get_for_project(context, project_id, volume_type_id)
@require_context
def snapshot_get_active_by_window(context, begin, end=None, project_id=None):
"""Return snapshots that were active during window."""
query = model_query(context, models.Snapshot, read_deleted="yes")
query = query.filter(or_(models.Snapshot.deleted_at == None, # noqa
models.Snapshot.deleted_at > begin))
query = query.options(joinedload(models.Snapshot.volume))
if end:
query = query.filter(models.Snapshot.created_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
return query.all()
@require_context
def snapshot_update(context, snapshot_id, values):
session = get_session()
with session.begin():
snapshot_ref = _snapshot_get(context, snapshot_id, session=session)
snapshot_ref.update(values)
return snapshot_ref
####################
def _snapshot_metadata_get_query(context, snapshot_id, session=None):
return model_query(context, models.SnapshotMetadata,
session=session, read_deleted="no").\
filter_by(snapshot_id=snapshot_id)
@require_context
@require_snapshot_exists
def _snapshot_metadata_get(context, snapshot_id, session=None):
rows = _snapshot_metadata_get_query(context, snapshot_id, session).all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
@require_snapshot_exists
def snapshot_metadata_get(context, snapshot_id):
return _snapshot_metadata_get(context, snapshot_id)
@require_context
@require_snapshot_exists
@_retry_on_deadlock
def snapshot_metadata_delete(context, snapshot_id, key):
_snapshot_metadata_get_query(context, snapshot_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _snapshot_metadata_get_item(context, snapshot_id, key, session=None):
result = _snapshot_metadata_get_query(context,
snapshot_id,
session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.SnapshotMetadataNotFound(metadata_key=key,
snapshot_id=snapshot_id)
return result
@require_context
@require_snapshot_exists
@_retry_on_deadlock
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
session = get_session()
with session.begin():
# Set existing metadata to deleted if delete argument is True
if delete:
original_metadata = _snapshot_metadata_get(context, snapshot_id,
session)
for meta_key, meta_value in original_metadata.iteritems():
if meta_key not in metadata:
meta_ref = _snapshot_metadata_get_item(context,
snapshot_id,
meta_key, session)
meta_ref.update({'deleted': True})
meta_ref.save(session=session)
meta_ref = None
# Now update all existing items with new values, or create new meta
# objects
for meta_key, meta_value in metadata.items():
# update the value whether it exists or not
item = {"value": meta_value}
try:
meta_ref = _snapshot_metadata_get_item(context, snapshot_id,
meta_key, session)
except exception.SnapshotMetadataNotFound:
meta_ref = models.SnapshotMetadata()
item.update({"key": meta_key, "snapshot_id": snapshot_id})
meta_ref.update(item)
meta_ref.save(session=session)
return snapshot_metadata_get(context, snapshot_id)
###################
@require_admin_context
def volume_type_create(context, values, projects=None):
"""Create a new volume type.
    In order to pass in extra specs, the values dict should contain an
    'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
if not values.get('id'):
values['id'] = str(uuid.uuid4())
projects = projects or []
session = get_session()
with session.begin():
try:
_volume_type_get_by_name(context, values['name'], session)
raise exception.VolumeTypeExists(id=values['name'])
except exception.VolumeTypeNotFoundByName:
pass
try:
_volume_type_get(context, values['id'], session)
raise exception.VolumeTypeExists(id=values['id'])
except exception.VolumeTypeNotFound:
pass
try:
values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
models.VolumeTypeExtraSpecs)
volume_type_ref = models.VolumeTypes()
volume_type_ref.update(values)
session.add(volume_type_ref)
except Exception as e:
raise db_exc.DBError(e)
for project in set(projects):
access_ref = models.VolumeTypeProjects()
access_ref.update({"volume_type_id": volume_type_ref.id,
"project_id": project})
access_ref.save(session=session)
return volume_type_ref
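# Example (illustrative only): create a public volume type with extra specs
# and a private one restricted to two hypothetical project ids.  The names,
# spec keys and project ids are made up; an admin context is assumed.
def _example_create_volume_types(context):
    gold = volume_type_create(context,
                              {'name': 'gold',
                               'extra_specs': {'replication': 'enabled',
                                               'qos': 'high'}})
    private = volume_type_create(context,
                                 {'name': 'internal', 'is_public': False},
                                 projects=['project-a', 'project-b'])
    return gold, private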
def _volume_type_get_query(context, session=None, read_deleted=None,
expected_fields=None):
expected_fields = expected_fields or []
query = model_query(context,
models.VolumeTypes,
session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if 'projects' in expected_fields:
query = query.options(joinedload('projects'))
if not context.is_admin:
the_filter = [models.VolumeTypes.is_public == true()]
projects_attr = getattr(models.VolumeTypes, 'projects')
the_filter.extend([
projects_attr.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
@require_admin_context
def volume_type_update(context, volume_type_id, values):
session = get_session()
with session.begin():
# Check it exists
volume_type_ref = _volume_type_ref_get(context,
volume_type_id,
session)
if not volume_type_ref:
raise exception.VolumeTypeNotFound(type_id=volume_type_id)
# No description change
if values['description'] is None:
del values['description']
# No name change
if values['name'] is None:
del values['name']
else:
            # Volume type name is unique. Changing it to a name that
            # belongs to a different volume_type should be prevented.
check_vol_type = None
try:
check_vol_type = \
_volume_type_get_by_name(context,
values['name'],
session=session)
except exception.VolumeTypeNotFoundByName:
pass
else:
if check_vol_type.get('id') != volume_type_id:
raise exception.VolumeTypeExists(id=values['name'])
volume_type_ref.update(values)
volume_type_ref.save(session=session)
volume_type = volume_type_get(context, volume_type_id)
return volume_type
@require_context
def volume_type_get_all(context, inactive=False, filters=None):
"""Returns a dict describing all volume_types with name as key."""
filters = filters or {}
read_deleted = "yes" if inactive else "no"
query = _volume_type_get_query(context, read_deleted=read_deleted)
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.VolumeTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
projects_attr = getattr(models.VolumeTypes, 'projects')
the_filter.extend([
projects_attr.any(project_id=context.project_id, deleted=False)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
rows = query.order_by("name").all()
result = {}
for row in rows:
result[row['name']] = _dict_with_extra_specs(row)
return result
def _volume_type_get_id_from_volume_type_query(context, id, session=None):
return model_query(
context, models.VolumeTypes.id, read_deleted="no",
session=session, base_model=models.VolumeTypes).\
filter_by(id=id)
def _volume_type_get_id_from_volume_type(context, id, session=None):
result = _volume_type_get_id_from_volume_type_query(
context, id, session=session).first()
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
return result[0]
@require_context
def _volume_type_get(context, id, session=None, inactive=False,
expected_fields=None):
expected_fields = expected_fields or []
read_deleted = "yes" if inactive else "no"
result = _volume_type_get_query(
context, session, read_deleted, expected_fields).\
filter_by(id=id).\
first()
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
vtype = _dict_with_extra_specs(result)
if 'projects' in expected_fields:
vtype['projects'] = [p['project_id'] for p in result['projects']]
return vtype
@require_context
def volume_type_get(context, id, inactive=False, expected_fields=None):
"""Return a dict describing specific volume_type."""
return _volume_type_get(context, id,
session=None,
inactive=inactive,
expected_fields=expected_fields)
@require_context
def _volume_type_ref_get(context, id, session=None, inactive=False):
read_deleted = "yes" if inactive else "no"
result = model_query(context,
models.VolumeTypes,
session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs')).\
filter_by(id=id).\
first()
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
return result
@require_context
def _volume_type_get_by_name(context, name, session=None):
result = model_query(context, models.VolumeTypes, session=session).\
options(joinedload('extra_specs')).\
filter_by(name=name).\
first()
if not result:
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
return _dict_with_extra_specs(result)
@require_context
def volume_type_get_by_name(context, name):
"""Return a dict describing specific volume_type."""
return _volume_type_get_by_name(context, name)
@require_context
def volume_types_get_by_name_or_id(context, volume_type_list):
"""Return a dict describing specific volume_type."""
req_volume_types = []
for vol_t in volume_type_list:
if not uuidutils.is_uuid_like(vol_t):
vol_type = _volume_type_get_by_name(context, vol_t)
else:
vol_type = _volume_type_get(context, vol_t)
req_volume_types.append(vol_type)
return req_volume_types
@require_admin_context
def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
read_deleted = "yes" if inactive else "no"
return model_query(context, models.VolumeTypes,
read_deleted=read_deleted). \
filter_by(qos_specs_id=qos_specs_id).all()
@require_admin_context
def volume_type_qos_associate(context, type_id, qos_specs_id):
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
session.query(models.VolumeTypes). \
filter_by(id=type_id). \
update({'qos_specs_id': qos_specs_id,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_disassociate(context, qos_specs_id, type_id):
"""Disassociate volume type from qos specs."""
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
session.query(models.VolumeTypes). \
filter_by(id=type_id). \
filter_by(qos_specs_id=qos_specs_id). \
update({'qos_specs_id': None,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_disassociate_all(context, qos_specs_id):
"""Disassociate all volume types associated with specified qos specs."""
session = get_session()
with session.begin():
session.query(models.VolumeTypes). \
filter_by(qos_specs_id=qos_specs_id). \
update({'qos_specs_id': None,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_specs_get(context, type_id):
"""Return all qos specs for given volume type.
result looks like:
{
'qos_specs':
{
'id': 'qos-specs-id',
'name': 'qos_specs_name',
'consumer': 'Consumer',
'specs': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3'
}
}
}
"""
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
row = session.query(models.VolumeTypes). \
options(joinedload('qos_specs')). \
filter_by(id=type_id). \
first()
        # row.qos_specs is a list of QualityOfServiceSpecs refs
specs = _dict_with_qos_specs(row.qos_specs)
if not specs:
# turn empty list to None
specs = None
else:
specs = specs[0]
return {'qos_specs': specs}
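# Example (illustrative only): read back the consumer and one spec value
# for a volume type; 'qos_specs' is None when the type has no associated
# qos specs.  The type id and spec key are assumed/hypothetical.
def _example_qos_for_type(context, type_id):
    specs = volume_type_qos_specs_get(context, type_id)['qos_specs']
    if specs is None:
        return None
    return specs['consumer'], specs['specs'].get('total_iops_sec')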
@require_admin_context
@_retry_on_deadlock
def volume_type_destroy(context, id):
session = get_session()
with session.begin():
_volume_type_get(context, id, session)
results = model_query(context, models.Volume, session=session). \
filter_by(volume_type_id=id).all()
if results:
msg = _('VolumeType %s deletion failed, VolumeType in use.') % id
LOG.error(msg)
raise exception.VolumeTypeInUse(volume_type_id=id)
model_query(context, models.VolumeTypes, session=session).\
filter_by(id=id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
model_query(context, models.VolumeTypeExtraSpecs, session=session).\
filter_by(volume_type_id=id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def volume_get_active_by_window(context,
begin,
end=None,
project_id=None):
"""Return volumes that were active during window."""
query = model_query(context, models.Volume, read_deleted="yes")
query = query.filter(or_(models.Volume.deleted_at == None, # noqa
models.Volume.deleted_at > begin))
if end:
query = query.filter(models.Volume.created_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
return query.all()
def _volume_type_access_query(context, session=None):
return model_query(context, models.VolumeTypeProjects, session=session,
read_deleted="no")
@require_admin_context
def volume_type_access_get_all(context, type_id):
volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
return _volume_type_access_query(context).\
filter_by(volume_type_id=volume_type_id).all()
@require_admin_context
def volume_type_access_add(context, type_id, project_id):
"""Add given tenant to the volume type access list."""
volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
access_ref = models.VolumeTypeProjects()
access_ref.update({"volume_type_id": volume_type_id,
"project_id": project_id})
session = get_session()
with session.begin():
try:
access_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.VolumeTypeAccessExists(volume_type_id=type_id,
project_id=project_id)
return access_ref
@require_admin_context
def volume_type_access_remove(context, type_id, project_id):
"""Remove given tenant from the volume type access list."""
volume_type_id = _volume_type_get_id_from_volume_type(context, type_id)
count = _volume_type_access_query(context).\
filter_by(volume_type_id=volume_type_id).\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.VolumeTypeAccessNotFound(
volume_type_id=type_id, project_id=project_id)
####################
def _volume_type_extra_specs_query(context, volume_type_id, session=None):
return model_query(context, models.VolumeTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(volume_type_id=volume_type_id)
@require_context
def volume_type_extra_specs_get(context, volume_type_id):
rows = _volume_type_extra_specs_query(context, volume_type_id).\
all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
def volume_type_extra_specs_delete(context, volume_type_id, key):
session = get_session()
with session.begin():
_volume_type_extra_specs_get_item(context, volume_type_id, key,
session)
_volume_type_extra_specs_query(context, volume_type_id, session).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _volume_type_extra_specs_get_item(context, volume_type_id, key,
session=None):
result = _volume_type_extra_specs_query(
context, volume_type_id, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.VolumeTypeExtraSpecsNotFound(
extra_specs_key=key,
volume_type_id=volume_type_id)
return result
@require_context
def volume_type_extra_specs_update_or_create(context, volume_type_id,
specs):
session = get_session()
with session.begin():
spec_ref = None
for key, value in specs.iteritems():
try:
spec_ref = _volume_type_extra_specs_get_item(
context, volume_type_id, key, session)
except exception.VolumeTypeExtraSpecsNotFound:
spec_ref = models.VolumeTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"volume_type_id": volume_type_id,
"deleted": False})
spec_ref.save(session=session)
return specs
####################
@require_admin_context
def qos_specs_create(context, values):
"""Create a new QoS specs.
    :param values: dictionary that contains specifications for QoS
e.g. {'name': 'Name',
'qos_specs': {
'consumer': 'front-end',
'total_iops_sec': 1000,
'total_bytes_sec': 1024000
}
}
"""
specs_id = str(uuid.uuid4())
session = get_session()
with session.begin():
try:
_qos_specs_get_by_name(context, values['name'], session)
raise exception.QoSSpecsExists(specs_id=values['name'])
except exception.QoSSpecsNotFound:
pass
try:
# Insert a root entry for QoS specs
specs_root = models.QualityOfServiceSpecs()
root = dict(id=specs_id)
# 'QoS_Specs_Name' is an internal reserved key to store
# the name of QoS specs
root['key'] = 'QoS_Specs_Name'
root['value'] = values['name']
LOG.debug("DB qos_specs_create(): root %s", root)
specs_root.update(root)
specs_root.save(session=session)
# Insert all specification entries for QoS specs
for k, v in values['qos_specs'].iteritems():
item = dict(key=k, value=v, specs_id=specs_id)
item['id'] = str(uuid.uuid4())
spec_entry = models.QualityOfServiceSpecs()
spec_entry.update(item)
spec_entry.save(session=session)
except Exception as e:
raise db_exc.DBError(e)
return dict(id=specs_root.id, name=specs_root.value)
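# Example (illustrative only): create a qos specs entry in the shape the
# docstring above describes and tie it to an existing volume type.  The
# name, limits and type id are hypothetical; an admin context is assumed.
def _example_create_and_associate_qos(context, type_id):
    qos = qos_specs_create(context,
                           {'name': 'gold-iops',
                            'qos_specs': {'consumer': 'back-end',
                                          'total_iops_sec': '1000',
                                          'total_bytes_sec': '1024000'}})
    qos_specs_associate(context, qos['id'], type_id)
    return qos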
@require_admin_context
def _qos_specs_get_by_name(context, name, session=None, inactive=False):
read_deleted = 'yes' if inactive else 'no'
results = model_query(context, models.QualityOfServiceSpecs,
read_deleted=read_deleted, session=session). \
filter_by(key='QoS_Specs_Name'). \
filter_by(value=name). \
options(joinedload('specs')).all()
if not results:
raise exception.QoSSpecsNotFound(specs_id=name)
return results
@require_admin_context
def _qos_specs_get_ref(context, qos_specs_id, session=None, inactive=False):
read_deleted = 'yes' if inactive else 'no'
result = model_query(context, models.QualityOfServiceSpecs,
read_deleted=read_deleted, session=session). \
filter_by(id=qos_specs_id). \
options(joinedload_all('specs')).all()
if not result:
raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
return result
def _dict_with_children_specs(specs):
"""Convert specs list to a dict."""
result = {}
for spec in specs:
# Skip deleted keys
if not spec['deleted']:
result.update({spec['key']: spec['value']})
return result
def _dict_with_qos_specs(rows):
"""Convert qos specs query results to list.
    Qos specs query results are a list of quality_of_service_specs refs;
    some are root entries of a qos specs (key == 'QoS_Specs_Name') and the
    rest are children entries, a.k.a. the detailed specs of a qos specs.
    This function converts the query results into a list of dicts, one per
    qos specs, each carrying its id, name, consumer and detailed specs.
"""
result = []
for row in rows:
if row['key'] == 'QoS_Specs_Name':
member = {}
member['name'] = row['value']
member.update(dict(id=row['id']))
if row.specs:
spec_dict = _dict_with_children_specs(row.specs)
member.update(dict(consumer=spec_dict['consumer']))
del spec_dict['consumer']
member.update(dict(specs=spec_dict))
result.append(member)
return result
@require_admin_context
def qos_specs_get(context, qos_specs_id, inactive=False):
rows = _qos_specs_get_ref(context, qos_specs_id, None, inactive)
return _dict_with_qos_specs(rows)[0]
@require_admin_context
def qos_specs_get_all(context, inactive=False, filters=None):
"""Returns a list of all qos_specs.
    The results look like:
[{
'id': SPECS-UUID,
'name': 'qos_spec-1',
'consumer': 'back-end',
'specs': {
'key1': 'value1',
'key2': 'value2',
...
}
},
{
'id': SPECS-UUID,
'name': 'qos_spec-2',
'consumer': 'front-end',
'specs': {
'key1': 'value1',
'key2': 'value2',
...
}
},
]
"""
filters = filters or {}
# TODO(zhiteng) Add filters for 'consumer'
read_deleted = "yes" if inactive else "no"
rows = model_query(context, models.QualityOfServiceSpecs,
read_deleted=read_deleted). \
options(joinedload_all('specs')).all()
return _dict_with_qos_specs(rows)
@require_admin_context
def qos_specs_get_by_name(context, name, inactive=False):
rows = _qos_specs_get_by_name(context, name, None, inactive)
return _dict_with_qos_specs(rows)[0]
@require_admin_context
def qos_specs_associations_get(context, qos_specs_id):
"""Return all entities associated with specified qos specs.
For now, the only entity that is possible to associate with
a qos specs is volume type, so this is just a wrapper of
volume_type_qos_associations_get(). But it's possible to
extend qos specs association to other entities, such as volumes,
sometime in future.
"""
# Raise QoSSpecsNotFound if no specs found
_qos_specs_get_ref(context, qos_specs_id, None)
return volume_type_qos_associations_get(context, qos_specs_id)
@require_admin_context
def qos_specs_associate(context, qos_specs_id, type_id):
"""Associate volume type from specified qos specs."""
return volume_type_qos_associate(context, type_id, qos_specs_id)
@require_admin_context
def qos_specs_disassociate(context, qos_specs_id, type_id):
"""Disassociate volume type from specified qos specs."""
return volume_type_qos_disassociate(context, qos_specs_id, type_id)
@require_admin_context
def qos_specs_disassociate_all(context, qos_specs_id):
"""Disassociate all entities associated with specified qos specs.
For now, the only entity that is possible to associate with
a qos specs is volume type, so this is just a wrapper of
volume_type_qos_disassociate_all(). But it's possible to
extend qos specs association to other entities, such as volumes,
sometime in future.
"""
return volume_type_qos_disassociate_all(context, qos_specs_id)
@require_admin_context
def qos_specs_item_delete(context, qos_specs_id, key):
session = get_session()
with session.begin():
_qos_specs_get_item(context, qos_specs_id, key)
session.query(models.QualityOfServiceSpecs). \
filter(models.QualityOfServiceSpecs.key == key). \
filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def qos_specs_delete(context, qos_specs_id):
session = get_session()
with session.begin():
_qos_specs_get_ref(context, qos_specs_id, session)
session.query(models.QualityOfServiceSpecs).\
filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id,
models.QualityOfServiceSpecs.specs_id ==
qos_specs_id)).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def _qos_specs_get_item(context, qos_specs_id, key, session=None):
result = model_query(context, models.QualityOfServiceSpecs,
session=session). \
filter(models.QualityOfServiceSpecs.key == key). \
filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
first()
if not result:
raise exception.QoSSpecsKeyNotFound(
specs_key=key,
specs_id=qos_specs_id)
return result
@require_admin_context
def qos_specs_update(context, qos_specs_id, specs):
"""Make updates to an existing qos specs.
Perform add, update or delete key/values to a qos specs.
"""
session = get_session()
with session.begin():
# make sure qos specs exists
_qos_specs_get_ref(context, qos_specs_id, session)
spec_ref = None
for key in specs.keys():
try:
spec_ref = _qos_specs_get_item(
context, qos_specs_id, key, session)
except exception.QoSSpecsKeyNotFound:
spec_ref = models.QualityOfServiceSpecs()
id = None
if spec_ref.get('id', None):
id = spec_ref['id']
else:
id = str(uuid.uuid4())
value = dict(id=id, key=key, value=specs[key],
specs_id=qos_specs_id,
deleted=False)
            LOG.debug('qos_specs_update() value: %s', value)
spec_ref.update(value)
spec_ref.save(session=session)
return specs
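# Example (illustrative only): raise one existing limit and add a new key
# on a qos specs entry; both keys are hypothetical backend limits and the
# specs id is assumed to exist.
def _example_tune_qos(context, qos_specs_id):
    return qos_specs_update(context, qos_specs_id,
                            {'total_iops_sec': '2000',
                             'read_iops_sec': '1500'})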
####################
@require_context
def volume_type_encryption_get(context, volume_type_id, session=None):
return model_query(context, models.Encryption, session=session,
read_deleted="no").\
filter_by(volume_type_id=volume_type_id).first()
@require_admin_context
def volume_type_encryption_delete(context, volume_type_id):
session = get_session()
with session.begin():
encryption = volume_type_encryption_get(context, volume_type_id,
session)
encryption.update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def volume_type_encryption_create(context, volume_type_id, values):
session = get_session()
with session.begin():
encryption = models.Encryption()
if 'volume_type_id' not in values:
values['volume_type_id'] = volume_type_id
if 'encryption_id' not in values:
values['encryption_id'] = six.text_type(uuid.uuid4())
encryption.update(values)
session.add(encryption)
return encryption
@require_admin_context
def volume_type_encryption_update(context, volume_type_id, values):
session = get_session()
with session.begin():
encryption = volume_type_encryption_get(context, volume_type_id,
session)
if not encryption:
            raise exception.VolumeTypeEncryptionNotFound(
                type_id=volume_type_id)
encryption.update(values)
return encryption
def volume_type_encryption_volume_get(context, volume_type_id, session=None):
volume_list = _volume_get_query(context, session=session,
project_only=False).\
filter_by(volume_type_id=volume_type_id).\
all()
return volume_list
####################
@require_context
def volume_encryption_metadata_get(context, volume_id, session=None):
"""Return the encryption key id for a given volume."""
volume_ref = _volume_get(context, volume_id)
encryption_ref = volume_type_encryption_get(context,
volume_ref['volume_type_id'])
return {
'encryption_key_id': volume_ref['encryption_key_id'],
'control_location': encryption_ref['control_location'],
'cipher': encryption_ref['cipher'],
'key_size': encryption_ref['key_size'],
'provider': encryption_ref['provider'],
}
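# Example (illustrative only): a caller such as a compute-side consumer
# could use the mapping above to decide whether an encryptor must be
# attached.  The volume is assumed to belong to a volume type that has
# encryption configured; otherwise volume_type_encryption_get() returns
# None and the lookup above would fail.
def _example_volume_needs_encryptor(context, volume_id):
    meta = volume_encryption_metadata_get(context, volume_id)
    return meta['encryption_key_id'] is not None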
####################
@require_context
def _volume_glance_metadata_get_all(context, session=None):
query = model_query(context,
models.VolumeGlanceMetadata,
session=session)
if is_user_context(context):
query = query.filter(
models.Volume.id == models.VolumeGlanceMetadata.volume_id,
models.Volume.project_id == context.project_id)
return query.all()
@require_context
def volume_glance_metadata_get_all(context):
"""Return the Glance metadata for all volumes."""
return _volume_glance_metadata_get_all(context)
@require_context
@require_volume_exists
def _volume_glance_metadata_get(context, volume_id, session=None):
rows = model_query(context, models.VolumeGlanceMetadata, session=session).\
filter_by(volume_id=volume_id).\
filter_by(deleted=False).\
all()
if not rows:
raise exception.GlanceMetadataNotFound(id=volume_id)
return rows
@require_context
@require_volume_exists
def volume_glance_metadata_get(context, volume_id):
"""Return the Glance metadata for the specified volume."""
return _volume_glance_metadata_get(context, volume_id)
@require_context
@require_snapshot_exists
def _volume_snapshot_glance_metadata_get(context, snapshot_id, session=None):
rows = model_query(context, models.VolumeGlanceMetadata, session=session).\
filter_by(snapshot_id=snapshot_id).\
filter_by(deleted=False).\
all()
if not rows:
raise exception.GlanceMetadataNotFound(id=snapshot_id)
return rows
@require_context
@require_snapshot_exists
def volume_snapshot_glance_metadata_get(context, snapshot_id):
"""Return the Glance metadata for the specified snapshot."""
return _volume_snapshot_glance_metadata_get(context, snapshot_id)
@require_context
@require_volume_exists
def volume_glance_metadata_create(context, volume_id, key, value):
"""Update the Glance metadata for a volume by adding a new key:value pair.
This API does not support changing the value of a key once it has been
created.
"""
session = get_session()
with session.begin():
rows = session.query(models.VolumeGlanceMetadata).\
filter_by(volume_id=volume_id).\
filter_by(key=key).\
filter_by(deleted=False).all()
if len(rows) > 0:
raise exception.GlanceMetadataExists(key=key,
volume_id=volume_id)
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = key
vol_glance_metadata.value = six.text_type(value)
session.add(vol_glance_metadata)
return
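# Example (illustrative only): record the image properties of a bootable
# volume.  The image id is a made-up placeholder; calling this twice for
# the same key raises GlanceMetadataExists because values cannot be
# changed once written.
def _example_tag_bootable_volume(context, volume_id):
    volume_glance_metadata_create(context, volume_id,
                                  'image_id', 'hypothetical-image-uuid')
    volume_glance_metadata_create(context, volume_id, 'disk_format', 'qcow2')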
@require_context
@require_snapshot_exists
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
"""Update the Glance metadata for a snapshot.
This copies all of the key:value pairs from the originating volume, to
ensure that a volume created from the snapshot will retain the
original metadata.
"""
session = get_session()
with session.begin():
metadata = _volume_glance_metadata_get(context, volume_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.snapshot_id = snapshot_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
@require_volume_exists
def volume_glance_metadata_copy_from_volume_to_volume(context,
src_volume_id,
volume_id):
"""Update the Glance metadata for a volume.
    This copies all of the key:value pairs from the originating volume,
to ensure that a volume created from the volume (clone) will
retain the original metadata.
"""
session = get_session()
with session.begin():
metadata = _volume_glance_metadata_get(context,
src_volume_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
@require_volume_exists
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
"""Update the Glance metadata from a volume (created from a snapshot) by
copying all of the key:value pairs from the originating snapshot.
This is so that the Glance metadata from the original volume is retained.
"""
session = get_session()
with session.begin():
metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
def volume_glance_metadata_delete_by_volume(context, volume_id):
model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\
filter_by(snapshot_id=snapshot_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
###############################
@require_context
def backup_get(context, backup_id):
result = model_query(context, models.Backup, project_only=True).\
filter_by(id=backup_id).\
first()
if not result:
raise exception.BackupNotFound(backup_id=backup_id)
return result
def _backup_get_all(context, filters=None):
session = get_session()
with session.begin():
# Generate the query
query = model_query(context, models.Backup)
if filters:
query = query.filter_by(**filters)
return query.all()
@require_admin_context
def backup_get_all(context, filters=None):
return _backup_get_all(context, filters)
@require_admin_context
def backup_get_all_by_host(context, host):
return model_query(context, models.Backup).filter_by(host=host).all()
@require_context
def backup_get_all_by_project(context, project_id, filters=None):
authorize_project_context(context, project_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['project_id'] = project_id
return _backup_get_all(context, filters)
@require_context
def backup_get_all_by_volume(context, volume_id, filters=None):
authorize_project_context(context, volume_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['volume_id'] = volume_id
return _backup_get_all(context, filters)
@require_context
def backup_create(context, values):
backup = models.Backup()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
backup.update(values)
session = get_session()
with session.begin():
backup.save(session)
return backup
@require_context
def backup_update(context, backup_id, values):
session = get_session()
with session.begin():
backup = model_query(context, models.Backup,
session=session, read_deleted="yes").\
filter_by(id=backup_id).first()
if not backup:
raise exception.BackupNotFound(
_("No backup with id %s") % backup_id)
backup.update(values)
return backup
@require_admin_context
def backup_destroy(context, backup_id):
model_query(context, models.Backup).\
filter_by(id=backup_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
###############################
@require_context
def _transfer_get(context, transfer_id, session=None):
query = model_query(context, models.Transfer,
session=session).\
filter_by(id=transfer_id)
if not is_admin_context(context):
volume = models.Volume
query = query.filter(models.Transfer.volume_id == volume.id,
volume.project_id == context.project_id)
result = query.first()
if not result:
raise exception.TransferNotFound(transfer_id=transfer_id)
return result
@require_context
def transfer_get(context, transfer_id):
return _transfer_get(context, transfer_id)
def _translate_transfers(transfers):
results = []
for transfer in transfers:
r = {}
r['id'] = transfer['id']
r['volume_id'] = transfer['volume_id']
r['display_name'] = transfer['display_name']
r['created_at'] = transfer['created_at']
r['deleted'] = transfer['deleted']
results.append(r)
return results
@require_admin_context
def transfer_get_all(context):
results = model_query(context, models.Transfer).all()
return _translate_transfers(results)
@require_context
def transfer_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
query = model_query(context, models.Transfer).\
filter(models.Volume.id == models.Transfer.volume_id,
models.Volume.project_id == project_id)
results = query.all()
return _translate_transfers(results)
@require_context
def transfer_create(context, values):
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
volume_ref = _volume_get(context,
values['volume_id'],
session=session)
if volume_ref['status'] != 'available':
msg = _('Volume must be available')
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume_ref['status'] = 'awaiting-transfer'
transfer = models.Transfer()
transfer.update(values)
session.add(transfer)
volume_ref.update(volume_ref)
return transfer
@require_context
@_retry_on_deadlock
def transfer_destroy(context, transfer_id):
session = get_session()
with session.begin():
transfer_ref = _transfer_get(context,
transfer_id,
session=session)
volume_ref = _volume_get(context,
transfer_ref['volume_id'],
session=session)
# If the volume state is not 'awaiting-transfer' don't change it, but
# we can still mark the transfer record as deleted.
if volume_ref['status'] != 'awaiting-transfer':
msg = _('Volume in unexpected state %s, '
'expected awaiting-transfer') % volume_ref['status']
LOG.error(msg)
else:
volume_ref['status'] = 'available'
volume_ref.update(volume_ref)
volume_ref.save(session=session)
model_query(context, models.Transfer, session=session).\
filter_by(id=transfer_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def transfer_accept(context, transfer_id, user_id, project_id):
session = get_session()
with session.begin():
transfer_ref = _transfer_get(context, transfer_id, session)
volume_id = transfer_ref['volume_id']
volume_ref = _volume_get(context, volume_id, session=session)
if volume_ref['status'] != 'awaiting-transfer':
msg = _('Transfer %(transfer_id)s: Volume id %(volume_id)s in '
'unexpected state %(status)s, expected '
'awaiting-transfer') % {'transfer_id': transfer_id,
'volume_id': volume_ref['id'],
'status': volume_ref['status']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume_ref['status'] = 'available'
volume_ref['user_id'] = user_id
volume_ref['project_id'] = project_id
volume_ref['updated_at'] = literal_column('updated_at')
volume_ref.update(volume_ref)
session.query(models.Transfer).\
filter_by(id=transfer_ref['id']).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
###############################
@require_admin_context
def _consistencygroup_data_get_for_project(context, project_id,
session=None):
query = model_query(context,
func.count(models.ConsistencyGroup.id),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
result = query.first()
return (0, result[0] or 0)
@require_admin_context
def consistencygroup_data_get_for_project(context, project_id):
return _consistencygroup_data_get_for_project(context, project_id)
@require_context
def _consistencygroup_get(context, consistencygroup_id, session=None):
result = model_query(context, models.ConsistencyGroup, session=session,
project_only=True).\
filter_by(id=consistencygroup_id).\
first()
if not result:
raise exception.ConsistencyGroupNotFound(
consistencygroup_id=consistencygroup_id)
return result
@require_context
def consistencygroup_get(context, consistencygroup_id):
return _consistencygroup_get(context, consistencygroup_id)
@require_admin_context
def consistencygroup_get_all(context):
return model_query(context, models.ConsistencyGroup).all()
@require_context
def consistencygroup_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
return model_query(context, models.ConsistencyGroup).\
filter_by(project_id=project_id).all()
@require_context
def consistencygroup_create(context, values):
consistencygroup = models.ConsistencyGroup()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
consistencygroup.update(values)
session.add(consistencygroup)
return _consistencygroup_get(context, values['id'], session=session)
@require_context
def consistencygroup_update(context, consistencygroup_id, values):
session = get_session()
with session.begin():
result = model_query(context, models.ConsistencyGroup,
project_only=True).\
filter_by(id=consistencygroup_id).\
first()
if not result:
raise exception.ConsistencyGroupNotFound(
_("No consistency group with id %s") % consistencygroup_id)
result.update(values)
result.save(session=session)
return result
@require_admin_context
def consistencygroup_destroy(context, consistencygroup_id):
session = get_session()
with session.begin():
model_query(context, models.ConsistencyGroup, session=session).\
filter_by(id=consistencygroup_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
###############################
@require_context
def _cgsnapshot_get(context, cgsnapshot_id, session=None):
result = model_query(context, models.Cgsnapshot, session=session,
project_only=True).\
filter_by(id=cgsnapshot_id).\
first()
if not result:
raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id)
return result
@require_context
def cgsnapshot_get(context, cgsnapshot_id):
return _cgsnapshot_get(context, cgsnapshot_id)
@require_admin_context
def cgsnapshot_get_all(context):
return model_query(context, models.Cgsnapshot).all()
@require_admin_context
def cgsnapshot_get_all_by_group(context, group_id):
return model_query(context, models.Cgsnapshot).\
filter_by(consistencygroup_id=group_id).all()
@require_context
def cgsnapshot_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
return model_query(context, models.Cgsnapshot).\
filter_by(project_id=project_id).all()
@require_context
def cgsnapshot_create(context, values):
cgsnapshot = models.Cgsnapshot()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
cgsnapshot.update(values)
session.add(cgsnapshot)
return _cgsnapshot_get(context, values['id'], session=session)
@require_context
def cgsnapshot_update(context, cgsnapshot_id, values):
session = get_session()
with session.begin():
result = model_query(context, models.Cgsnapshot, project_only=True).\
filter_by(id=cgsnapshot_id).\
first()
if not result:
raise exception.CgSnapshotNotFound(
_("No cgsnapshot with id %s") % cgsnapshot_id)
result.update(values)
result.save(session=session)
return result
@require_admin_context
def cgsnapshot_destroy(context, cgsnapshot_id):
session = get_session()
with session.begin():
model_query(context, models.Cgsnapshot, session=session).\
filter_by(id=cgsnapshot_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def purge_deleted_rows(context, age_in_days):
"""Purge deleted rows older than age from cinder tables."""
try:
age_in_days = int(age_in_days)
except ValueError:
msg = _LE('Invalid value for age, %(age)s')
LOG.exception(msg, {'age': age_in_days})
raise exception.InvalidParameterValue(msg % {'age': age_in_days})
if age_in_days <= 0:
msg = _LE('Must supply a positive value for age')
LOG.exception(msg)
raise exception.InvalidParameterValue(msg)
engine = get_engine()
session = get_session()
metadata = MetaData()
metadata.bind = engine
tables = []
for model_class in models.__dict__.itervalues():
if hasattr(model_class, "__tablename__") \
and hasattr(model_class, "deleted"):
tables.append(model_class.__tablename__)
# Reorder the list so the volumes table is last to avoid FK constraints
tables.remove("volumes")
tables.append("volumes")
for table in tables:
t = Table(table, metadata, autoload=True)
LOG.info(_LI('Purging deleted rows older than age=%(age)d days '
'from table=%(table)s'), {'age': age_in_days,
'table': table})
deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days)
try:
with session.begin():
result = session.execute(
t.delete()
.where(t.c.deleted_at < deleted_age))
except db_exc.DBReferenceError:
LOG.exception(_LE('DBError detected when purging from '
'table=%(table)s'), {'table': table})
raise
rows_purged = result.rowcount
LOG.info(_LI("Deleted %(row)d rows from table=%(table)s"),
{'row': rows_purged, 'table': table})
###############################
@require_context
def driver_initiator_data_update(context, initiator, namespace, updates):
session = get_session()
with session.begin():
set_values = updates.get('set_values', {})
for key, value in set_values.items():
data = session.query(models.DriverInitiatorData).\
filter_by(initiator=initiator).\
filter_by(namespace=namespace).\
filter_by(key=key).\
first()
if data:
data.update({'value': value})
data.save(session=session)
else:
data = models.DriverInitiatorData()
data.initiator = initiator
data.namespace = namespace
data.key = key
data.value = value
session.add(data)
remove_values = updates.get('remove_values', [])
for key in remove_values:
session.query(models.DriverInitiatorData).\
filter_by(initiator=initiator).\
filter_by(namespace=namespace).\
filter_by(key=key).\
delete()
@require_context
def driver_initiator_data_get(context, initiator, namespace):
session = get_session()
with session.begin():
return session.query(models.DriverInitiatorData).\
filter_by(initiator=initiator).\
filter_by(namespace=namespace).\
all()
| apache-2.0 | -3,084,373,201,303,192,600 | 33.133151 | 79 | 0.595613 | false |
vishnu2kmohan/dcos | test_util/aws.py | 1 | 23175 | """ Abstractions for handling resources via Amazon Web Services (AWS) API
The intention of these utilities is to allow other infrastructure to
interact with AWS without having to understand AWS APIs. Additionally,
this module provides helper functions for the most common queries required
to manipulate and test a DC/OS cluster, which would be otherwise cumbersome
to do with AWS API calls only
BotoWrapper: AWS credentials and region bound to various helper methods
CfStack: Generic representation of a CloudFormation stack
DcosCfStack: Represents DC/OS in a simple deployment
DcosZenCfStack: Represents DC/OS deployed from a zen template
MasterStack: thin wrapper for master stack in a zen template
PrivateAgentStack: thin wrapper for private agent stack in a zen template
PublicAgentStack: thin wrapper for public agent stack in a zen template
BareClusterCfStack: Represents a homogeneous cluster of hosts with a specific AMI
"""
import logging
import boto3
import pkg_resources
import retrying
from botocore.exceptions import ClientError
from test_util.helpers import Host, retry_boto_rate_limits, SshInfo
log = logging.getLogger(__name__)
def template_by_instance_type(instance_type):
if instance_type.split('.')[0] in ('c4', 't2', 'm4'):
template = pkg_resources.resource_string('test_util', 'templates/vpc-ebs-only-cluster-template.json')
else:
template = pkg_resources.resource_string('test_util', 'templates/vpc-cluster-template.json')
return template.decode('utf-8')
def param_dict_to_aws_format(user_parameters):
return [{'ParameterKey': str(k), 'ParameterValue': str(v)} for k, v in user_parameters.items()]
@retry_boto_rate_limits
def instances_to_hosts(instances):
return [Host(i.private_ip_address, i.public_ip_address) for i in instances]
def fetch_stack(stack_name, boto_wrapper):
    log.debug('Attempting to fetch AWS Stack: {}'.format(stack_name))
stack = boto_wrapper.resource('cloudformation').Stack(stack_name)
for resource in stack.resource_summaries.all():
if resource.logical_resource_id == 'MasterStack':
log.debug('Using Zen DC/OS Cloudformation interface')
return DcosZenCfStack(stack_name, boto_wrapper)
if resource.logical_resource_id == 'MasterServerGroup':
log.debug('Using Basic DC/OS Cloudformation interface')
return DcosCfStack(stack_name, boto_wrapper)
if resource.logical_resource_id == 'BareServerAutoScale':
log.debug('Using Bare Cluster Cloudformation interface')
return BareClusterCfStack(stack_name, boto_wrapper)
log.warning('No recognized resources found; using generic stack')
return CfStack(stack_name, boto_wrapper)
class BotoWrapper():
def __init__(self, region, aws_access_key_id, aws_secret_access_key):
self.region = region
self.session = boto3.session.Session(
aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
def client(self, name):
return self.session.client(service_name=name, region_name=self.region)
def resource(self, name):
return self.session.resource(service_name=name, region_name=self.region)
def create_key_pair(self, key_name):
"""Returns private key of newly generated pair
"""
log.info('Creating KeyPair: {}'.format(key_name))
key = self.client('ec2').create_key_pair(KeyName=key_name)
return key['KeyMaterial']
def delete_key_pair(self, key_name):
log.info('Deleting KeyPair: {}'.format(key_name))
self.resource('ec2').KeyPair(key_name).delete()
def create_stack(self, name, parameters, template_url=None, template_body=None, deploy_timeout=60):
"""Pulls template and checks user params versus temlate params.
Does simple casting of strings or numbers
Starts stack creation if validation is successful
"""
log.info('Requesting AWS CloudFormation: {}'.format(name))
args = {
'StackName': name,
'DisableRollback': True,
'TimeoutInMinutes': deploy_timeout,
'Capabilities': ['CAPABILITY_IAM'],
# this python API only accepts data in string format; cast as string here
# so that we may pass parameters directly from yaml (which parses numbers as non-strings)
'Parameters': param_dict_to_aws_format(parameters)}
if template_body is not None:
            assert template_url is None, 'template_body and template_url cannot be supplied simultaneously'
args['TemplateBody'] = template_body
else:
assert template_url is not None, 'template_url must be set if template_body is not provided'
args['TemplateURL'] = template_url
return self.resource('cloudformation').create_stack(**args)
def create_vpc_tagged(self, cidr, name_tag):
ec2 = self.client('ec2')
log.info('Creating new VPC...')
vpc_id = ec2.create_vpc(CidrBlock=cidr, InstanceTenancy='default')['Vpc']['VpcId']
ec2.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
ec2.create_tags(Resources=[vpc_id], Tags=[{'Key': 'Name', 'Value': name_tag}])
log.info('Created VPC with ID: {}'.format(vpc_id))
return vpc_id
def create_internet_gateway_tagged(self, vpc_id, name_tag):
ec2 = self.client('ec2')
log.info('Creating new InternetGateway...')
gateway_id = ec2.create_internet_gateway()['InternetGateway']['InternetGatewayId']
ec2.attach_internet_gateway(InternetGatewayId=gateway_id, VpcId=vpc_id)
ec2.create_tags(Resources=[gateway_id], Tags=[{'Key': 'Name', 'Value': name_tag}])
log.info('Created internet gateway with ID: {}'.format(gateway_id))
return gateway_id
def create_subnet_tagged(self, vpc_id, cidr, name_tag):
ec2 = self.client('ec2')
log.info('Creating new Subnet...')
subnet_id = ec2.create_subnet(VpcId=vpc_id, CidrBlock=cidr)['Subnet']['SubnetId']
ec2.create_tags(Resources=[subnet_id], Tags=[{'Key': 'Name', 'Value': name_tag}])
ec2.get_waiter('subnet_available').wait(SubnetIds=[subnet_id])
log.info('Created subnet with ID: {}'.format(subnet_id))
return subnet_id
def delete_subnet(self, subnet_id):
log.info('Deleting subnet: {}'.format(subnet_id))
self.client('ec2').delete_subnet(SubnetId=subnet_id)
def delete_internet_gateway(self, gateway_id):
ig = self.resource('ec2').InternetGateway(gateway_id)
for vpc in ig.attachments:
vpc_id = vpc['VpcId']
log.info('Detaching gateway {} from vpc {}'.format(gateway_id, vpc_id))
ig.detach_from_vpc(VpcId=vpc_id)
log.info('Deleting internet gateway: {}'.format(gateway_id))
ig.delete()
def delete_vpc(self, vpc_id):
log.info('Deleting vpc: {}'.format(vpc_id))
self.client('ec2').delete_vpc(VpcId=vpc_id)
@retry_boto_rate_limits
def get_auto_scaling_instances(self, asg_physical_resource_id):
""" Returns instance objects as described here:
http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#instance
"""
ec2 = self.resource('ec2')
return [ec2.Instance(i['InstanceId']) for asg in self.client('autoscaling').
describe_auto_scaling_groups(
AutoScalingGroupNames=[asg_physical_resource_id])
['AutoScalingGroups'] for i in asg['Instances']]
class CfStack:
def __init__(self, stack_name, boto_wrapper):
self.boto_wrapper = boto_wrapper
self.stack = self.boto_wrapper.resource('cloudformation').Stack(stack_name)
def wait_for_status_change(self, state_1, state_2):
"""
Note: Do not use unwrapped boto waiter class, it has very poor error handling
Stacks can have one of the following statuses. See:
http://boto3.readthedocs.io/en/latest/reference/
services/cloudformation.html#CloudFormation.Client.describe_stacks
CREATE_IN_PROGRESS, CREATE_FAILED, CREATE_COMPLETE
ROLLBACK_IN_PROGRESS, ROLLBACK_FAILED, ROLLBACK_COMPLETE
DELETE_IN_PROGRESS, DELETE_FAILED, DELETE_COMPLETE
UPDATE_IN_PROGRESS, UPDATE_COMPLETE_CLEANUP_IN_PROGRESS
UPDATE_COMPLETE, UPDATE_ROLLBACK_IN_PROGRESS
UPDATE_ROLLBACK_FAILED, UPDATE_ROLLBACK_COMPLETE
UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS
"""
log.info('Waiting for status to change from {} to {}'.format(state_1, state_2))
@retrying.retry(wait_fixed=60 * 1000,
retry_on_result=lambda res: res is False,
retry_on_exception=lambda ex: False)
def wait_loop():
stack_details = self.get_stack_details()
stack_status = stack_details['StackStatus']
if stack_status == state_2:
return True
if stack_status != state_1:
log.error('Stack Details: {}'.format(stack_details))
for event in self.get_stack_events():
log.error('Stack Events: {}'.format(event))
raise Exception('StackStatus changed unexpectedly to: {}'.format(stack_status))
log.info('Continuing to wait...')
return False
wait_loop()
def wait_for_complete(self):
status = self.get_stack_details()['StackStatus']
if status.endswith('_COMPLETE'):
return
elif status.endswith('_IN_PROGRESS'):
self.wait_for_status_change(
status, status.replace('IN_PROGRESS', 'COMPLETE'))
else:
raise Exception('AWS Stack has entered unexpected state: {}'.format(status))
@retry_boto_rate_limits
def get_stack_details(self):
details = self.boto_wrapper.client('cloudformation').describe_stacks(
StackName=self.stack.stack_id)['Stacks'][0]
log.debug('Stack details: {}'.format(details))
return details
@retry_boto_rate_limits
def get_stack_events(self):
log.debug('Requesting stack events')
return self.boto_wrapper.client('cloudformation').describe_stack_events(
StackName=self.stack.stack_id)['StackEvents']
def get_parameter(self, param):
"""Returns param if in stack parameters, else returns None
"""
for p in self.stack.parameters:
if p['ParameterKey'] == param:
return p['ParameterValue']
raise KeyError('Key not found in template parameters: {}. Parameters: {}'.
format(param, self.stack.parameters))
def delete(self):
stack_id = self.stack.stack_id
log.info('Deleting stack: {}'.format(stack_id))
# boto stacks become unusable after deletion (e.g. status/info checks) if name-based
self.stack = self.boto_wrapper.resource('cloudformation').Stack(stack_id)
self.stack.delete()
log.info('Delete successfully initiated for {}'.format(stack_id))
class CleanupS3BucketMixin:
def delete_exhibitor_s3_bucket(self):
""" A non-empty S3 bucket cannot be deleted, so check to
        see if it should be emptied first. If it's non-empty, but
has more than one item, error out as the bucket is perhaps
not an exhibitor bucket and the user should be alerted
"""
try:
bucket = self.boto_wrapper.resource('s3').Bucket(
self.stack.Resource('ExhibitorS3Bucket').physical_resource_id)
except ClientError:
log.exception('Bucket could not be fetched')
log.warning('S3 bucket not found when expected during delete, moving on...')
return
log.info('Starting bucket {} deletion'.format(bucket))
all_objects = list(bucket.objects.all())
obj_count = len(all_objects)
if obj_count == 1:
all_objects[0].delete()
elif obj_count > 1:
            raise Exception('Expected one item in Exhibitor S3 bucket but found: {}'.format(obj_count))
        log.info('Trying to delete bucket {} itself'.format(bucket))
bucket.delete()
def delete(self):
self.delete_exhibitor_s3_bucket()
super().delete()
class DcosCfStack(CleanupS3BucketMixin, CfStack):
""" This abstraction will work for a simple DC/OS template.
A simple template has its exhibitor bucket and auto scaling groups
for each of the master, public agent, and private agent groups
"""
@classmethod
def create(cls, stack_name: str, template_url: str, public_agents: int, private_agents: int,
admin_location: str, key_pair_name: str, boto_wrapper: BotoWrapper):
parameters = {
'KeyName': key_pair_name,
'AdminLocation': admin_location,
'PublicSlaveInstanceCount': str(public_agents),
'SlaveInstanceCount': str(private_agents)}
stack = boto_wrapper.create_stack(stack_name, parameters, template_url=template_url)
# Use stack_name as the binding identifier. At time of implementation,
# stack.stack_name returns stack_id if Stack was created with ID
return cls(stack.stack_id, boto_wrapper), SSH_INFO['coreos']
@property
def master_instances(self):
yield from self.boto_wrapper.get_auto_scaling_instances(
self.stack.Resource('MasterServerGroup').physical_resource_id)
@property
def private_agent_instances(self):
yield from self.boto_wrapper.get_auto_scaling_instances(
self.stack.Resource('SlaveServerGroup').physical_resource_id)
@property
def public_agent_instances(self):
yield from self.boto_wrapper.get_auto_scaling_instances(
self.stack.Resource('PublicSlaveServerGroup').physical_resource_id)
def get_master_ips(self):
return instances_to_hosts(self.master_instances)
def get_private_agent_ips(self):
return instances_to_hosts(self.private_agent_instances)
def get_public_agent_ips(self):
return instances_to_hosts(self.public_agent_instances)
class MasterStack(CleanupS3BucketMixin, CfStack):
@property
def instances(self):
yield from self.boto_wrapper.get_auto_scaling_instances(
self.stack.Resource('MasterServerGroup').physical_resource_id)
class PrivateAgentStack(CfStack):
@property
def instances(self):
yield from self.boto_wrapper.get_auto_scaling_instances(
self.stack.Resource('PrivateAgentServerGroup').physical_resource_id)
class PublicAgentStack(CfStack):
@property
def instances(self):
yield from self.boto_wrapper.get_auto_scaling_instances(
self.stack.Resource('PublicAgentServerGroup').physical_resource_id)
class DcosZenCfStack(CfStack):
"""Zen stacks are stacks that have the masters, infra, public agents, and private
    agents split into resource stacks under one zen stack
"""
@classmethod
def create(cls, stack_name, boto_wrapper, template_url,
public_agents, private_agents, key_pair_name,
private_agent_type, public_agent_type, master_type,
gateway, vpc, private_subnet, public_subnet):
parameters = {
'KeyName': key_pair_name,
'Vpc': vpc,
'InternetGateway': gateway,
'MasterInstanceType': master_type,
'PublicAgentInstanceCount': public_agents,
'PublicAgentInstanceType': public_agent_type,
'PublicSubnet': public_subnet,
'PrivateAgentInstanceCount': private_agents,
'PrivateAgentInstanceType': private_agent_type,
'PrivateSubnet': private_subnet}
stack = boto_wrapper.create_stack(stack_name, parameters, template_url=template_url)
os_string = None
try:
os_string = template_url.split('/')[-1].split('.')[-2].split('-')[0]
ssh_info = CF_OS_SSH_INFO[os_string]
except (KeyError, IndexError):
log.critical('Unexpected template URL: {}'.format(template_url))
if os_string is not None:
log.critical('No SSH info for OS string: {}'.format(os_string))
raise
return cls(stack.stack_id, boto_wrapper), ssh_info
@property
def master_stack(self):
return MasterStack(
self.stack.Resource('MasterStack').physical_resource_id, self.boto_wrapper)
@property
def private_agent_stack(self):
return PrivateAgentStack(
self.stack.Resource('PrivateAgentStack').physical_resource_id, self.boto_wrapper)
@property
def public_agent_stack(self):
return PublicAgentStack(
self.stack.Resource('PublicAgentStack').physical_resource_id, self.boto_wrapper)
@property
def infrastructure(self):
return CfStack(self.stack.Resource('Infrastructure').physical_resource_id, self.boto_wrapper)
def delete(self):
log.info('Starting deletion of Zen CF stack')
# boto stacks become unusable after deletion (e.g. status/info checks) if name-based
self.stack = self.boto_wrapper.resource('cloudformation').Stack(self.stack.stack_id)
        # These resources might have failed to create or been removed earlier; catch their
        # failures and log them
for s in [self.infrastructure, self.master_stack, self.private_agent_stack,
self.public_agent_stack]:
try:
s.delete()
except:
log.exception('Delete encountered an error!')
super().delete()
@property
def master_instances(self):
yield from self.master_stack.instances
@property
def private_agent_instances(self):
yield from self.private_agent_stack.instances
@property
def public_agent_instances(self):
yield from self.public_agent_stack.instances
def get_master_ips(self):
return instances_to_hosts(self.master_instances)
def get_private_agent_ips(self):
return instances_to_hosts(self.private_agent_instances)
def get_public_agent_ips(self):
return instances_to_hosts(self.public_agent_instances)
class BareClusterCfStack(CfStack):
@classmethod
def create(cls, stack_name, instance_type, instance_os, instance_count,
admin_location, key_pair_name, boto_wrapper):
stack = cls.create_from_ami(
stack_name,
instance_type,
OS_AMIS[instance_os][boto_wrapper.region],
instance_count,
admin_location,
key_pair_name,
boto_wrapper,
)
return stack, OS_SSH_INFO[instance_os]
@classmethod
def create_from_ami(cls, stack_name, instance_type, instance_ami, instance_count,
admin_location, key_pair_name, boto_wrapper):
template = template_by_instance_type(instance_type)
parameters = {
'KeyName': key_pair_name,
'AllowAccessFrom': admin_location,
'ClusterSize': instance_count,
'InstanceType': instance_type,
'AmiCode': instance_ami,
}
stack = boto_wrapper.create_stack(stack_name, parameters, template_body=template)
return cls(stack.stack_id, boto_wrapper)
@property
def instances(self):
yield from self.boto_wrapper.get_auto_scaling_instances(
self.stack.Resource('BareServerAutoScale').physical_resource_id)
def get_host_ips(self):
return instances_to_hosts(self.instances)
SSH_INFO = {
'centos': SshInfo(
user='centos',
home_dir='/home/centos',
),
'coreos': SshInfo(
user='core',
home_dir='/home/core',
),
'debian': SshInfo(
user='admin',
home_dir='/home/admin',
),
'rhel': SshInfo(
user='ec2-user',
home_dir='/home/ec2-user',
),
'ubuntu': SshInfo(
user='ubuntu',
home_dir='/home/ubuntu',
),
}
OS_SSH_INFO = {
'cent-os-7': SSH_INFO['centos'],
'cent-os-7-dcos-prereqs': SSH_INFO['centos'],
'coreos': SSH_INFO['coreos'],
'debian-8': SSH_INFO['debian'],
'rhel-7': SSH_INFO['rhel'],
'ubuntu-16-04': SSH_INFO['ubuntu'],
}
CF_OS_SSH_INFO = {
'el7': SSH_INFO['centos'],
'coreos': SSH_INFO['coreos']
}
OS_AMIS = {
'cent-os-7': {'ap-northeast-1': 'ami-965345f8',
'ap-southeast-1': 'ami-332de750',
'ap-southeast-2': 'ami-c80320ab',
'eu-central-1': 'ami-1548ae7a',
'eu-west-1': 'ami-2ea92f5d',
'sa-east-1': 'ami-2921ad45',
'us-east-1': 'ami-fa9b9390',
'us-west-1': 'ami-12b3ce72',
'us-west-2': 'ami-edf11b8d'},
'cent-os-7-dcos-prereqs': {'ap-northeast-1': 'ami-1d50567a',
'ap-southeast-1': 'ami-f4a12097',
'ap-southeast-2': 'ami-0d50476e',
'eu-central-1': 'ami-d47fa4bb',
'eu-west-1': 'ami-b6c8ded0',
'sa-east-1': 'ami-41640d2d',
'us-east-1': 'ami-5f5d1449',
'us-west-1': 'ami-54614234',
'us-west-2': 'ami-61acce01'},
'coreos': {'ap-northeast-1': 'ami-84e0c7ea',
'ap-southeast-1': 'ami-84e0c7ea',
'ap-southeast-2': 'ami-f35b0590',
'eu-central-1': 'ami-fdd4c791',
'eu-west-1': 'ami-55d20b26',
'sa-east-1': 'ami-f35b0590',
'us-east-1': 'ami-37bdc15d',
'us-west-1': 'ami-27553a47',
'us-west-2': 'ami-00ebfc61'},
'debian-8': {'ap-northeast-1': 'ami-fe54f3fe',
'ap-southeast-1': 'ami-60989c32',
'ap-southeast-2': 'ami-07e3993d',
'eu-central-1': 'ami-b092aaad',
'eu-west-1': 'ami-0ed89d79',
'sa-east-1': 'ami-a5bd3fb8',
'us-east-1': 'ami-8b9a63e0',
'us-west-1': 'ami-a5d621e1',
'us-west-2': 'ami-3d56520d'},
'rhel-7': {'ap-northeast-1': 'ami-35556534',
'ap-southeast-1': 'ami-941031c6',
'ap-southeast-2': 'ami-83e08db9',
'eu-central-1': 'ami-e25e6cff',
'eu-west-1': 'ami-8cff51fb',
'sa-east-1': 'ami-595ce844',
'us-east-1': 'ami-a8d369c0',
'us-west-1': 'ami-33cdd876',
'us-west-2': 'ami-99bef1a9'},
'ubuntu-16-04': {'ap-northeast-1': 'ami-0919cd68',
'ap-southeast-1': 'ami-42934921',
'ap-southeast-2': 'ami-623c0d01',
'eu-central-1': 'ami-a9a557c6',
'eu-west-1': 'ami-643d4217',
'sa-east-1': 'ami-60bd2d0c',
'us-east-1': 'ami-2ef48339',
'us-west-1': 'ami-a9a8e4c9',
'us-west-2': 'ami-746aba14'}
}
| apache-2.0 | -2,762,029,711,024,407,600 | 40.457961 | 109 | 0.618037 | false |
COSMOGRAIL/PyCS | pycs/sim/old/frk.py | 1 | 5272 |
"""
forkmap -- Forking map(), uses all processors by default.
Connelly Barnes 2008, public domain. Based on forkmap by Kirk Strauser, rewritten and optimized. Version 1.0.2.
"""
import os, mmap, marshal, struct, cPickle
import ctypes, ctypes.util
import time, traceback
builtin_map = map
def nprocessors():
try:
try:
# Mac OS
libc=ctypes.cdll.LoadLibrary(ctypes.util.find_library('libc'))
v=ctypes.c_int(0)
size=ctypes.c_size_t(ctypes.sizeof(v))
libc.sysctlbyname('hw.ncpu', ctypes.c_voidp(ctypes.addressof(v)), ctypes.addressof(size), None, 0)
return v.value
except:
# Cygwin (Windows) and Linuxes
# Could try sysconf(_SC_NPROCESSORS_ONLN) (LSB) next. Instead, count processors in cpuinfo.
s = open('/proc/cpuinfo', 'r').read()
return s.replace(' ', '').replace('\t', '').count('processor:')
except:
return 1
nproc = nprocessors()
def map(f, *a, **kw):
"""
forkmap.map(..., n=nprocessors), same as map(...).
n must be a keyword arg; default n is number of physical processors.
"""
def writeobj(pipe, obj):
try:
s = marshal.dumps(obj)
s = struct.pack('i', len(s)) + s
except:
s = cPickle.dumps(obj)
s = struct.pack('i', -len(s)) + s
os.write(pipe, s)
def readobj(pipe):
n = struct.unpack('i', os.read(pipe, 4))[0]
s = ''
an = abs(n)
while len(s) < an:
s += os.read(pipe, min(65536, an-len(s)))
if n > 0:
return marshal.loads(s)
else:
return cPickle.loads(s)
n = kw.get('n', nproc)
if n == 1:
return builtin_map(f, *a)
if len(a) == 1:
L = a[0]
else:
L = zip(*a)
try:
len(L)
except TypeError:
L = list(L)
n = min(n, len(L))
ans = [None] * len(L)
pipes = [os.pipe() for i in range(n-1)]
for i in range(n):
if i < n-1 and not os.fork():
# Child, and not last processor
try:
try:
if len(a) == 1:
obj = builtin_map(f, L[i*len(L)//n:(i+1)*len(L)//n])
else:
obj = [f(*x) for x in L[i*len(L)//n:(i+1)*len(L)//n]]
except Exception, obj:
pass
writeobj(pipes[i][1], obj)
except:
traceback.print_exc()
finally:
os._exit(0)
elif i == n-1:
# Parent fork, and last processor
try:
if len(a) == 1:
ans[i*len(L)//n:] = builtin_map(f, L[i*len(L)//n:])
else:
ans[i*len(L)//n:] = [f(*x) for x in L[i*len(L)//n:]]
for k in range(n-1):
obj = readobj(pipes[k][0])
if isinstance(obj, Exception):
raise obj
ans[k*len(L)//n:(k+1)*len(L)//n] = obj
finally:
for j in range(n-1):
os.close(pipes[j][0])
os.close(pipes[j][1])
os.wait()
return ans
def bench():
print 'Benchmark:\n'
def timefunc(F):
start = time.time()
F()
return time.time() - start
def f1():
return builtin_map(lambda x: pow(x,10**1000,10**9), range(10**3))
def g1():
return map(lambda x: pow(x,10**1000,10**9), range(10**3))
def f2():
return builtin_map(lambda x: x**2, range(10**6))
def g2():
return map(lambda x: x**2, range(10**6))
import timeit
print 'Expensive operation, 10**3 items:'
print 'map (1 processor): ', timefunc(f1), 's'
print 'forkmap.map (%d processors):' % nproc, timefunc(g1), 's'
print
print 'Cheap operation, 10**6 items:'
print 'map (1 processor): ', timefunc(f2), 's'
print 'forkmap.map (%d processors):' % nproc, timefunc(g2), 's'
def test():
print 'Testing:'
assert [x**2 for x in range(10**4)] == map(lambda x: x**2, range(10**4))
assert [x**2 for x in range(10**4)] == map(lambda x: x**2, range(10**4), n=10)
assert [x**2 for x in range(10**4)] == map(lambda x: x**2, range(10**4), n=1)
assert [(x**2,) for x in range(10**3,10**4)] == map(lambda x: (x**2,), range(10**3,10**4))
assert [(x**2,) for x in range(10**3,10**4)] == map(lambda x: (x**2,), range(10**3,10**4), n=10)
assert [(x**2,) for x in range(10**3,10**4)] == map(lambda x: (x**2,), range(10**3,10**4), n=1)
assert builtin_map(lambda x,y:x+2*y, range(100),range(0,200,2)) == map(lambda x,y:x+2*y, range(100),range(0,200,2))
assert builtin_map(lambda x,y:x+2*y, range(100),range(0,200,2)) == map(lambda x,y:x+2*y, range(100),range(0,200,2), n=10)
assert builtin_map(lambda x,y:x+2*y, range(100),range(0,200,2)) == map(lambda x,y:x+2*y, range(100),range(0,200,2), n=2)
# Some Windows (Cygwin) boxes can't fork more than about 15 times, so only test to n=15
for n in range(1, 15):
assert [x**3 for x in range(200)] == map(lambda x: x**3, range(200), n=n)
def f(n):
if n == 1:
raise KeyError
def check_raises(func, exc):
e = None
try:
func()
except Exception, e:
pass
if not isinstance(e, exc):
raise ValueError('function did not raise specified error')
check_raises(lambda: map(f, [1, 0], n=2), KeyError)
check_raises(lambda: map(f, [0, 1], n=2), KeyError)
check_raises(lambda: map(f, [1, 0, 0], n=3), KeyError)
check_raises(lambda: map(f, [0, 1, 0], n=3), KeyError)
check_raises(lambda: map(f, [0, 0, 1], n=3), KeyError)
print 'forkmap.map: OK'
if __name__ == '__main__':
test()
bench()
| gpl-3.0 | -9,114,155,565,234,374,000 | 30.195266 | 123 | 0.55956 | false |
uxlsl/blog | movie/views.py | 1 | 2044 | import pytz
from django.views.generic import ListView
from django.db.models import Q
from django.contrib import messages
from rest_framework import viewsets, generics
from rest_framework import filters
from django.conf import settings
from .models import MovieRes, MovieUpdate, MovieNotify
from .serializers import MovieResSerializer, MovieNotifySerializer
class MovieList(ListView):
template_name = "movie_list.html"
context_object_name = "movie_list"
paginate_by = 10
def get_queryset(self):
if 'q' in self.request.GET:
queryset = MovieRes.objects.filter(
Q(movie__name__contains=self.request.GET['q'])
| Q(movie__starring__contains=self.request.GET['q'])
).order_by('-update_at')
else:
queryset = MovieRes.objects.order_by('-update_at')
return queryset
def get_context_data(self, **kwargs):
"""增加这个目的在于在页码中增加一个参数!
"""
ret = super(MovieList, self).get_context_data(**kwargs)
ret['q'] = self.request.GET.get('q', '')
if MovieUpdate.objects.count():
messages.add_message(self.request, messages.INFO,
'最后更新时间 {:%Y-%m-%d %H:%M:%S}'
.format(MovieUpdate.objects.first()
.update_at.astimezone(
pytz.timezone(settings.TIME_ZONE))))
return ret
class MovieResCreate(generics.CreateAPIView):
queryset = MovieRes.objects.all()
serializer_class = MovieResSerializer
def post(self, request, *args, **kwargs):
MovieUpdate.objects.create()
return super(MovieResCreate, self).post(request, *args, **kwargs)
class MovieNotifyViewSet(viewsets.ModelViewSet):
queryset = MovieNotify.objects.all()
model = MovieNotify
serializer_class = MovieNotifySerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ('is_can_notify',)
| gpl-2.0 | 5,641,885,930,326,095,000 | 35.290909 | 73 | 0.628758 | false |
chubbymaggie/claripy | claripy/frontend_mixins/model_cache_mixin.py | 1 | 9949 | import weakref
import itertools
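# ModelCache wraps a single concrete model (a dict mapping variable names to values)
# and evaluates ASTs against it without calling back into the solver backend.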
class ModelCache(object):
_defaults = { 0, 0.0, True }
def __init__(self, model):
self.model = model
self.replacements = weakref.WeakKeyDictionary()
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash(frozenset(self.model.items())) #pylint:disable=attribute-defined-outside-init
return self._hash
def __eq__(self, other):
return self.model == other.model
def __getstate__(self):
return (self.model,)
def __setstate__(self, s):
self.model = s[0]
self.replacements = weakref.WeakKeyDictionary()
#
# Splitting support
#
def filter(self, variables):
return ModelCache({ k:v for k,v in self.model.iteritems() if k in variables })
@staticmethod
def combine(*models):
return ModelCache(dict(itertools.chain.from_iterable(m.model.iteritems() for m in models)))
#
# Model-driven evaluation
#
def _leaf_op(self, a):
return (
all_operations.BVV(self.model.get(a.args[0], 0), a.length) if a.op == 'BVS' else
all_operations.BoolV(self.model.get(a.args[0], True)) if a.op == 'BoolS' else
all_operations.FPV(self.model.get(a.args[0], 0.0), a.args[1]) if a.op == 'FPS' else
a
)
def eval_ast(self, ast):
new_ast = ast._replace(self.replacements, leaf_operation=self._leaf_op)
return backends.concrete.eval(new_ast, 1)[0]
def eval_constraints(self, constraints):
return all(self.eval_ast(c) for c in constraints)
def eval_list(self, asts):
return tuple(self.eval_ast(c) for c in asts )
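# ModelCacheMixin keeps a set of cached ModelCache objects so that later
# satisfiable/eval/min/max/solution queries can be answered from cached models when possible.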
class ModelCacheMixin(object):
_MODEL_LIMIT = 257
def __init__(self, *args, **kwargs):
super(ModelCacheMixin, self).__init__(*args, **kwargs)
self._models = set()
self._exhausted = False
self._eval_exhausted = weakref.WeakSet()
self._max_exhausted = weakref.WeakSet()
self._min_exhausted = weakref.WeakSet()
def _blank_copy(self, c):
super(ModelCacheMixin, self)._blank_copy(c)
c._models = set()
c._exhausted = False
c._eval_exhausted = weakref.WeakSet()
c._max_exhausted = weakref.WeakSet()
c._min_exhausted = weakref.WeakSet()
def _copy(self, c):
super(ModelCacheMixin, self)._copy(c)
c._models = set(self._models)
c._exhausted = self._exhausted
c._eval_exhausted = weakref.WeakSet(self._eval_exhausted)
c._max_exhausted = weakref.WeakSet(self._max_exhausted)
c._min_exhausted = weakref.WeakSet(self._min_exhausted)
def _ana_getstate(self):
return (
self._models,
self._exhausted,
tuple(self._eval_exhausted),
tuple(self._max_exhausted),
tuple(self._min_exhausted),
super(ModelCacheMixin, self)._ana_getstate()
)
def _ana_setstate(self, s):
(
self._models,
self._exhausted,
_eval_exhausted,
_max_exhausted,
_min_exhausted,
base_state
) = s
super(ModelCacheMixin, self)._ana_setstate(base_state)
self._eval_exhausted = weakref.WeakSet(_eval_exhausted)
self._max_exhausted = weakref.WeakSet(_max_exhausted)
self._min_exhausted = weakref.WeakSet(_min_exhausted)
#
# Model cleaning
#
def simplify(self, *args, **kwargs):
results = super(ModelCacheMixin, self).simplify(*args, **kwargs)
if len(results) > 0 and any(c is false for c in results):
self._models.clear()
return results
def add(self, constraints, invalidate_cache=True, **kwargs):
if len(constraints) == 0:
return constraints
old_vars = frozenset(self.variables)
added = super(ModelCacheMixin, self).add(constraints, **kwargs)
if len(added) == 0:
return added
new_vars = any(a.variables - old_vars for a in added)
if new_vars or invalidate_cache:
# shortcut for unsat
if any(c is false for c in constraints):
self._models.clear()
still_valid = set(self._get_models(extra_constraints=added))
if len(still_valid) != len(self._models):
self._exhausted = False
self._eval_exhausted.clear()
self._max_exhausted.clear()
self._min_exhausted.clear()
self._models = still_valid
return added
def split(self):
results = super(ModelCacheMixin, self).split()
for r in results:
r._models = { m.filter(r.variables) for m in self._models }
return results
def combine(self, others):
combined = super(ModelCacheMixin, self).combine(others)
if any(len(o._models) == 0 for o in others) or len(self._models) == 0:
# this would need a solve anyways, so screw it
return combined
vars_count = len(self.variables) + sum(len(s.variables) for s in others)
all_vars = self.variables.union(*[s.variables for s in others])
if vars_count != len(all_vars):
# this is the case where there are variables missing from the models.
# We'll need more intelligence here to handle it
return combined
model_lists = [ self._models ]
model_lists.extend(o._models for o in others)
combined._models.update(
ModelCache.combine(*product) for product in
itertools.islice(itertools.product(*model_lists), self._MODEL_LIMIT)
)
return combined
def update(self, other):
"""
        Update this cache mixin with results discovered by another solver that was split off from it.
"""
acceptable_models = [ m for m in other._models if set(m.model.keys()) == self.variables ]
self._models.update(acceptable_models)
self._eval_exhausted.update(other._eval_exhausted)
self._max_exhausted.update(other._max_exhausted)
self._min_exhausted.update(other._min_exhausted)
#
# Cache retrieval
#
def _model_hook(self, m):
if len(self._models) < self._MODEL_LIMIT:
self._models.add(ModelCache(m))
def _get_models(self, extra_constraints=()):
for m in self._models:
if m.eval_constraints(extra_constraints):
yield m
def _get_batch_solutions(self, asts, n=None, extra_constraints=()):
results = set()
for m in self._get_models(extra_constraints):
results.add(m.eval_list(asts))
if len(results) == n:
break
return results
def _get_solutions(self, e, n=None, extra_constraints=()):
return tuple(v[0] for v in self._get_batch_solutions(
[e], n=n, extra_constraints=extra_constraints
))
#
# Cached functions
#
def satisfiable(self, extra_constraints=(), **kwargs):
for _ in self._get_models(extra_constraints=extra_constraints):
return True
return super(ModelCacheMixin, self).satisfiable(extra_constraints=extra_constraints, **kwargs)
def batch_eval(self, asts, n, extra_constraints=(), **kwargs):
results = self._get_batch_solutions(asts, n=n, extra_constraints=extra_constraints)
if len(results) == n or (len(asts) == 1 and asts[0].cache_key in self._eval_exhausted):
return results
remaining = n - len(results)
# TODO: faster to concat?
if len(results) != 0:
constraints = (all_operations.And(*[
all_operations.Or(*[a!=v for a,v in zip(asts, r)]) for r in results
]),) + extra_constraints
else:
constraints = extra_constraints
try:
results.update(super(ModelCacheMixin, self).batch_eval(
asts, remaining, extra_constraints=constraints, **kwargs
))
except UnsatError:
if len(results) == 0:
raise
if len(extra_constraints) == 0 and len(results) < n:
self._eval_exhausted.update(e.cache_key for e in asts)
return results
def eval(self, e, n, **kwargs):
return tuple( r[0] for r in ModelCacheMixin.batch_eval(self, [e], n=n, **kwargs) )
def min(self, e, extra_constraints=(), **kwargs):
cached = self._get_solutions(e, extra_constraints=extra_constraints)
if len(cached) > 0 and (e.cache_key in self._eval_exhausted or e.cache_key in self._min_exhausted):
return min(cached)
else:
m = super(ModelCacheMixin, self).min(e, extra_constraints=extra_constraints, **kwargs)
self._min_exhausted.add(e.cache_key)
return m
def max(self, e, extra_constraints=(), **kwargs):
cached = self._get_solutions(e, extra_constraints=extra_constraints)
if len(cached) > 0 and (e.cache_key in self._eval_exhausted or e.cache_key in self._max_exhausted):
return max(cached)
else:
m = super(ModelCacheMixin, self).max(e, extra_constraints=extra_constraints, **kwargs)
self._max_exhausted.add(e.cache_key)
return m
def solution(self, e, v, extra_constraints=(), **kwargs):
if isinstance(v, Base):
cached = self._get_batch_solutions([e,v], extra_constraints=extra_constraints)
if (e,v) in map(tuple, cached):
return True
else:
cached = self._get_solutions(e, extra_constraints=extra_constraints)
if v in cached:
return True
return super(ModelCacheMixin, self).solution(e, v, extra_constraints=extra_constraints, **kwargs)
from .. import backends, false
from ..errors import UnsatError
from ..ast import all_operations, Base
| bsd-2-clause | 8,530,850,976,090,461,000 | 33.545139 | 107 | 0.589205 | false |
MarCnu/videojsXBlock | videojs/videojs.py | 1 | 5080 | """ videojsXBlock main Python class"""
import pkg_resources
from django.template import Context, Template
from xblock.core import XBlock
from xblock.fields import Scope, Integer, String, Boolean
from xblock.fragment import Fragment
class videojsXBlock(XBlock):
'''
Icon of the XBlock. Values : [other (default), video, problem]
'''
icon_class = "video"
'''
Fields
'''
display_name = String(display_name="Display Name",
default="Video JS",
scope=Scope.settings,
help="This name appears in the horizontal navigation at the top of the page.")
url = String(display_name="Video URL",
default="http://vjs.zencdn.net/v/oceans.mp4",
scope=Scope.content,
help="The URL for your video.")
allow_download = Boolean(display_name="Video Download Allowed",
default=True,
scope=Scope.content,
help="Allow students to download this video.")
source_text = String(display_name="Source document button text",
default="",
scope=Scope.content,
help="Add a download link for the source file of your video. Use it for example to provide the PowerPoint or PDF file used for this video.")
source_url = String(display_name="Source document URL",
default="",
scope=Scope.content,
help="Add a download link for the source file of your video. Use it for example to provide the PowerPoint or PDF file used for this video.")
start_time = String(display_name="Start time",
default="",
scope=Scope.content,
help="The start and end time of your video. Equivalent to 'video.mp4#t=startTime,endTime' in the url.")
end_time = String(display_name="End time",
default="",
scope=Scope.content,
help="The start and end time of your video. Equivalent to 'video.mp4#t=startTime,endTime' in the url.")
'''
Util functions
'''
def load_resource(self, resource_path):
"""
Gets the content of a resource
"""
resource_content = pkg_resources.resource_string(__name__, resource_path)
return unicode(resource_content)
def render_template(self, template_path, context={}):
"""
Evaluate a template by resource path, applying the provided context
"""
template_str = self.load_resource(template_path)
return Template(template_str).render(Context(context))
'''
Main functions
'''
def student_view(self, context=None):
"""
The primary view of the XBlock, shown to students
when viewing courses.
"""
fullUrl = self.url
if self.start_time != "" and self.end_time != "":
fullUrl += "#t=" + self.start_time + "," + self.end_time
elif self.start_time != "":
fullUrl += "#t=" + self.start_time
elif self.end_time != "":
fullUrl += "#t=0," + self.end_time
context = {
'display_name': self.display_name,
'url': fullUrl,
'allow_download': self.allow_download,
'source_text': self.source_text,
'source_url': self.source_url
}
html = self.render_template('static/html/videojs_view.html', context)
frag = Fragment(html)
frag.add_css(self.load_resource("static/css/video-js.min.css"))
frag.add_css(self.load_resource("static/css/videojs.css"))
frag.add_javascript(self.load_resource("static/js/video-js.js"))
frag.add_javascript(self.load_resource("static/js/videojs_view.js"))
frag.initialize_js('videojsXBlockInitView')
return frag
def studio_view(self, context=None):
"""
The secondary view of the XBlock, shown to teachers
when editing the XBlock.
"""
context = {
'display_name': self.display_name,
'url': self.url,
'allow_download': self.allow_download,
'source_text': self.source_text,
'source_url': self.source_url,
'start_time': self.start_time,
'end_time': self.end_time
}
html = self.render_template('static/html/videojs_edit.html', context)
frag = Fragment(html)
frag.add_javascript(self.load_resource("static/js/videojs_edit.js"))
frag.initialize_js('videojsXBlockInitStudio')
return frag
@XBlock.json_handler
def save_videojs(self, data, suffix=''):
"""
The saving handler.
"""
self.display_name = data['display_name']
self.url = data['url']
self.allow_download = True if data['allow_download'] == "True" else False # Str to Bool translation
self.source_text = data['source_text']
self.source_url = data['source_url']
self.start_time = ''.join(data['start_time'].split()) # Remove whitespace
self.end_time = ''.join(data['end_time'].split()) # Remove whitespace
return {
'result': 'success',
}
| gpl-3.0 | 1,623,204,342,252,490,200 | 35.028369 | 148 | 0.599016 | false |
frugalware/pacman-tools | syncpkgd/syncpkgdctl.py | 1 | 1055 | #!/usr/bin/env python
import xmlrpclib, time, os, base64, re, sys
sys.path.append("/etc/syncpkgd")
from ctlconfig import config
server = xmlrpclib.Server(config.server_url)
if len(sys.argv) > 1:
if sys.argv[1] == "-d":
if server.cancel_build(config.server_user, config.server_pass, sys.argv[2]):
print "Okay, the service will not build this package for you."
else:
print "Oops, something went wrong. Maybe this package was already removed from the queue?"
else:
if server.request_build(config.server_user, config.server_pass, sys.argv[1]):
print "Okay, the service will build this package for you."
else:
print "Oops, something went wrong. Maybe this package is already in the queue?"
else:
print """At the moment the following packages are waiting to be built:"""
for i, item in enumerate(server.get_todo(config.server_user, config.server_pass)):
print '\t%s. %s' % (i+1, item.encode('utf-8'))
print "Please note that this list does not include failed or already started builds."
print "See man syncpkgdctl for more info."
| gpl-2.0 | -2,957,583,666,801,462,300 | 41.2 | 93 | 0.723223 | false |
bsaleil/lc | tools/graphs.py | 1 | 14708 | #!/usr/bin/env python3
#!/usr/bin/python3
#---------------------------------------------------------------------------
#
# Copyright (c) 2015, Baptiste Saleil. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#---------------------------------------------------------------------------
# No font with Ubuntu:
# http://stackoverflow.com/questions/11354149/python-unable-to-render-tex-in-matplotlib
# Execute compiler with stats option for all benchmarks
# Parse output
# Draw graphs
help = """
graphs.py - Generate graphs from compiler output
Use:
graphs.py [OPTION...]
Options:
-h,--help
Print this help.
--drawall
		Draw all graphs. By default the script lets the user choose the information to draw.
--stdexec
Use standard execution. Same as --exec="Standard;"?
--exec="DESCRIPTION;COMPILER_OPTION1 COMPILER_OPTION2 ..."
Add execution with given compiler options. All given executions are drawn
Example:
graphs.py --exec="Standard exec;" --exec="With all tests;--all-tests" --drawall
Draw all graphs for both executions (Standard, and with all-tests option).
graphs.py --stdexec
Let the user interactively choose the information to draw from only standard execution.
"""
import sys
import io
import glob
import os
import subprocess
from pylab import *
from copy import deepcopy
from matplotlib.backends.backend_pdf import PdfPages
# Constants
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__)) + '/' # Current script path
LC_PATH = SCRIPT_PATH + '../' # Compiler path
LC_EXEC = 'lazy-comp' # Compiler exec name
PDF_OUTPUT = SCRIPT_PATH + 'graphs.pdf' # PDF output file
BENCH_PATH = LC_PATH + 'benchmarks/*.scm' # Benchmarks path
BAR_COLORS = ["#222222","#555555","#888888","#AAAAAA","#DDDDDD"] # Bar colors
BAR_COLORS = ["#BBBBBB","#999999","#777777","#555555","#333333"] # Bar colors
#BAR_COLORS = ["#222222", "#666666", "#AAAAAA", "#EEEEEE"] # Paper sw15
FONT_SIZE = 9
# Parser constants, must match compiler --stats output
CSV_INDICATOR = '--'
STAT_SEPARATOR = ':'
CSV_SEPARATOR = ';'
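# Expected --stats output shape (a sketch, inferred from the parser below):
#   Some key : value
#   --
#   CSV table title
#   col1;col2
#   1;2
#   --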
# Options
DRAW_ALL = '--drawall' # Draw all graphs
STD_EXEC = '--stdexec' # Add standard execution to executions list
REF_EXEC = '--refexec' # Set reference execution for scale
SORT_EXEC = '--sortexec' # Sort
OPT_REF = False
OPT_SORT = False
# Globals
execs = {}
lexecs = []
printhelp = False
# Set current working directory to compiler path
os.chdir(LC_PATH)
# Get all benchmarks full path sorted by name
files = sorted(glob.glob(BENCH_PATH))
# Graph config
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
matplotlib.rcParams.update({'font.size': FONT_SIZE})
#-------------------------------------------------------------------------------------
# Utils
def num(s):
try:
return int(s)
except ValueError:
return float(s)
def WARNING(s):
print('WARNING: ' + s)
# Used as matplotlib formatter
def to_percent(y, position):
s = str(int(y))
# The percent symbol needs escaping in latex
if matplotlib.rcParams['text.usetex'] is True:
return s + r'$\%$'
else:
return s + '%'
#-------------------------------------------------------------------------------------
# Main
def setargs():
global printhelp
global OPT_REF
global OPT_SORT
if '-h' in sys.argv or '--help' in sys.argv:
printhelp = True
if STD_EXEC in sys.argv:
execs['Standard'] = ''
if REF_EXEC in sys.argv:
OPT_REF = sys.argv[sys.argv.index(REF_EXEC)+1]
if SORT_EXEC in sys.argv:
OPT_SORT = sys.argv[sys.argv.index(SORT_EXEC)+1]
for arg in sys.argv:
if arg.startswith('--exec='):
pair = arg[7:].split(';')
name = pair[0]
lcargs = pair[1].split()
execs[name] = lcargs
lexecs.append(name)
def go():
if printhelp:
print(help)
else:
# 1 - run benchmarks and parse compiler output
benchs_data = {}
keys = []
for ex in execs:
            ks, data = runparse(execs[ex])  # TODO: pass the arguments
if keys == []:
keys = ks
else:
if len(ks) != len(keys):
                    raise Exception("Inconsistent benchmark keys between executions")
benchs_data[ex] = data
# 2 - Draw all graphs
drawGraphs(keys,benchs_data)
print('Done!')
# Run compiler with 'opts', parse output and return keys and data
def runparse(opts):
print("Running with options: '" + ' '.join(opts) + "'")
data = {}
# Get keys
first = files[0]
keys = []
for file in files:
file_name = os.path.basename(file)
print(file_name + '...')
options = [LC_PATH + LC_EXEC, file, '--stats']
        options.extend(opts)  # TODO: rename 'options'
output = subprocess.check_output(options).decode("utf-8")
bench_data = parseOutput(output)
data[file_name] = bench_data
# Get keys on first result
if file == first:
for key in bench_data:
keys.append(key)
return keys,data
#-------------------------------------------------------------------------------------
# Parser: Read stats output from compiler and return python table representation
# Read 'KEY:VALUE' stat
def readStat(stream,data,line):
stat = line.split(STAT_SEPARATOR)
key = stat[0].strip()
val = num(stat[1].strip())
# Store key/value in global data
data[key] = val
line = stream.readline()
return line
# Read CSV stat
def readCSV(stream,data):
csv = []
# Consume CSV indicator line
line = stream.readline()
# Read table title
title = line.strip()
line = stream.readline()
# Read table header
header = line.split(CSV_SEPARATOR)
for el in header:
csv.append([el.strip()])
# Read CSV data
line = stream.readline()
while not line.startswith(CSV_INDICATOR):
linecsv = line.split(CSV_SEPARATOR)
for i in range(0,len(linecsv)):
csv[i].extend([num(linecsv[i].strip())]) ## THIS IS NOT EFFICIENT (for large CSV outputs)
line = stream.readline()
# Store key/value (title/csv) in global data
data[title] = csv
# Consume CSV indicator line
line = stream.readline()
return line
# Return python table from compiler 'output'
def parseOutput(output):
# Data for this benchmark
data = {}
# Stream
stream = io.StringIO(output)
# Parse
line = stream.readline()
while line:
# CSV table
if line.startswith(CSV_INDICATOR):
line = readCSV(stream,data)
# Key/Value line
else:
line = readStat(stream,data,line)
return data
#-------------------------------------------------------------------------------------
# Draw
# Draw all graphs associated to keys using benchs_data
# benchs_data contains all information for all benchmarks for all executions
# ex. benchs_data['Standard']['array1.scm']['Closures'] to get the number of
# closures created for benchmark array1.scm using standard exec
def drawGraphs(keys,benchs_data):
# Let user choose the graph to draw (-1 or empty for all graphs)
if not DRAW_ALL in sys.argv:
sortedKeys = sorted(keys)
print('Keys:')
print('-1: ALL')
for i in range(0,len(sortedKeys)):
print(' ' + str(i) + ': ' + sortedKeys[i])
inp = input('Key to draw (all) > ')
if not inp == '':
choice = num(inp)
if choice >= 0:
keys = [sortedKeys[choice]]
firstExec = list(benchs_data.keys())[0]
firstBenchmark = os.path.basename(files[0])
# Gen pdf output file
pdf = PdfPages(PDF_OUTPUT)
# For each key
for key in keys:
# CSV, NYI
if type(benchs_data[firstExec][firstBenchmark][key]) == list:
drawCSV(pdf,key,benchs_data)
# Key/Value, draw graph
else:
print("Drawing '" + key + "'...")
drawKeyValueGraph(pdf,key,benchs_data)
pdf.close()
## This is a specific implementation for #stubs/#versions
## TODO: Do something generic !
def drawCSV(pdf,key,benchs_data):
fig = plt.figure(key)
title = key
res = {}
for execution in benchs_data:
for bench in benchs_data[execution]:
for data in benchs_data[execution][bench][key]:
if data[0] == '#stubs':
for i in range(0,len(data)-1):
index = i+1
numvers = i
if (numvers >= 5):
numvers = -1
if (numvers in res):
res[numvers] += data[index]
else:
res[numvers] = data[index]
xvals = []
yvals = []
labels = []
keys = sorted(res.keys())
for key in keys:
if key != 0 and key != -1:
xvals.append(key)
yvals.append(res[key])
labels.append(key)
xvals.append(len(xvals)+1)
yvals.append(res[-1])
labels.append('>=5')
    total = 0
    for val in yvals:
        total += val
    for i in range(0, len(yvals)):
        yvals[i] = (yvals[i] * 100) / total
    plt.title(title + ' (total=' + str(total) + ')')
X = np.array(xvals)
Y = np.array(yvals)
bar(X, +Y, 1, facecolor=BAR_COLORS[0], edgecolor='white', label=key, zorder=10)
axes = gca()
axes.get_xaxis().set_visible(False)
# Draw grid
axes = gca()
axes.grid(True, zorder=1, color="#707070")
axes.set_axisbelow(True) # Keep grid under the axes
for i in range(0,len(labels)):
text(X[i]+0.25, -0.0, labels[i], ha='right', va='top')
# print(xvals)
# print(yvals)
# print(labels)
# print(res)
pdf.savefig(fig)
# Draw graph for given key
# Y: values for this key
# X: benchmarks
def drawKeyValueGraph(pdf,key,benchs_data):
fig = plt.figure(key,figsize=(8,3.4))
#plt.title(key)
exec_ref = ''
# Number of benchmarks
firstExec = list(benchs_data.keys())[0]
n = len(benchs_data[firstExec]) + 1 # +1 for mean
X = np.arange(n) # X set is [0, 1, ..., n-1]
Ys = {}
# For each exec
for d in benchs_data:
Y = []
# For each benchmark
for f in files:
Y.extend([benchs_data[d][os.path.basename(f)][key]])
        # Convert to a numpy array
Y = np.array(Y)
Ys[d] = Y
width = (1 / (len(Ys)+1)) # +1 for mean
#----------
# TODO: move to external fn
# Use a reference execution. All values for this exec are 100%
# Values for others executions are computed from this reference exec
if OPT_REF:
# Add % symbol to y values
formatter = FuncFormatter(to_percent)
plt.gca().yaxis.set_major_formatter(formatter)
exec_ref = OPT_REF # Reference execution (100%)
Y2 = deepcopy(Ys) # Deep copy of Y values
# Set all references to 100
for v in range(0,len(Y2[exec_ref])):
Y2[exec_ref][v] = '100'
# For each exec which is not ref exec
candraw = True # TODO : rename
for ex in Y2:
if ex != exec_ref:
for i in range(0,len(Y2[ex])):
ref = Ys[exec_ref][i]
cur = Ys[ex][i]
# We can't compute %, warning and stop
if ref == 0:
WARNING("Can't draw '" + key + "' using a reference execution.")
return
# Compute % and set
else:
Y2[ex][i] = (cur*100)/ref
# Y2 are the new values to draw
Ys = Y2
#----------
fileList = files
Yvals = Ys
# Sort Y values by a given execution
if OPT_SORT:
fileList,Yvals = sortByExecution(Yvals,OPT_SORT)
# Draw grid
axes = gca()
axes.grid(True, zorder=1, color="#707070")
axes.set_axisbelow(True) # Keep grid under the axes
i = 0
# TODO: add to --help: the script draws the exec bar in order
for key in lexecs:
if key != exec_ref:
Y = Yvals[key]
color = BAR_COLORS[i]
arith_mean = sum(Y) / float(len(Y))
print("MEANS:")
print(key + ": " + str(arith_mean))
Y = np.append(Y,[arith_mean]) # Add mean before drawing bars
bar(X+(i*width)+0.05, +Y, width, facecolor=color, linewidth=0, label=key)
i += 1
# Hide X values
axes.get_xaxis().set_visible(False)
plt.tick_params(axis='both', which='minor')
# # Set Y limit
#l = len(str(max(Y2))) # number of digit of max value
#ylim(0,max(Y2)+pow(10,l-1)) # Y is from 0 to (max + 10^i-1)
# # Draw values for each bar
# for x,y in zip(X,Y1):
# text(x+0.4, y+0.05, '%.2f' % y, ha='center', va= 'bottom')
ylim(0,120)
xlim(0,n)
# Draw benchmark name
names = fileList
names.append("ari-mean.scm") # Add mean name
for i in range(0,len(fileList)):
text(X[i]+0.40, -3, os.path.basename(fileList[i])[:-4], rotation=90, ha='center', va='top')
# Legend:
# Shrink by 10% on the bottom
box = axes.get_position()
axes.set_position([box.x0, box.y0 + box.height * 0.34, box.width, box.height * 0.66])
# Put a legend below axis
    ncol = int(len(lexecs) / 3)
legend(loc='upper center', bbox_to_anchor=(0., 0., 1., -0.35), prop={'size':FONT_SIZE}, ncol=ncol, mode='expand', borderaxespad=0.)
# Save to pdf
pdf.savefig(fig)
#-------------------------------------------------------------------------------------
# Manage Y values
# Sort Y values by values from a specific execution
def sortByExecution(Ys,execref):
    # Pseudo-decorate: change the data layout to allow the use of sort()
decorated = []
for fileIndex in range(0,len(files)):
r = [] # List of results for current file
for execution in Ys:
r.extend([execution,Ys[execution][fileIndex]])
r.append(files[fileIndex])
decorated.append(r)
# Sort
i = decorated[0].index(execref)
decorated = sorted(decorated,key=lambda el: el[i+1])
# Pseudo-undecorate: Restore previous layout with sorted data
undecorated = {}
ordered_files = []
    i = 0
while not decorated[0][i] in files:
execution = decorated[0][i]
vals = []
# For each data associated to file
for el in decorated:
vals.append(el[i+1])
filepath = el[len(el)-1]
if not filepath in ordered_files:
ordered_files.append(filepath)
        undecorated[execution] = np.asarray(vals)
        i += 2
return(ordered_files,undecorated)
#-------------------------------------------------------------------------------------
setargs()
go()
| bsd-3-clause | 3,920,754,107,737,802,000 | 26.961977 | 132 | 0.628298 | false |
spohnan/geowave | python/src/main/python/pygw/query/vector/__init__.py | 1 | 1243 | #
# Copyright (c) 2013-2019 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
#===============================================================================================
"""
This module contains the classes needed for querying vector data.
It contains the following import shortcuts:
```python
from pygw.query.vector import SpatialTemporalConstraintsBuilder
from pygw.query.vector import VectorQueryConstraintsFactory
from pygw.query.vector import FilterFactory
from pygw.query.vector import SimpleFeatureTransformer
from pygw.query.vector import VectorQueryBuilder
```
"""
from .spatial_temporal_constraints_builder import SpatialTemporalConstraintsBuilder
from .vector_query_constraints_factory import VectorQueryConstraintsFactory
from .filter_factory import FilterFactory
from .simple_feature_transformer import SimpleFeatureTransformer
from .vector_query_builder import VectorQueryBuilder
| apache-2.0 | -7,345,824,623,308,834,000 | 46.807692 | 96 | 0.77313 | false |
openstack/manila | manila/tests/share/drivers/dell_emc/plugins/unity/test_utils.py | 1 | 6351 | # Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from oslo_utils import units
from manila.share.drivers.dell_emc.plugins.unity import utils
from manila import test
class MockSP(object):
def __init__(self, sp_id):
self.sp_id = sp_id
def get_id(self):
return self.sp_id
SPA = MockSP('spa')
SPB = MockSP('spb')
class MockPort(object):
def __init__(self, sp, port_id, mtu):
self._sp = sp
self.port_id = port_id
self.mtu = mtu
def get_id(self):
return self.port_id
@property
def parent_storage_processor(self):
return self._sp
SPA_ETH0 = MockPort(SPA, 'spa_eth0', 1500)
SPA_ETH1 = MockPort(SPA, 'spa_eth1', 9000)
SPB_ETH0 = MockPort(SPB, 'spb_eth0', 1500)
SPB_ETH1 = MockPort(SPB, 'spb_eth1', 9000)
SPA_LA1 = MockPort(SPA, 'spa_la_1', 1500)
SPB_LA1 = MockPort(SPB, 'spb_la_1', 1500)
@ddt.ddt
class TestUtils(test.TestCase):
@ddt.data({'matcher': None,
'matched': {'pool_1', 'pool_2', 'nas_server_pool'},
'not_matched': set()},
{'matcher': ['*'],
'matched': {'pool_1', 'pool_2', 'nas_server_pool'},
'not_matched': set()},
{'matcher': ['pool_*'],
'matched': {'pool_1', 'pool_2'},
'not_matched': {'nas_server_pool'}},
{'matcher': ['*pool'],
'matched': {'nas_server_pool'},
'not_matched': {'pool_1', 'pool_2'}},
{'matcher': ['nas_server_pool'],
'matched': {'nas_server_pool'},
'not_matched': {'pool_1', 'pool_2'}},
{'matcher': ['nas_*', 'pool_*'],
'matched': {'pool_1', 'pool_2', 'nas_server_pool'},
'not_matched': set()})
def test_do_match(self, data):
full = ['pool_1 ', ' pool_2', ' nas_server_pool ']
matcher = data['matcher']
expected_matched = data['matched']
expected_not_matched = data['not_matched']
matched, not_matched = utils.do_match(full, matcher)
self.assertEqual(expected_matched, matched)
self.assertEqual(expected_not_matched, not_matched)
@ddt.data({'ports': [SPA_ETH0, SPB_ETH0],
'ids_conf': None,
'port_map': {'spa': {'spa_eth0'}, 'spb': {'spb_eth0'}},
'unmanaged': set()},
{'ports': [SPA_ETH0, SPB_ETH0],
'ids_conf': [' '],
'port_map': {'spa': {'spa_eth0'}, 'spb': {'spb_eth0'}},
'unmanaged': set()},
{'ports': [SPA_ETH0, SPB_ETH0, SPA_ETH1],
'ids_conf': ['spa*'],
'port_map': {'spa': {'spa_eth0', 'spa_eth1'}},
'unmanaged': {'spb_eth0'}},
)
@ddt.unpack
def test_match_ports(self, ports, ids_conf, port_map, unmanaged):
sp_ports_map, unmanaged_port_ids = utils.match_ports(ports,
ids_conf)
self.assertEqual(port_map, sp_ports_map)
self.assertEqual(unmanaged, unmanaged_port_ids)
def test_find_ports_by_mtu(self):
all_ports = [SPA_ETH0, SPB_ETH0, SPA_ETH1, SPB_ETH1, SPA_LA1,
SPB_LA1]
port_ids_conf = '*'
port_map = utils.find_ports_by_mtu(all_ports, port_ids_conf, 1500)
self.assertEqual({'spa': {'spa_eth0', 'spa_la_1'},
'spb': {'spb_eth0', 'spb_la_1'}},
port_map)
def test_gb_to_byte(self):
self.assertEqual(3 * units.Gi, utils.gib_to_byte(3))
def test_get_snapshot_id(self):
snapshot = {'provider_location': '23047-ef2344-4563cvw-r4323cwed',
'id': 'test_id'}
result = utils.get_snapshot_id(snapshot)
expected = '23047-ef2344-4563cvw-r4323cwed'
self.assertEqual(expected, result)
def test_get_snapshot_id_without_pl(self):
snapshot = {'provider_location': '', 'id': 'test_id'}
result = utils.get_snapshot_id(snapshot)
expected = 'test_id'
self.assertEqual(expected, result)
def test_get_nfs_share_id(self):
nfs_share = {'export_locations':
[{'path': '10.10.1.12:/addf-97e-46c-8ac6-55922f',
'share_instance_id': 'e24-457e-47-12c6-gf345'}],
'share_proto': 'NFS', 'id': 'test_nfs_id'}
result = utils.get_share_backend_id(nfs_share)
expected = 'addf-97e-46c-8ac6-55922f'
self.assertEqual(expected, result)
def test_get_nfs_share_id_without_path(self):
nfs_share = {'export_locations':
[{'path': '',
'share_instance_id': 'ev24-7e-4-12c6-g45245'}],
'share_proto': 'NFS', 'id': 'test_nfs_id'}
result = utils.get_share_backend_id(nfs_share)
expected = 'test_nfs_id'
self.assertEqual(expected, result)
def test_get_cifs_share_id(self):
cifs_share = {'export_locations':
[{'path': '\\\\17.66.5.3\\bdf-h4e-42c-122c5-b212',
'share_instance_id': 'ev4-47e-48-126-gfbh452'}],
'share_proto': 'CIFS', 'id': 'test_cifs_id'}
result = utils.get_share_backend_id(cifs_share)
expected = 'bdf-h4e-42c-122c5-b212'
self.assertEqual(expected, result)
def test_get_cifs_share_id_without_path(self):
cifs_share = {'export_locations':
[{'path': '',
'share_instance_id': 'ef4-47e-48-12c6-gf452'}],
'share_proto': 'CIFS', 'id': 'test_cifs_id'}
result = utils.get_share_backend_id(cifs_share)
expected = 'test_cifs_id'
self.assertEqual(expected, result)
| apache-2.0 | 3,944,791,302,127,216,000 | 37.259036 | 78 | 0.537553 | false |
d-li14/CS231n-Assignments | assignment2/cs231n/data_utils.py | 1 | 9180 | from __future__ import print_function
# from builtins import range
from six.moves import cPickle as pickle
import numpy as np
import os
from scipy.misc import imread
import platform
def load_pickle(f):
version = platform.python_version_tuple()
if version[0] == '2':
return pickle.load(f)
elif version[0] == '3':
return pickle.load(f, encoding='latin1')
raise ValueError("invalid python version: {}".format(version))
def load_CIFAR_batch(filename):
""" load single batch of cifar """
with open(filename, 'rb') as f:
datadict = load_pickle(f)
X = datadict['data']
Y = datadict['labels']
X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("float")
Y = np.array(Y)
return X, Y
def load_CIFAR10(ROOT):
""" load all of cifar """
xs = []
ys = []
for b in range(1,6):
f = os.path.join(ROOT, 'data_batch_%d' % (b, ))
X, Y = load_CIFAR_batch(f)
xs.append(X)
ys.append(Y)
Xtr = np.concatenate(xs)
Ytr = np.concatenate(ys)
del X, Y
Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000,
subtract_mean=True):
"""
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for classifiers. These are the same steps as we used for the SVM, but
condensed to a single function.
"""
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = list(range(num_training, num_training + num_validation))
X_val = X_train[mask]
y_val = y_train[mask]
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
if subtract_mean:
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# Transpose so that channels come first
X_train = X_train.transpose(0, 3, 1, 2).copy()
X_val = X_val.transpose(0, 3, 1, 2).copy()
X_test = X_test.transpose(0, 3, 1, 2).copy()
# Package data into a dictionary
return {
'X_train': X_train, 'y_train': y_train,
'X_val': X_val, 'y_val': y_val,
'X_test': X_test, 'y_test': y_test,
}
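# Illustrative sketch (not part of the original assignment code): minimal use
# of get_CIFAR10_data(). It assumes the CIFAR-10 batches have already been
# downloaded to cs231n/datasets/cifar-10-batches-py.
def _example_get_cifar10_data_usage():
    data = get_CIFAR10_data(num_training=49000, num_validation=1000,
                            num_test=1000)
    # Images are channel-first arrays of shape (N, 3, 32, 32); labels are (N,).
    for split in ('train', 'val', 'test'):
        print(split, data['X_' + split].shape, data['y_' + split].shape)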
def load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):
"""
Load TinyImageNet. Each of TinyImageNet-100-A, TinyImageNet-100-B, and
TinyImageNet-200 have the same directory structure, so this can be used
to load any of them.
Inputs:
- path: String giving path to the directory to load.
- dtype: numpy datatype used to load the data.
- subtract_mean: Whether to subtract the mean training image.
Returns: A dictionary with the following entries:
- class_names: A list where class_names[i] is a list of strings giving the
WordNet names for class i in the loaded dataset.
- X_train: (N_tr, 3, 64, 64) array of training images
- y_train: (N_tr,) array of training labels
- X_val: (N_val, 3, 64, 64) array of validation images
- y_val: (N_val,) array of validation labels
- X_test: (N_test, 3, 64, 64) array of testing images.
- y_test: (N_test,) array of test labels; if test labels are not available
(such as in student code) then y_test will be None.
- mean_image: (3, 64, 64) array giving mean training image
"""
# First load wnids
with open(os.path.join(path, 'wnids.txt'), 'r') as f:
wnids = [x.strip() for x in f]
# Map wnids to integer labels
wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}
# Use words.txt to get names for each class
with open(os.path.join(path, 'words.txt'), 'r') as f:
wnid_to_words = dict(line.split('\t') for line in f)
for wnid, words in wnid_to_words.items():
wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
class_names = [wnid_to_words[wnid] for wnid in wnids]
# Next load training data.
X_train = []
y_train = []
for i, wnid in enumerate(wnids):
if (i + 1) % 20 == 0:
print('loading training data for synset %d / %d'
% (i + 1, len(wnids)))
# To figure out the filenames we need to open the boxes file
boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
with open(boxes_file, 'r') as f:
filenames = [x.split('\t')[0] for x in f]
num_images = len(filenames)
X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)
y_train_block = wnid_to_label[wnid] * \
np.ones(num_images, dtype=np.int64)
for j, img_file in enumerate(filenames):
img_file = os.path.join(path, 'train', wnid, 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
## grayscale file
img.shape = (64, 64, 1)
X_train_block[j] = img.transpose(2, 0, 1)
X_train.append(X_train_block)
y_train.append(y_train_block)
# We need to concatenate all training data
X_train = np.concatenate(X_train, axis=0)
y_train = np.concatenate(y_train, axis=0)
# Next load validation data
with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
img_files = []
val_wnids = []
for line in f:
img_file, wnid = line.split('\t')[:2]
img_files.append(img_file)
val_wnids.append(wnid)
num_val = len(img_files)
y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)
for i, img_file in enumerate(img_files):
img_file = os.path.join(path, 'val', 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
img.shape = (64, 64, 1)
X_val[i] = img.transpose(2, 0, 1)
# Next load test images
# Students won't have test labels, so we need to iterate over files in the
# images directory.
img_files = os.listdir(os.path.join(path, 'test', 'images'))
X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)
for i, img_file in enumerate(img_files):
img_file = os.path.join(path, 'test', 'images', img_file)
img = imread(img_file)
if img.ndim == 2:
img.shape = (64, 64, 1)
X_test[i] = img.transpose(2, 0, 1)
y_test = None
y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
if os.path.isfile(y_test_file):
with open(y_test_file, 'r') as f:
img_file_to_wnid = {}
for line in f:
line = line.split('\t')
img_file_to_wnid[line[0]] = line[1]
y_test = [wnid_to_label[img_file_to_wnid[img_file]]
for img_file in img_files]
y_test = np.array(y_test)
mean_image = X_train.mean(axis=0)
if subtract_mean:
X_train -= mean_image[None]
X_val -= mean_image[None]
X_test -= mean_image[None]
return {
'class_names': class_names,
'X_train': X_train,
'y_train': y_train,
'X_val': X_val,
'y_val': y_val,
'X_test': X_test,
'y_test': y_test,
'class_names': class_names,
'mean_image': mean_image,
}
def load_models(models_dir):
"""
Load saved models from disk. This will attempt to unpickle all files in a
directory; any files that give errors on unpickling (such as README.txt)
will be skipped.
Inputs:
- models_dir: String giving the path to a directory containing model files.
Each model file is a pickled dictionary with a 'model' field.
Returns:
A dictionary mapping model file names to models.
"""
models = {}
for model_file in os.listdir(models_dir):
with open(os.path.join(models_dir, model_file), 'rb') as f:
try:
models[model_file] = load_pickle(f)['model']
except pickle.UnpicklingError:
continue
return models
def load_imagenet_val(num=None):
"""Load a handful of validation images from ImageNet.
Inputs:
- num: Number of images to load (max of 25)
Returns:
- X: numpy array with shape [num, 224, 224, 3]
- y: numpy array of integer image labels, shape [num]
- class_names: dict mapping integer label to class name
"""
imagenet_fn = 'cs231n/datasets/imagenet_val_25.npz'
if not os.path.isfile(imagenet_fn):
print('file %s not found' % imagenet_fn)
print('Run the following:')
print('cd cs231n/datasets')
print('bash get_imagenet_val.sh')
assert False, 'Need to download imagenet_val_25.npz'
f = np.load(imagenet_fn)
X = f['X']
y = f['y']
class_names = f['label_map'].item()
if num is not None:
X = X[:num]
y = y[:num]
return X, y, class_names
| gpl-3.0 | 6,853,406,791,972,794,000 | 34.038168 | 79 | 0.587037 | false |
toumorokoshi/sprinter | sprinter/formula/command.py | 1 | 2334 | """
Runs a command
[ssh]
formula = sprinter.formula.command
redirect_stdout_to_log=false
fail_on_error=true
shell = False
install=echo 'setting up...'
update=echo 'updating...'
remove=echo 'destroying...'
activate=echo 'activating...'
deactivate=echo 'deactivating...'
"""
from __future__ import unicode_literals
from sprinter.formula.base import FormulaBase
from sprinter.exceptions import FormulaException
import sprinter.lib as lib
import subprocess
class CommandFormulaException(FormulaException):
pass
class CommandFormula(FormulaBase):
valid_options = FormulaBase.valid_options + ['install',
'update',
'remove',
'activate',
'deactivate',
'fail_on_error',
'shell',
'redirect_stdout_to_log']
def install(self):
self.__run_command('install', 'target')
FormulaBase.install(self)
def update(self):
value = self.__run_command('update', 'target')
return value or FormulaBase.update(self)
def remove(self):
self.__run_command('remove', 'source')
FormulaBase.remove(self)
def activate(self):
self.__run_command('activate', 'source')
FormulaBase.activate(self)
def deactivate(self):
self.__run_command('deactivate', 'source')
FormulaBase.deactivate(self)
def __run_command(self, command_type, manifest_type):
config = getattr(self, manifest_type)
if config.has(command_type):
command = config.get(command_type)
self.logger.debug("Running %s..." % command)
shell = config.has('shell') and config.is_affirmative('shell')
stdout = subprocess.PIPE if config.is_affirmative('redirect_stdout_to_log', 'true') else None
return_code, output = lib.call(command, shell=shell, stdout=stdout)
if config.is_affirmative('fail_on_error', True) and return_code != 0:
raise CommandFormulaException("Command returned a return code of {0}!".format(return_code))
return True
| mit | 7,646,238,375,529,140,000 | 33.835821 | 107 | 0.568552 | false |
joberreiter/pyload | module/plugins/hooks/JustPremium.py | 1 | 2420 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.Addon import Addon
class JustPremium(Addon):
__name__ = "JustPremium"
__type__ = "hook"
__version__ = "0.24"
__status__ = "testing"
__config__ = [("activated", "bool", "Activated" , False),
("excluded" , "str" , "Exclude hosters (comma separated)", "" ),
("included" , "str" , "Include hosters (comma separated)", "" )]
__description__ = """Remove not-premium links from added urls"""
__license__ = "GPLv3"
__authors__ = [("mazleu" , "[email protected]"),
("Walter Purcaro", "[email protected]" ),
("immenz" , "[email protected]" )]
def init(self):
self.event_map = {'linksAdded': "links_added"}
def links_added(self, links, pid):
hosterdict = self.pyload.pluginManager.hosterPlugins
linkdict = self.pyload.api.checkURLs(links)
premiumplugins = set(account.type for account in self.pyload.api.getAccounts(False) \
if account.valid and account.premium)
multihosters = set(hoster for hoster in self.pyload.pluginManager.hosterPlugins \
if 'new_name' in hosterdict[hoster] \
and hosterdict[hoster]['new_name'] in premiumplugins)
excluded = map(lambda domain: "".join(part.capitalize() for part in re.split(r'(\.|\d+)', domain) if part != '.'),
self.get_config('excluded').replace(' ', '').replace(',', '|').replace(';', '|').split('|'))
included = map(lambda domain: "".join(part.capitalize() for part in re.split(r'(\.|\d+)', domain) if part != '.'),
self.get_config('included').replace(' ', '').replace(',', '|').replace(';', '|').split('|'))
hosterlist = (premiumplugins | multihosters).union(excluded).difference(included)
#: Found at least one hoster with account or multihoster
if not any( True for pluginname in linkdict if pluginname in hosterlist ):
return
for pluginname in set(linkdict.keys()) - hosterlist:
self.log_info(_("Remove links of plugin: %s") % pluginname)
for link in linkdict[pluginname]:
self.log_debug("Remove link: %s" % link)
links.remove(link)
| gpl-3.0 | -6,310,847,757,710,996,000 | 43.814815 | 122 | 0.544628 | false |
Ingener74/Small-Screwdriver | SmallScrewdriver/Tests/test_size.py | 1 | 5240 | # encoding: utf8
from unittest import TestCase
from SmallScrewdriver import Size
class TestSize(TestCase):
def test_area(self):
size = Size(100, 100)
self.assertEqual(size.area(), 10000)
size = Size(10, 10)
self.assertEqual(size.area(), 100)
size = Size(20, 20)
self.assertEqual(size.area(), 400)
def test_eq(self):
        # TODO: add more tests
s1 = Size(10, 10)
s2 = Size(20, 20)
s3 = Size(10, 20)
s4 = Size(10, 10)
self.assertEqual(s1, s4)
self.assertNotEquals(s1, s2)
self.assertNotEqual(s1, s3)
def test_ne(self):
s1 = Size(10, 10)
s2 = Size(20, 20)
s3 = Size(10, 20)
s4 = Size(10, 10)
self.assertNotEquals(s1, s2)
self.assertNotEqual(s1, s3)
self.assertNotEqual(s2, s3)
self.assertNotEqual(s2, s4)
self.assertNotEqual(s3, s4)
def test_lt(self):
s1 = Size(8, 6)
s2 = Size(20, 20)
s3 = Size(7, 20)
s4 = Size(11, 9)
self.assertLess(s1, s2)
self.assertLess(s4, s2)
self.assertFalse(s3 < s2)
def test_is_inscribed(self):
s1 = Size(10, 10)
s2 = Size(20, 20)
s3 = Size(8, 20)
s4 = Size(40, 5)
s5 = Size(4, 4)
self.assertFalse(s1.canInscribe(s2))
self.assertFalse(s1.canInscribe(s3))
self.assertFalse(s1.canInscribe(s4))
self.assertTrue(s1.canInscribe(s5))
self.assertTrue(s2.canInscribe(s1))
self.assertFalse(s2.canInscribe(s3))
self.assertFalse(s2.canInscribe(s4))
self.assertTrue(s2.canInscribe(s5))
self.assertFalse(s3.canInscribe(s1))
self.assertFalse(s3.canInscribe(s2))
self.assertFalse(s3.canInscribe(s4))
self.assertTrue(s3.canInscribe(s5))
self.assertFalse(s4.canInscribe(s1))
self.assertFalse(s4.canInscribe(s2))
self.assertFalse(s4.canInscribe(s3))
self.assertFalse(s4.canInscribe(s3))
self.assertFalse(s5.canInscribe(s2))
self.assertFalse(s5.canInscribe(s3))
self.assertFalse(s5.canInscribe(s1))
self.assertFalse(s5.canInscribe(s3))
def test_less(self):
s1 = Size(10, 10)
s2 = Size(20, 20)
s3 = Size(8, 20)
s4 = Size(40, 5)
s5 = Size(4, 4)
self.assertEqual(s1.less(s1), (False, False))
self.assertEqual(s1.less(s2), (True, True))
self.assertEqual(s1.less(s3), (False, True))
self.assertEqual(s1.less(s4), (True, False))
self.assertEqual(s1.less(s5), (False, False))
self.assertEqual(s2.less(s1), (False, False))
self.assertEqual(s2.less(s2), (False, False))
self.assertEqual(s2.less(s3), (False, False))
self.assertEqual(s2.less(s4), (True, False))
self.assertEqual(s2.less(s5), (False, False))
self.assertEqual(s3.less(s1), (True, False))
self.assertEqual(s3.less(s2), (True, False))
self.assertEqual(s3.less(s3), (False, False))
self.assertEqual(s3.less(s4), (True, False))
self.assertEqual(s3.less(s5), (False, False))
self.assertEqual(s4.less(s1), (False, True))
self.assertEqual(s4.less(s2), (False, True))
self.assertEqual(s4.less(s3), (False, True))
self.assertEqual(s4.less(s4), (False, False))
self.assertEqual(s4.less(s5), (False, False))
self.assertEqual(s5.less(s1), (True, True))
self.assertEqual(s5.less(s2), (True, True))
self.assertEqual(s5.less(s3), (True, True))
self.assertEqual(s5.less(s4), (True, True))
self.assertEqual(s5.less(s5), (False, False))
def test_equal(self):
s1 = Size(10, 10)
s2 = Size(20, 20)
s3 = Size(8, 20)
s4 = Size(40, 5)
s5 = Size(4, 4)
self.assertEqual(s1.equal(s1), (True, True))
self.assertEqual(s1.equal(s2), (False, False))
self.assertEqual(s1.equal(s3), (False, False))
self.assertEqual(s1.equal(s4), (False, False))
self.assertEqual(s1.equal(s5), (False, False))
self.assertEqual(s2.equal(s1), (False, False))
self.assertEqual(s2.equal(s2), (True, True))
self.assertEqual(s2.equal(s3), (False, True))
self.assertEqual(s2.equal(s4), (False, False))
self.assertEqual(s2.equal(s5), (False, False))
self.assertEqual(s3.equal(s1), (False, False))
self.assertEqual(s3.equal(s2), (False, True))
self.assertEqual(s3.equal(s3), (True, True))
self.assertEqual(s3.equal(s4), (False, False))
self.assertEqual(s3.equal(s5), (False, False))
self.assertEqual(s4.equal(s1), (False, False))
self.assertEqual(s4.equal(s2), (False, False))
self.assertEqual(s4.equal(s3), (False, False))
self.assertEqual(s4.equal(s4), (True, True))
self.assertEqual(s4.equal(s5), (False, False))
self.assertEqual(s5.equal(s1), (False, False))
self.assertEqual(s5.equal(s2), (False, False))
self.assertEqual(s5.equal(s3), (False, False))
self.assertEqual(s5.equal(s4), (False, False))
self.assertEqual(s5.equal(s5), (True, True))
| lgpl-3.0 | 3,716,127,138,101,747,700 | 32.299363 | 54 | 0.584162 | false |
lgrahl/threema-msgapi-sdk-python | examples/simple.py | 1 | 1737 | """
You can modify and use one of the functions below to test the gateway
service with your account.
"""
import asyncio
import logbook
import logbook.more
from threema.gateway import (
Connection,
GatewayError,
util,
)
from threema.gateway.simple import TextMessage
async def send_via_id(connection):
"""
Send a message to a specific Threema ID.
"""
message = TextMessage(
connection=connection,
to_id='ECHOECHO',
text='Hello from the world of Python!'
)
return await message.send()
async def send_via_email(connection):
"""
Send a message via an email address.
"""
message = TextMessage(
connection=connection,
email='[email protected]',
text='Hello from the world of Python!'
)
return await message.send()
async def send_via_phone(connection):
"""
Send a message via a phone number.
"""
message = TextMessage(
connection=connection,
phone='41791234567',
text='Hello from the world of Python!'
)
return await message.send()
async def main():
connection = Connection(
identity='*YOUR_GATEWAY_THREEMA_ID',
secret='YOUR_GATEWAY_THREEMA_ID_SECRET',
)
try:
async with connection:
await send_via_id(connection)
await send_via_email(connection)
await send_via_phone(connection)
except GatewayError as exc:
print('Error:', exc)
if __name__ == '__main__':
util.enable_logging(logbook.WARNING)
log_handler = logbook.more.ColorizedStderrHandler()
with log_handler.applicationbound():
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
| mit | 877,856,302,636,758,300 | 22.472973 | 69 | 0.632124 | false |
niosus/EasyClangComplete | plugin/utils/subl/row_col.py | 1 | 2420 | """Represent different ways to work with row and column in views."""
class ZeroIndexedRowCol():
"""A cursor position as 0-indexed row and column."""
def __init__(self, row, col):
"""Initialize from row and column as seen in file (start with 1)."""
self._row = row
self._col = col
@property
def row(self):
"""Return row."""
return self._row
@property
def col(self):
"""Return col."""
return self._col
def as_1d_location(self, view):
"""Return the cursor position as 1d location in a view."""
return view.text_point(self._row, self._col)
@staticmethod
def from_one_indexed(one_indexed_row_col):
"""Convert 1-indexed row column into the 0-indexed representation."""
return ZeroIndexedRowCol(one_indexed_row_col._row - 1,
one_indexed_row_col._col - 1)
@staticmethod
def from_1d_location(view, pos):
"""Get row and column from a 1d location in a view."""
if pos is None:
return ZeroIndexedRowCol.from_current_cursor_pos(view)
row, col = view.rowcol(pos)
return ZeroIndexedRowCol(row, col)
@classmethod
def from_current_cursor_pos(cls, view):
"""Generate row and columg from current cursor position in view."""
pos = view.sel()
if pos is None or len(pos) < 1:
# something is wrong
return None
# We care about the first position only.
pos = pos[0].a
return cls.from_1d_location(view, pos)
def as_tuple(self):
"""Return as tuple."""
return (self._row, self._col)
class OneIndexedRowCol():
"""Stores a cursor position."""
def __init__(self, row, col):
"""Initialize from a zero-indexed row and column."""
self._row = row
self._col = col
@staticmethod
def from_zero_indexed(zero_indexed_row_col):
"""Convert 0-indexed row column into the 1-indexed representation."""
        return OneIndexedRowCol(zero_indexed_row_col._row + 1,
                                zero_indexed_row_col._col + 1)
@property
def row(self):
"""Return row."""
return self._row
@property
def col(self):
"""Return col."""
return self._col
def as_tuple(self):
"""Return as tuple."""
return (self._row, self._col)
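# Illustrative sketch (not part of the original plugin): converting between the
# two representations without a Sublime Text view involved.
def _example_row_col_conversion():
    one_indexed = OneIndexedRowCol(10, 4)
    zero_indexed = ZeroIndexedRowCol.from_one_indexed(one_indexed)
    # Row 10, column 4 as shown to the user maps to (9, 3) internally.
    return zero_indexed.as_tuple()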
| mit | -211,356,029,211,549,730 | 28.512195 | 77 | 0.572727 | false |
rboman/progs | pytools/utils.py | 1 | 2981 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Romain Boman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous utilities used by all the programs of 'progs'
"""
# def parseargs():
# """
# parses command line arguments
# """
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument(
# "-v",
# "--verb",
# help="increase output verbosity",
# action="count",
# default=0)
# parser.add_argument(
# "--nogui", help="disable any graphical output", action="store_true")
# parser.add_argument("-k", help="nb of threads", type=int, default=1)
# parser.add_argument('script', nargs=1, help='python script')
# args, other = parser.parse_known_args()
# print("args={}".format(args))
# print("other={}".format(other))
# return args
def chDir(dirname):
import os
os.chdir(dirname)
print("[in %s]" % os.getcwd())
def isUnix():
import platform
uname = platform.uname()
return not (uname[0] == 'Windows' or uname[2] == 'Windows')
def isInstalled(name):
"""Check whether `name` is on PATH."""
from distutils.spawn import find_executable
return find_executable(name) is not None
def cls():
"""Clear console"""
import platform
import os
uname = platform.uname()
if uname[0] == 'Windows':
os.system("CLS")
else:
os.system("clear")
# ----- from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/134892
class _Getch(object):
"""Gets a single character from standard input. Does not echo to the
screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix(object):
def __init__(self):
import tty
import sys
def __call__(self):
import sys
import tty
import termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows(object):
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch().decode('utf-8')
# -- global variable --
getch = _Getch()
| apache-2.0 | -7,001,047,471,291,135,000 | 24.262712 | 78 | 0.60785 | false |
tjanez/bup | lib/bup/helpers.py | 1 | 33921 | """Helper functions and classes for bup."""
from collections import namedtuple
from ctypes import sizeof, c_void_p
from os import environ
from contextlib import contextmanager
import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
import hashlib, heapq, math, operator, time, grp, tempfile
from bup import _helpers
sc_page_size = os.sysconf('SC_PAGE_SIZE')
assert(sc_page_size > 0)
sc_arg_max = os.sysconf('SC_ARG_MAX')
if sc_arg_max == -1: # "no definite limit" - let's choose 2M
sc_arg_max = 2 * 1024 * 1024
# This function should really be in helpers, not in bup.options. But we
# want options.py to be standalone so people can include it in other projects.
from bup.options import _tty_width
tty_width = _tty_width
def atoi(s):
"""Convert the string 's' to an integer. Return 0 if s is not a number."""
try:
return int(s or '0')
except ValueError:
return 0
def atof(s):
"""Convert the string 's' to a float. Return 0 if s is not a number."""
try:
return float(s or '0')
except ValueError:
return 0
buglvl = atoi(os.environ.get('BUP_DEBUG', 0))
if sys.platform.startswith('darwin'):
# Apparently fsync on OS X doesn't guarantee to sync all the way down
import fcntl
fdatasync = lambda fd : fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
else: # If the platform doesn't have fdatasync, fall back to fsync
try:
fdatasync = os.fdatasync
except AttributeError:
fdatasync = os.fsync
# Write (blockingly) to sockets that may or may not be in blocking mode.
# We need this because our stderr is sometimes eaten by subprocesses
# (probably ssh) that sometimes make it nonblocking, if only temporarily,
# leading to race conditions. Ick. We'll do it the hard way.
def _hard_write(fd, buf):
while buf:
(r,w,x) = select.select([], [fd], [], None)
if not w:
raise IOError('select(fd) returned without being writable')
try:
sz = os.write(fd, buf)
except OSError as e:
if e.errno != errno.EAGAIN:
raise
assert(sz >= 0)
buf = buf[sz:]
_last_prog = 0
def log(s):
"""Print a log message to stderr."""
global _last_prog
sys.stdout.flush()
_hard_write(sys.stderr.fileno(), s)
_last_prog = 0
def debug1(s):
if buglvl >= 1:
log(s)
def debug2(s):
if buglvl >= 2:
log(s)
istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
_last_progress = ''
def progress(s):
"""Calls log() if stderr is a TTY. Does nothing otherwise."""
global _last_progress
if istty2:
log(s)
_last_progress = s
def qprogress(s):
"""Calls progress() only if we haven't printed progress in a while.
This avoids overloading the stderr buffer with excess junk.
"""
global _last_prog
now = time.time()
if now - _last_prog > 0.1:
progress(s)
_last_prog = now
def reprogress():
"""Calls progress() to redisplay the most recent progress message.
Useful after you've printed some other message that wipes out the
progress line.
"""
if _last_progress and _last_progress.endswith('\r'):
progress(_last_progress)
def mkdirp(d, mode=None):
"""Recursively create directories on path 'd'.
Unlike os.makedirs(), it doesn't raise an exception if the last element of
the path already exists.
"""
try:
if mode:
os.makedirs(d, mode)
else:
os.makedirs(d)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
_unspecified_next_default = object()
def _fallback_next(it, default=_unspecified_next_default):
"""Retrieve the next item from the iterator by calling its
next() method. If default is given, it is returned if the
iterator is exhausted, otherwise StopIteration is raised."""
if default is _unspecified_next_default:
return it.next()
else:
try:
return it.next()
except StopIteration:
return default
if sys.version_info < (2, 6):
next = _fallback_next
def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
if key:
samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
else:
samekey = operator.eq
count = 0
total = sum(len(it) for it in iters)
iters = (iter(it) for it in iters)
heap = ((next(it, None),it) for it in iters)
heap = [(e,it) for e,it in heap if e]
heapq.heapify(heap)
pe = None
while heap:
if not count % pfreq:
pfunc(count, total)
e, it = heap[0]
if not samekey(e, pe):
pe = e
yield e
count += 1
try:
e = it.next() # Don't use next() function, it's too expensive
except StopIteration:
heapq.heappop(heap) # remove current
else:
heapq.heapreplace(heap, (e, it)) # shift current to new location
pfinal(count, total)
def unlink(f):
"""Delete a file at path 'f' if it currently exists.
Unlike os.unlink(), does not throw an exception if the file didn't already
exist.
"""
try:
os.unlink(f)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def readpipe(argv, preexec_fn=None):
"""Run a subprocess and return its output."""
p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn)
out, err = p.communicate()
if p.returncode != 0:
raise Exception('subprocess %r failed with status %d'
% (' '.join(argv), p.returncode))
return out
def _argmax_base(command):
base_size = 2048
for c in command:
        base_size += len(c) + 1
for k, v in environ.iteritems():
base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
return base_size
def _argmax_args_size(args):
return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
"""If args is not empty, yield the output produced by calling the
command list with args as a sequence of strings (It may be necessary
to return multiple strings in order to respect ARG_MAX)."""
# The optional arg_max arg is a workaround for an issue with the
# current wvtest behavior.
base_size = _argmax_base(command)
while args:
room = arg_max - base_size
i = 0
while i < len(args):
next_size = _argmax_args_size(args[i:i+1])
if room - next_size < 0:
break
room -= next_size
i += 1
sub_args = args[:i]
args = args[i:]
assert(len(sub_args))
yield readpipe(command + sub_args, preexec_fn=preexec_fn)
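# Illustrative sketch (not part of upstream bup): running one command over a
# long argument list with batchpipe(), which splits the arguments so that each
# invocation stays within ARG_MAX. The 'ls -ld' command is an arbitrary choice.
def _example_batchpipe_usage(paths):
    # Each yielded string is the stdout of one invocation covering as many
    # paths as fit within the argument-size budget.
    for out in batchpipe(['ls', '-ld'], paths):
        sys.stdout.write(out)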
def realpath(p):
"""Get the absolute path of a file.
Behaves like os.path.realpath, but doesn't follow a symlink for the last
element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
will follow symlinks in p's directory)
"""
try:
st = os.lstat(p)
except OSError:
st = None
if st and stat.S_ISLNK(st.st_mode):
(dir, name) = os.path.split(p)
dir = os.path.realpath(dir)
out = os.path.join(dir, name)
else:
out = os.path.realpath(p)
#log('realpathing:%r,%r\n' % (p, out))
return out
def detect_fakeroot():
"Return True if we appear to be running under fakeroot."
return os.getenv("FAKEROOTKEY") != None
def is_superuser():
if sys.platform.startswith('cygwin'):
import ctypes
return ctypes.cdll.shell32.IsUserAnAdmin()
else:
return os.geteuid() == 0
def _cache_key_value(get_value, key, cache):
"""Return (value, was_cached). If there is a value in the cache
for key, use that, otherwise, call get_value(key) which should
throw a KeyError if there is no value -- in which case the cached
and returned value will be None.
"""
try: # Do we already have it (or know there wasn't one)?
value = cache[key]
return value, True
except KeyError:
pass
value = None
try:
cache[key] = value = get_value(key)
except KeyError:
cache[key] = None
return value, False
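# Illustrative sketch (not part of upstream bup): _cache_key_value() caches
# misses as well as hits, so a failing lookup is only attempted once.
def _example_cache_key_value_usage():
    cache = {}
    lookup = {'a': 1}.__getitem__  # raises KeyError for unknown keys
    assert _cache_key_value(lookup, 'a', cache) == (1, False)
    assert _cache_key_value(lookup, 'a', cache) == (1, True)
    assert _cache_key_value(lookup, 'b', cache) == (None, False)
    assert _cache_key_value(lookup, 'b', cache) == (None, True)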
_uid_to_pwd_cache = {}
_name_to_pwd_cache = {}
def pwd_from_uid(uid):
"""Return password database entry for uid (may be a cached value).
Return None if no entry is found.
"""
global _uid_to_pwd_cache, _name_to_pwd_cache
entry, cached = _cache_key_value(pwd.getpwuid, uid, _uid_to_pwd_cache)
if entry and not cached:
_name_to_pwd_cache[entry.pw_name] = entry
return entry
def pwd_from_name(name):
"""Return password database entry for name (may be a cached value).
Return None if no entry is found.
"""
global _uid_to_pwd_cache, _name_to_pwd_cache
entry, cached = _cache_key_value(pwd.getpwnam, name, _name_to_pwd_cache)
if entry and not cached:
_uid_to_pwd_cache[entry.pw_uid] = entry
return entry
_gid_to_grp_cache = {}
_name_to_grp_cache = {}
def grp_from_gid(gid):
"""Return password database entry for gid (may be a cached value).
Return None if no entry is found.
"""
global _gid_to_grp_cache, _name_to_grp_cache
entry, cached = _cache_key_value(grp.getgrgid, gid, _gid_to_grp_cache)
if entry and not cached:
_name_to_grp_cache[entry.gr_name] = entry
return entry
def grp_from_name(name):
"""Return password database entry for name (may be a cached value).
Return None if no entry is found.
"""
global _gid_to_grp_cache, _name_to_grp_cache
entry, cached = _cache_key_value(grp.getgrnam, name, _name_to_grp_cache)
if entry and not cached:
_gid_to_grp_cache[entry.gr_gid] = entry
return entry
_username = None
def username():
"""Get the user's login name."""
global _username
if not _username:
uid = os.getuid()
_username = pwd_from_uid(uid)[0] or 'user%d' % uid
return _username
_userfullname = None
def userfullname():
"""Get the user's full name."""
global _userfullname
if not _userfullname:
uid = os.getuid()
entry = pwd_from_uid(uid)
if entry:
_userfullname = entry[4].split(',')[0] or entry[0]
if not _userfullname:
_userfullname = 'user%d' % uid
return _userfullname
_hostname = None
def hostname():
"""Get the FQDN of this machine."""
global _hostname
if not _hostname:
_hostname = socket.getfqdn()
return _hostname
_resource_path = None
def resource_path(subdir=''):
global _resource_path
if not _resource_path:
_resource_path = os.environ.get('BUP_RESOURCE_PATH') or '.'
return os.path.join(_resource_path, subdir)
def format_filesize(size):
unit = 1024.0
size = float(size)
if size < unit:
return "%d" % (size)
exponent = int(math.log(size) / math.log(unit))
size_prefix = "KMGTPE"[exponent - 1]
return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
class NotOk(Exception):
pass
class BaseConn:
def __init__(self, outp):
self.outp = outp
def close(self):
while self._read(65536): pass
def read(self, size):
"""Read 'size' bytes from input stream."""
self.outp.flush()
return self._read(size)
def readline(self):
"""Read from input stream until a newline is found."""
self.outp.flush()
return self._readline()
def write(self, data):
"""Write 'data' to output stream."""
#log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
self.outp.write(data)
def has_input(self):
"""Return true if input stream is readable."""
        raise NotImplementedError("Subclasses must implement has_input")
def ok(self):
"""Indicate end of output from last sent command."""
self.write('\nok\n')
def error(self, s):
"""Indicate server error to the client."""
s = re.sub(r'\s+', ' ', str(s))
self.write('\nerror %s\n' % s)
def _check_ok(self, onempty):
self.outp.flush()
rl = ''
for rl in linereader(self):
#log('%d got line: %r\n' % (os.getpid(), rl))
if not rl: # empty line
continue
elif rl == 'ok':
return None
elif rl.startswith('error '):
#log('client: error: %s\n' % rl[6:])
return NotOk(rl[6:])
else:
onempty(rl)
raise Exception('server exited unexpectedly; see errors above')
def drain_and_check_ok(self):
"""Remove all data for the current command from input stream."""
def onempty(rl):
pass
return self._check_ok(onempty)
def check_ok(self):
"""Verify that server action completed successfully."""
def onempty(rl):
raise Exception('expected "ok", got %r' % rl)
return self._check_ok(onempty)
class Conn(BaseConn):
def __init__(self, inp, outp):
BaseConn.__init__(self, outp)
self.inp = inp
def _read(self, size):
return self.inp.read(size)
def _readline(self):
return self.inp.readline()
def has_input(self):
[rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
if rl:
assert(rl[0] == self.inp.fileno())
return True
else:
return None
def checked_reader(fd, n):
while n > 0:
rl, _, _ = select.select([fd], [], [])
assert(rl[0] == fd)
buf = os.read(fd, n)
if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
yield buf
n -= len(buf)
MAX_PACKET = 128 * 1024
def mux(p, outfd, outr, errr):
try:
fds = [outr, errr]
while p.poll() is None:
rl, _, _ = select.select(fds, [], [])
for fd in rl:
if fd == outr:
buf = os.read(outr, MAX_PACKET)
if not buf: break
os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
elif fd == errr:
buf = os.read(errr, 1024)
if not buf: break
os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
finally:
os.write(outfd, struct.pack('!IB', 0, 3))
class DemuxConn(BaseConn):
"""A helper class for bup's client-server protocol."""
def __init__(self, infd, outp):
BaseConn.__init__(self, outp)
# Anything that comes through before the sync string was not
# multiplexed and can be assumed to be debug/log before mux init.
tail = ''
while tail != 'BUPMUX':
b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
if not b:
raise IOError('demux: unexpected EOF during initialization')
tail += b
sys.stderr.write(tail[:-6]) # pre-mux log messages
tail = tail[-6:]
self.infd = infd
self.reader = None
self.buf = None
self.closed = False
def write(self, data):
self._load_buf(0)
BaseConn.write(self, data)
def _next_packet(self, timeout):
if self.closed: return False
rl, wl, xl = select.select([self.infd], [], [], timeout)
if not rl: return False
assert(rl[0] == self.infd)
ns = ''.join(checked_reader(self.infd, 5))
n, fdw = struct.unpack('!IB', ns)
assert(n <= MAX_PACKET)
if fdw == 1:
self.reader = checked_reader(self.infd, n)
elif fdw == 2:
for buf in checked_reader(self.infd, n):
sys.stderr.write(buf)
elif fdw == 3:
self.closed = True
debug2("DemuxConn: marked closed\n")
return True
def _load_buf(self, timeout):
if self.buf is not None:
return True
while not self.closed:
while not self.reader:
if not self._next_packet(timeout):
return False
try:
self.buf = self.reader.next()
return True
except StopIteration:
self.reader = None
return False
def _read_parts(self, ix_fn):
while self._load_buf(None):
assert(self.buf is not None)
i = ix_fn(self.buf)
if i is None or i == len(self.buf):
yv = self.buf
self.buf = None
else:
yv = self.buf[:i]
self.buf = self.buf[i:]
yield yv
if i is not None:
break
def _readline(self):
def find_eol(buf):
try:
return buf.index('\n')+1
except ValueError:
return None
return ''.join(self._read_parts(find_eol))
def _read(self, size):
csize = [size]
def until_size(buf): # Closes on csize
if len(buf) < csize[0]:
csize[0] -= len(buf)
return None
else:
return csize[0]
return ''.join(self._read_parts(until_size))
def has_input(self):
return self._load_buf(0)
def linereader(f):
"""Generate a list of input lines from 'f' without terminating newlines."""
while 1:
line = f.readline()
if not line:
break
yield line[:-1]
def chunkyreader(f, count = None):
"""Generate a list of chunks of data read from 'f'.
If count is None, read until EOF is reached.
If count is a positive integer, read 'count' bytes from 'f'. If EOF is
reached while reading, raise IOError.
"""
if count != None:
while count > 0:
b = f.read(min(count, 65536))
if not b:
raise IOError('EOF with %d bytes remaining' % count)
yield b
count -= len(b)
else:
while 1:
b = f.read(65536)
if not b: break
yield b
@contextmanager
def atomically_replaced_file(name, mode='w', buffering=-1):
"""Yield a file that will be atomically renamed name when leaving the block.
This contextmanager yields an open file object that is backed by a
temporary file which will be renamed (atomically) to the target
name if everything succeeds.
The mode and buffering arguments are handled exactly as with open,
and the yielded file will have very restrictive permissions, as
per mkstemp.
E.g.::
with atomically_replaced_file('foo.txt', 'w') as f:
f.write('hello jack.')
"""
(ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
text=('b' not in mode))
try:
try:
f = os.fdopen(ffd, mode, buffering)
except:
os.close(ffd)
raise
try:
yield f
finally:
f.close()
os.rename(tempname, name)
finally:
        unlink(tempname) # nonexistent file is ignored
def slashappend(s):
"""Append "/" to 's' if it doesn't aleady end in "/"."""
if s and not s.endswith('/'):
return s + '/'
else:
return s
def _mmap_do(f, sz, flags, prot, close):
if not sz:
st = os.fstat(f.fileno())
sz = st.st_size
if not sz:
# trying to open a zero-length map gives an error, but an empty
# string has all the same behaviour of a zero-length map, ie. it has
# no elements :)
return ''
map = mmap.mmap(f.fileno(), sz, flags, prot)
if close:
f.close() # map will persist beyond file close
return map
def mmap_read(f, sz = 0, close=True):
"""Create a read-only memory mapped region on file 'f'.
If sz is 0, the region will cover the entire file.
"""
return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
def mmap_readwrite(f, sz = 0, close=True):
"""Create a read-write memory mapped region on file 'f'.
If sz is 0, the region will cover the entire file.
"""
return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
close)
def mmap_readwrite_private(f, sz = 0, close=True):
"""Create a read-write memory mapped region on file 'f'.
If sz is 0, the region will cover the entire file.
The map is private, which means the changes are never flushed back to the
file.
"""
return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
close)
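# Illustrative sketch (not part of upstream bup): a private read-write map can
# be modified without the changes ever reaching the underlying file. Assumes
# 'path' names an existing file at least five bytes long.
def _example_mmap_readwrite_private_usage(path):
    f = open(path, 'rb')
    m = mmap_readwrite_private(f)  # close=True by default; the map persists
    m[0:5] = 'hello'               # modifies only the in-memory copy
    return m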
_mincore = getattr(_helpers, 'mincore', None)
if _mincore:
# ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
_fmincore_chunk_size = None
def _set_fmincore_chunk_size():
global _fmincore_chunk_size
pref_chunk_size = 64 * 1024 * 1024
chunk_size = sc_page_size
if (sc_page_size < pref_chunk_size):
chunk_size = sc_page_size * (pref_chunk_size / sc_page_size)
_fmincore_chunk_size = chunk_size
def fmincore(fd):
"""Return the mincore() data for fd as a bytearray whose values can be
tested via MINCORE_INCORE, or None if fd does not fully
support the operation."""
st = os.fstat(fd)
if (st.st_size == 0):
return bytearray(0)
if not _fmincore_chunk_size:
_set_fmincore_chunk_size()
pages_per_chunk = _fmincore_chunk_size / sc_page_size;
page_count = (st.st_size + sc_page_size - 1) / sc_page_size;
chunk_count = page_count / _fmincore_chunk_size
if chunk_count < 1:
chunk_count = 1
result = bytearray(page_count)
for ci in xrange(chunk_count):
pos = _fmincore_chunk_size * ci;
msize = min(_fmincore_chunk_size, st.st_size - pos)
try:
m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
except mmap.error as ex:
if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
# Perhaps the file was a pipe, i.e. "... | bup split ..."
return None
raise ex
        _mincore(m, msize, 0, result, ci * pages_per_chunk)
return result
def parse_timestamp(epoch_str):
"""Return the number of nanoseconds since the epoch that are described
by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
throw a ValueError that may contain additional information."""
ns_per = {'s' : 1000000000,
'ms' : 1000000,
'us' : 1000,
'ns' : 1}
match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
if not match:
if re.match(r'^([-+]?[0-9]+)$', epoch_str):
raise ValueError('must include units, i.e. 100ns, 100ms, ...')
raise ValueError()
(n, units) = match.group(1, 2)
if not n:
n = 1
n = int(n)
return n * ns_per[units]
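# A few illustrative calls (not part of the original module), showing the
# accepted formats:
#
#   parse_timestamp('100ms')   # -> 100000000 nanoseconds
#   parse_timestamp('-5s')     # -> -5000000000 nanoseconds
#   parse_timestamp('100')     # raises ValueError: units are required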
def parse_num(s):
"""Parse data size information into a float number.
Here are some examples of conversions:
199.2k means 203981 bytes
1GB means 1073741824 bytes
2.1 tb means 2199023255552 bytes
"""
g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
if not g:
raise ValueError("can't parse %r as a number" % s)
(val, unit) = g.groups()
num = float(val)
unit = unit.lower()
if unit in ['t', 'tb']:
mult = 1024*1024*1024*1024
elif unit in ['g', 'gb']:
mult = 1024*1024*1024
elif unit in ['m', 'mb']:
mult = 1024*1024
elif unit in ['k', 'kb']:
mult = 1024
elif unit in ['', 'b']:
mult = 1
else:
raise ValueError("invalid unit %r in number %r" % (unit, s))
return int(num*mult)
def count(l):
"""Count the number of elements in an iterator. (consumes the iterator)"""
return reduce(lambda x,y: x+1, l)
saved_errors = []
def add_error(e):
"""Append an error message to the list of saved errors.
Once processing is able to stop and output the errors, the saved errors are
accessible in the module variable helpers.saved_errors.
"""
saved_errors.append(e)
log('%-70s\n' % e)
def clear_errors():
global saved_errors
saved_errors = []
def handle_ctrl_c():
"""Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
The new exception handler will make sure that bup will exit without an ugly
stacktrace when Ctrl-C is hit.
"""
oldhook = sys.excepthook
def newhook(exctype, value, traceback):
if exctype == KeyboardInterrupt:
log('\nInterrupted.\n')
else:
return oldhook(exctype, value, traceback)
sys.excepthook = newhook
def columnate(l, prefix):
"""Format elements of 'l' in columns with 'prefix' leading each line.
The number of columns is determined automatically based on the string
lengths.
"""
if not l:
return ""
l = l[:]
clen = max(len(s) for s in l)
ncols = (tty_width() - len(prefix)) / (clen + 2)
if ncols <= 1:
ncols = 1
clen = 0
cols = []
while len(l) % ncols:
l.append('')
rows = len(l)/ncols
for s in range(0, len(l), rows):
cols.append(l[s:s+rows])
out = ''
for row in zip(*cols):
out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
return out
def parse_date_or_fatal(str, fatal):
"""Parses the given date or calls Option.fatal().
For now we expect a string that contains a float."""
try:
date = float(str)
except ValueError as e:
raise fatal('invalid date format (should be a float): %r' % e)
else:
return date
def parse_excludes(options, fatal):
"""Traverse the options and extract all excludes, or call Option.fatal()."""
excluded_paths = []
for flag in options:
(option, parameter) = flag
if option == '--exclude':
excluded_paths.append(realpath(parameter))
elif option == '--exclude-from':
try:
f = open(realpath(parameter))
except IOError as e:
raise fatal("couldn't read %s" % parameter)
for exclude_path in f.readlines():
# FIXME: perhaps this should be rstrip('\n')
exclude_path = realpath(exclude_path.strip())
if exclude_path:
excluded_paths.append(exclude_path)
return sorted(frozenset(excluded_paths))
def parse_rx_excludes(options, fatal):
"""Traverse the options and extract all rx excludes, or call
Option.fatal()."""
excluded_patterns = []
for flag in options:
(option, parameter) = flag
if option == '--exclude-rx':
try:
excluded_patterns.append(re.compile(parameter))
except re.error as ex:
fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
elif option == '--exclude-rx-from':
try:
f = open(realpath(parameter))
except IOError as e:
raise fatal("couldn't read %s" % parameter)
for pattern in f.readlines():
spattern = pattern.rstrip('\n')
if not spattern:
continue
try:
excluded_patterns.append(re.compile(spattern))
except re.error as ex:
fatal('invalid --exclude-rx pattern (%s): %s' % (spattern, ex))
return excluded_patterns
def should_rx_exclude_path(path, exclude_rxs):
"""Return True if path matches a regular expression in exclude_rxs."""
for rx in exclude_rxs:
if rx.search(path):
debug1('Skipping %r: excluded by rx pattern %r.\n'
% (path, rx.pattern))
return True
return False
# FIXME: Carefully consider the use of functions (os.path.*, etc.)
# that resolve against the current filesystem in the strip/graft
# functions for example, but elsewhere as well. I suspect bup's not
# always being careful about that. For some cases, the contents of
# the current filesystem should be irrelevant, and consulting it might
# produce the wrong result, perhaps via unintended symlink resolution,
# for example.
def path_components(path):
"""Break path into a list of pairs of the form (name,
full_path_to_name). Path must start with '/'.
Example:
'/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
if not path.startswith('/'):
        raise Exception('path must start with "/": %s' % path)
# Since we assume path startswith('/'), we can skip the first element.
result = [('', '/')]
norm_path = os.path.abspath(path)
if norm_path == '/':
return result
full_path = ''
for p in norm_path.split('/')[1:]:
full_path += '/' + p
result.append((p, full_path))
return result
def stripped_path_components(path, strip_prefixes):
"""Strip any prefix in strip_prefixes from path and return a list
of path components where each component is (name,
none_or_full_fs_path_to_name). Assume path startswith('/').
See thelpers.py for examples."""
normalized_path = os.path.abspath(path)
sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
for bp in sorted_strip_prefixes:
normalized_bp = os.path.abspath(bp)
if normalized_bp == '/':
continue
if normalized_path.startswith(normalized_bp):
prefix = normalized_path[:len(normalized_bp)]
result = []
for p in normalized_path[len(normalized_bp):].split('/'):
if p: # not root
prefix += '/'
prefix += p
result.append((p, prefix))
return result
# Nothing to strip.
return path_components(path)
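# Illustrative sketch (hypothetical paths, not part of the original module):
#
#   stripped_path_components('/home/user/src', ['/home/user'])
#   # -> roughly [('', '/home/user'), ('src', '/home/user/src')]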
def grafted_path_components(graft_points, path):
# Create a result that consists of some number of faked graft
# directories before the graft point, followed by all of the real
# directories from path that are after the graft point. Arrange
# for the directory at the graft point in the result to correspond
# to the "orig" directory in --graft orig=new. See t/thelpers.py
# for some examples.
# Note that given --graft orig=new, orig and new have *nothing* to
# do with each other, even if some of their component names
# match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
# equivalent to --graft /foo/bar/baz=/x/y/z, or even
# /foo/bar/baz=/x.
# FIXME: This can't be the best solution...
clean_path = os.path.abspath(path)
for graft_point in graft_points:
old_prefix, new_prefix = graft_point
# Expand prefixes iff not absolute paths.
old_prefix = os.path.normpath(old_prefix)
new_prefix = os.path.normpath(new_prefix)
if clean_path.startswith(old_prefix):
escaped_prefix = re.escape(old_prefix)
grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path)
# Handle /foo=/ (at least) -- which produces //whatever.
grafted_path = '/' + grafted_path.lstrip('/')
clean_path_components = path_components(clean_path)
# Count the components that were stripped.
strip_count = 0 if old_prefix == '/' else old_prefix.count('/')
new_prefix_parts = new_prefix.split('/')
result_prefix = grafted_path.split('/')[:new_prefix.count('/')]
result = [(p, None) for p in result_prefix] \
+ clean_path_components[strip_count:]
# Now set the graft point name to match the end of new_prefix.
graft_point = len(result_prefix)
result[graft_point] = \
(new_prefix_parts[-1], clean_path_components[strip_count][1])
if new_prefix == '/': # --graft ...=/ is a special case.
return result[1:]
return result
return path_components(clean_path)
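# Illustrative sketch (hypothetical graft, not part of the original module):
# with --graft /foo/bar=/x, a path below /foo/bar is reported as if it lived
# below /x, e.g.
#
#   grafted_path_components([('/foo/bar', '/x')], '/foo/bar/baz')
#   # -> faked components for '/x', then the real ('baz', '/foo/bar/baz')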
Sha1 = hashlib.sha1
_localtime = getattr(_helpers, 'localtime', None)
if _localtime:
bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
'tm_hour', 'tm_min', 'tm_sec',
'tm_wday', 'tm_yday',
'tm_isdst', 'tm_gmtoff', 'tm_zone'])
# Define a localtime() that returns bup_time when possible. Note:
# this means that any helpers.localtime() results may need to be
# passed through to_py_time() before being passed to python's time
# module, which doesn't appear willing to ignore the extra items.
if _localtime:
def localtime(time):
return bup_time(*_helpers.localtime(time))
def utc_offset_str(t):
"""Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
If the current UTC offset does not represent an integer number
of minutes, the fractional component will be truncated."""
off = localtime(t).tm_gmtoff
# Note: // doesn't truncate like C for negative values, it rounds down.
offmin = abs(off) // 60
m = offmin % 60
h = (offmin - m) // 60
return "%+03d%02d" % (-h if off < 0 else h, m)
def to_py_time(x):
if isinstance(x, time.struct_time):
return x
return time.struct_time(x[:9])
else:
localtime = time.localtime
def utc_offset_str(t):
return time.strftime('%z', localtime(t))
def to_py_time(x):
return x
| lgpl-2.1 | 5,800,003,140,876,239,000 | 30.379278 | 83 | 0.575838 | false |
kerrickstaley/wssh | test-server.py | 1 | 1078 | #!/usr/bin/env python2
import threading
import time
# HTTP-related
import SimpleHTTPServer
import SocketServer
import os
# websocket-related
from gevent import monkey; monkey.patch_all()
from ws4py.server.geventserver import WebSocketServer
from ws4py.websocket import WebSocket
import random
class HTTPThread(threading.Thread):
daemon = True
def run(self):
os.chdir('data')
class MyTCPServer(SocketServer.TCPServer):
allow_reuse_address = True
server = MyTCPServer(('', 8000), SimpleHTTPServer.SimpleHTTPRequestHandler)
server.serve_forever()
class PrintWebSocket(WebSocket):
def received_message(self, message):
if random.random() < 0.2:
self.send(r'{"output": "\r\nHere is some output!\r\n"}');
print message
class WebsocketThread(threading.Thread):
daemon = True
def run(self):
server = WebSocketServer(('127.0.0.1', 8001), websocket_class=PrintWebSocket)
server.serve_forever()
HTTPThread().start()
WebsocketThread().start()
while True:
time.sleep(1)
| mit | -1,019,747,312,209,264,300 | 25.95 | 85 | 0.694805 | false |
cadecolvin/ToDo | todo/core.py | 1 | 3052 | import datetime
import pickle
from textwrap import TextWrapper
class Item:
"""Represents a ToDo item.
Args:
name (str): The name/title of the item
description (str): A longer description of the item
"""
def __init__(self, name, description):
self.name = name
self.description = description
self.create_date = datetime.date.today()
self.complete_date = datetime.date.today()
self.completed = False
self.notes = list()
def __str__(self):
return self.name
def format(self, id, width, verbose):
"""Formats the todo item to fit on the screen
Returns a string representation of the todo item that fits
within a terminal window of `width`.
Args:
id (int): The id of the todo item to format
width (int): The width of the screen to format for
verbose (bool):
If True, include all `notes`
If False, only include title
Returns:
str: A string formatted according to `width` and `verbose`.
"""
if self.completed:
title = f'{id}|{self.complete_date} -- {self.name}'
else:
title = f'{id}|{self.create_date} -- {self.name}'
if not verbose:
return title
wrapper = TextWrapper(width=width, expand_tabs=True)
wrapper.initial_indent = ' -'
wrapper.subsequent_indent = ' '
wrapped_desc = wrapper.fill(self.description)
wrapped_notes = list()
for note in self.notes:
wrapped_notes.append(wrapper.fill(note))
wrapped_notes = '\n'.join(wrapped_notes)
return '\n'.join([title, wrapped_desc, wrapped_notes])
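# Illustrative usage sketch for Item (not part of the original module):
#
#   item = Item('Buy milk', 'Semi-skimmed, two litres')
#   item.notes.append('Check the corner shop first')
#   print(item.format(id=1, width=80, verbose=True))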
class ItemManager:
"""Saves and loads the items at the path specified
Args:
file_path (str): The path to store/load todo items
"""
def __init__(self, file_path):
self.file_path = file_path
        self.items = list()
        self.open_items = list()
        self.completed_items = list()
def __enter__(self):
self.load()
return self
def __exit__(self, type, value, traceback):
self.save()
def save(self):
with open(self.file_path, 'wb') as f:
all_items = self.open_items + self.completed_items
pickle.dump(all_items, f)
def load(self):
try:
with open(self.file_path, 'rb') as f:
self.items = pickle.load(f)
self.open_items = [item for item in self.items if item.completed == False]
self.completed_items = [item for item in self.items if item.completed == True]
except:
print('Unknown error. Please run \'todo -h\' for help')
def initialize(self):
self.items = list()
open(self.file_path, 'w').close()
def complete(self, id):
self.open_items[id].complete_date = datetime.date.today()
self.open_items[id].completed = True
self.completed_items.append(self.open_items[id])
del self.open_items[id]
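# Illustrative usage sketch ('todo.db' is a hypothetical path); entering the
# context calls load() and leaving it calls save(), so changes made inside
# the block are persisted automatically:
#
#   with ItemManager('todo.db') as manager:
#       manager.open_items.append(Item('Write docs', 'For the CLI'))
#       manager.complete(0)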
| gpl-3.0 | 6,191,962,772,768,519,000 | 28.066667 | 94 | 0.571101 | false |
materialsproject/MPContribs | mpcontribs-ingester/mpcontribs/ingester/webui.py | 1 | 9875 | from __future__ import unicode_literals, print_function, absolute_import
import json, os, socket, codecs, time, psutil
import sys, warnings, multiprocessing
from tempfile import gettempdir
from flask import render_template, request, Response, Blueprint, current_app
from flask import url_for, redirect, make_response, stream_with_context, jsonify
from mpcontribs.utils import process_mpfile, submit_mpfile
from mpcontribs.users_modules import *
from whichcraft import which
from subprocess import call
default_mpfile_path = os.path.join(gettempdir(), "mpfile.txt")
try:
import SocketServer as socketserver
except ImportError:
import socketserver
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates")
stat_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "static")
ingester_bp = Blueprint(
"webui_ingester", __name__, template_folder=tmpl_dir, static_folder=stat_dir
)
session = {}
projects = {}
for mod_path in get_users_modules():
mod = os.path.basename(mod_path)
path = os.path.join(mod_path, "mpfile_init.txt")
if os.path.exists(path):
projects[mod] = codecs.open(path, encoding="utf-8").read()
else:
projects[mod] = ""
def patched_finish(self):
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
socketserver.StreamRequestHandler.finish = patched_finish
processes = {"NotebookProcess": None, "MongodProcess": None}
class NotebookProcess(multiprocessing.Process):
def __init__(self):
super(NotebookProcess, self).__init__(name="NotebookProcess")
def run(self):
from IPython.terminal.ipapp import launch_new_instance
sys.argv[1:] = []
warnings.filterwarnings("ignore", module="zmq.*")
sys.argv.append("notebook")
sys.argv.append("--IPKernelApp.pylab='inline'")
sys.argv.append("--NotebookApp.ip=0.0.0.0")
sys.argv.append("--NotebookApp.open_browser=False")
sys.argv.append('--NotebookApp.allow_origin="*"')
# sys.argv.append('--NotebookApp.port_retries=0')
launch_new_instance()
class MongodProcess(multiprocessing.Process):
def __init__(self):
super(MongodProcess, self).__init__(name="MongodProcess")
def run(self):
if which("mongod"):
cwd = os.path.join(os.path.dirname(__file__), "..", "..")
dbpath = os.path.join("/", "data", "db")
if not os.path.exists(dbpath):
dbpath = os.path.join(cwd, "db")
logpath = os.path.join(dbpath, "mongodb-mpcontribs.log")
call(["mongod", "--dbpath", dbpath, "--logpath", logpath, "--logappend"])
print("mongod started.")
else:
print("install MongoDB to use local DB instance.")
def start_processes():
global processes
for process_name in processes.keys():
if not processes[process_name]:
processes[process_name] = globals()[process_name]()
processes[process_name].start()
def stop_processes():
global processes
for process_name in processes.keys():
if processes[process_name]:
if process_name != "MongodProcess":
processes[process_name].terminate()
time.sleep(1)
processes[process_name] = None
parent = psutil.Process(os.getpid())
for child in parent.children(recursive=True):
if child.name() == "mongod":
child.kill()
print("killed mongod")
def stream_template(template_name, **context):
# http://stackoverflow.com/questions/13386681/streaming-data-with-python-and-flask
# http://flask.pocoo.org/docs/patterns/streaming/#streaming-from-templates
current_app.update_template_context(context)
t = current_app.jinja_env.get_template(template_name)
rv = t.stream(context)
return rv
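# Illustrative sketch (hypothetical generator name): stream_template lets a
# view send template output incrementally instead of rendering it all at
# once, e.g.
#
#   return Response(stream_with_context(
#       stream_template('index.html', session=session,
#                       content=some_generator())))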
def reset_session():
global session, processes
current_app.config["JSON_SORT_KEYS"] = False
current_app.secret_key = "xxxrrr"
session.clear()
session["projects"] = projects
session["options"] = ["archieml"]
session["contribute"] = {}
sbx_content = current_app.config.get("SANDBOX_CONTENT")
if sbx_content is not None:
session["sbx_content"] = sbx_content
session["jupyter_url"] = current_app.config.get("JUPYTER_URL")
if not current_app.config.get("START_JUPYTER") and "NotebookProcess" in processes:
processes.pop("NotebookProcess")
if not current_app.config.get("START_MONGODB") and "MongodProcess" in processes:
processes.pop("MongodProcess")
stop_processes()
start_processes()
for suffix in ["_in.txt", "_out.txt"]:
filepath = default_mpfile_path.replace(".txt", suffix)
if os.path.exists(filepath):
os.remove(filepath)
def read_mpfile_to_view():
output_mpfile_path = default_mpfile_path.replace(".txt", "_out.txt")
if os.path.exists(output_mpfile_path):
return open(output_mpfile_path).read()
else:
return session.get("mpfile")
@ingester_bp.route("/view")
@ingester_bp.route("/view/<identifier>/<cid_short>")
def view(identifier=None, cid_short=None):
mpfile = read_mpfile_to_view()
if mpfile is None:
return render_template("home.html", alert="Choose an MPFile!", session=session)
fmt = session["options"][0]
try:
mpfile_stringio = StringIO(mpfile)
if identifier is None or cid_short is None:
response = Response(
stream_with_context(
stream_template(
"index.html",
session=session,
content=process_mpfile(mpfile_stringio, fmt=fmt),
)
)
)
response.headers["X-Accel-Buffering"] = "no"
return response
else:
ids = [identifier, cid_short]
iterator = process_mpfile(mpfile_stringio, fmt=fmt, ids=ids)
for it in iterator:
if isinstance(it, list):
d = jsonify(it)
return d
except Exception:
pass
@ingester_bp.route("/")
def home():
reset_session()
return render_template("home.html", session=session)
@ingester_bp.route("/load")
def load():
mpfile = session.get("mpfile")
if mpfile is None:
return render_template("home.html", alert="Choose an MPFile!", session=session)
input_mpfile_path = default_mpfile_path.replace(".txt", "_in.txt")
with codecs.open(input_mpfile_path, encoding="utf-8", mode="w") as f:
f.write(mpfile)
return render_template("home.html", session=session)
@ingester_bp.route("/contribute", methods=["GET", "POST"])
def contribute():
session["scheme"] = "https" if os.environ.get("DEPLOYMENT") == "MATGEN" else "http"
if request.method == "GET":
return render_template("contribute.html", session=session)
elif request.method == "POST":
for k in request.form:
v = session["contribute"].get(k)
if not v or (request.form[k] and request.form[k] != v):
session["contribute"][k] = request.form[k]
if not session["contribute"].get("site"):
return render_template(
"contribute.html", session=session, missing="site not set!"
)
mpfile = read_mpfile_to_view()
if mpfile is None:
return render_template(
"home.html", alert="Choose an MPFile!", session=session
)
fmt = session["options"][0]
try:
response = Response(
stream_with_context(
stream_template(
"contribute.html",
session=session,
content=submit_mpfile(
StringIO(mpfile),
site=session["contribute"]["site"],
fmt=fmt,
project=session["options"][1],
),
)
)
)
response.headers["X-Accel-Buffering"] = "no"
return response
except Exception:
pass
@ingester_bp.route("/action", methods=["POST"])
def action():
session["options"] = json.loads(request.form.get("options"))
thebe_str = request.form.get("thebe")
if thebe_str:
session["thebe"] = "\n".join(json.loads(thebe_str))
# fmt = session['options'][0]
mpfile = request.files.get("file", StringIO()).read().decode("utf-8-sig")
if not mpfile:
mpfile = request.form.get("mpfile")
if not mpfile:
mpfile = session.get("mpfile")
if not mpfile:
return render_template(
"home.html", alert="Choose an MPFile!", session=session
)
session["mpfile"] = mpfile
if request.form["submit"] == "Load MPFile":
return redirect(url_for(".load"))
elif request.form["submit"] == "View MPFile":
return redirect(url_for(".view"))
elif request.form["submit"] == "Save MPFile":
response = make_response(read_mpfile_to_view())
response.headers["Content-Disposition"] = "attachment; filename=mpfile.txt"
return response
elif request.form["submit"] == "Contribute":
return redirect(url_for(".contribute"))
@ingester_bp.route("/shutdown", methods=["GET", "POST"])
def shutdown():
stop_processes()
func = request.environ.get("werkzeug.server.shutdown")
if func is None:
raise RuntimeError("Not running with the Werkzeug Server")
func()
return "Server shutting down..."
| mit | -176,121,219,628,774,750 | 34.01773 | 87 | 0.601013 | false |
hans/adversarial | distributions.py | 1 | 3175 | """Defines distributions from which to sample conditional data."""
import numpy as np
from pylearn2.format.target_format import OneHotFormatter
from pylearn2.space import VectorSpace
from pylearn2.utils import sharedX
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
class Distribution(object):
def __init__(self, space):
self.space = space
def get_space(self):
return self.space
def get_total_dimension(self):
return self.space.get_total_dimension()
def sample(self, n):
"""
Parameters
----------
n : integer
Number of samples to generate
Returns
-------
samples : batch of members of output space
"""
raise NotImplementedError("abstract method")
class OneHotDistribution(Distribution):
"""Randomly samples from a distribution of one-hot vectors."""
def __init__(self, space, rng=None):
super(OneHotDistribution, self).__init__(space)
self.dim = space.get_total_dimension()
self.formatter = OneHotFormatter(self.dim, dtype=space.dtype)
self.rng = RandomStreams() if rng is None else rng
def sample(self, n):
idxs = self.rng.random_integers((n, 1), low=0, high=self.dim - 1)
return self.formatter.theano_expr(idxs, mode='concatenate')
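# Illustrative usage sketch (dimension and sample count chosen arbitrarily):
#
#   space = VectorSpace(dim=10)
#   dist = OneHotDistribution(space)
#   samples = dist.sample(5)             # symbolic batch of 5 one-hot rows
#   f = theano.function([], samples)
#   print(f().shape)                     # expected to be (5, 10)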
class KernelDensityEstimateDistribution(Distribution):
"""Randomly samples from a kernel density estimate yielded by a set
of training points.
Simple sampling procedure [1]:
1. With training points $x_1, ... x_n$, sample a point $x_i$
uniformly
2. From original KDE, we have a kernel defined at point $x_i$;
sample randomly from this kernel
[1]: http://www.stat.cmu.edu/~cshalizi/350/lectures/28/lecture-28.pdf
"""
def __init__(self, X, bandwidth=1, space=None, rng=None):
"""
Parameters
----------
X : ndarray of shape (num_examples, num_features)
Training examples from which to generate a kernel density
estimate
bandwidth : float
Bandwidth (or h, or sigma) of the generated kernels
"""
assert X.ndim == 2
if space is None:
space = VectorSpace(dim=X.shape[1], dtype=X.dtype)
# super(KernelDensityEstimateDistribution, self).__init__(space)
self.X = sharedX(X, name='KDE_X')
self.bandwidth = sharedX(bandwidth, name='bandwidth')
self.rng = RandomStreams() if rng is None else rng
def sample(self, n):
# Sample $n$ training examples
training_samples = self.X[self.rng.choice(size=(n,), a=self.X.shape[0], replace=True)]
# Sample individually from each selected associated kernel
#
# (not well documented within NumPy / Theano, but rng.normal
# call samples from a multivariate normal with diagonal
# covariance matrix)
ret = self.rng.normal(size=(n, self.X.shape[1]),
avg=training_samples, std=self.bandwidth,
dtype=theano.config.floatX)
return ret
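# Illustrative usage sketch (toy data, bandwidth chosen arbitrarily):
#
#   X = np.random.randn(100, 2).astype(theano.config.floatX)
#   kde = KernelDensityEstimateDistribution(X, bandwidth=0.5)
#   samples = kde.sample(10)             # symbolic (10, 2) batch
#   f = theano.function([], samples)
#   print(f().shape)                     # expected to be (10, 2)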
| bsd-3-clause | 1,724,738,664,900,313,300 | 29.825243 | 94 | 0.625197 | false |
mtik00/yamicache | tests/test_class.py | 1 | 4025 | from __future__ import print_function
import sys
import time
import pytest
from yamicache import Cache, nocache, override_timeout
c = Cache(prefix="myapp", hashing=False, debug=False)
class MyApp(object):
@c.cached()
def test1(self, argument, power):
"""running test1"""
return argument ** power
@c.cached()
def test2(self):
"""running test2"""
return 1
@c.cached(key="asdf")
def test3(self, argument, power):
"""running test3"""
return argument ** power
def test4(self):
"""running test4"""
return 4
@c.cached()
def cant_cache(self):
print("here")
@pytest.fixture
def cache_obj():
m = MyApp()
return m
def test_cached(cache_obj):
for _ in range(10):
cache_obj.test1(8, 0)
assert len(c) == 1
assert cache_obj.test1(8, 0) == 1
for _ in range(10):
cache_obj.test2()
assert cache_obj.test2() == 1
assert len(c) == 2
c.clear()
assert len(c) == 0
# Make sure the cached function is properly wrapped
assert cache_obj.test2.__doc__ == "running test2"
def test_keyed_cached(cache_obj):
for _ in range(10):
cache_obj.test3(8, 0)
cache_obj.test4() # Shouldn't be cached
assert len(c) == 1
key = list(c.keys())[0]
assert key == "asdf"
c.clear()
assert len(c) == 0
# Make sure the cached function is properly wrapped
assert cache_obj.test3.__doc__ == "running test3"
def test_utility(cache_obj):
for _ in range(10):
cache_obj.test1(8, 0)
cache_obj.test1(8, 2)
cache_obj.test1(8, 2) # Already cached
cache_obj.test2()
cache_obj.test3(8, 2)
assert len(c) == 4
assert c.dump() != "{}"
key = list(c.keys())[0]
c.pop(key)
assert len(c) == 3
assert key not in c
assert len(c.keys()) == 3
assert len(c.values()) == 3
assert c.items()
c.clear()
assert not c.items()
assert not c.keys()
assert not c.values()
assert not len(c)
assert c.dump() == "{}"
def test_counters(cache_obj):
c.clear()
c._debug = True
for _ in range(10):
cache_obj.test3(8, 2)
assert len(c.counters) == 1
assert c.counters["asdf"] == 9
print(c.dump())
c.counters.clear()
c.clear()
def test_nocache(cache_obj):
c.clear()
c._debug = False
assert len(c.counters) == 0
assert len(c) == 0
with nocache(c):
for _ in range(10):
cache_obj.test3(8, 2)
cache_obj.test4()
assert len(c.counters) == 0
assert len(c) == 0
def test_timeout(cache_obj):
c.clear()
c._debug = True
cache_obj.test3(8, 2)
cache_obj.test3(8, 2)
time.sleep(1)
c.collect()
assert len(c) == 1
c.collect(since=time.time() - 20)
assert len(c) == 0
with override_timeout(c, 1):
cache_obj.test3(8, 2)
cache_obj.test3(8, 2)
assert len(c) == 1
time.sleep(1.5)
c.collect()
assert len(c) == 0
c.clear()
# Test a call where the cache has timed out.
# For this test, we want to load the cache with our specified timeout
# value. Then wait longer than the timeout, and run the function again.
# The hit counter should remain the same, since we didn't read the value
# from cache.
with override_timeout(c, 1):
cache_obj.test3(8, 2)
cache_obj.test3(8, 2)
assert len(c.counters) == 1
before_count = list(c.counters.values())[0]
assert len(c) == 1
time.sleep(1.5)
cache_obj.test3(8, 2) # should be a new cache w/o the counter incrementing
assert len(c) == 1
assert list(c.counters.values())[0] == before_count
def test_prefix(cache_obj):
c.clear()
cache_obj.test1(8, 0)
key = list(c.keys())[0]
assert key.startswith("myapp|")
def main():
# test_utility(MyApp())
# test_nocache(MyApp())
# test_cached(MyApp())
test_timeout(MyApp())
if __name__ == "__main__":
main()
| mit | -3,754,619,742,474,733,600 | 18.925743 | 79 | 0.569689 | false |
jsurloppe/N14 | N14/names_generator.py | 1 | 31120 | # flake8: noqa
# port of docker names-generator
import random
left = [
"admiring",
"adoring",
"affectionate",
"agitated",
"amazing",
"angry",
"awesome",
"blissful",
"boring",
"brave",
"clever",
"cocky",
"compassionate",
"competent",
"condescending",
"confident",
"cranky",
"dazzling",
"determined",
"distracted",
"dreamy",
"eager",
"ecstatic",
"elastic",
"elated",
"elegant",
"eloquent",
"epic",
"fervent",
"festive",
"flamboyant",
"focused",
"friendly",
"frosty",
"gallant",
"gifted",
"goofy",
"gracious",
"happy",
"hardcore",
"heuristic",
"hopeful",
"hungry",
"infallible",
"inspiring",
"jolly",
"jovial",
"keen",
"kind",
"laughing",
"loving",
"lucid",
"mystifying",
"modest",
"musing",
"naughty",
"nervous",
"nifty",
"nostalgic",
"objective",
"optimistic",
"peaceful",
"pedantic",
"pensive",
"practical",
"priceless",
"quirky",
"quizzical",
"relaxed",
"reverent",
"romantic",
"sad",
"serene",
"sharp",
"silly",
"sleepy",
"stoic",
"stupefied",
"suspicious",
"tender",
"thirsty",
"trusting",
"unruffled",
"upbeat",
"vibrant",
"vigilant",
"vigorous",
"wizardly",
"wonderful",
"xenodochial",
"youthful",
"zealous",
"zen",
]
right = [
# Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https:#en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB
"albattani",
# Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https:#en.wikipedia.org/wiki/Frances_E._Allen
"allen",
# June Almeida - Scottish virologist who took the first pictures of the rubella virus - https:#en.wikipedia.org/wiki/June_Almeida
"almeida",
# Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https:#en.wikipedia.org/wiki/Maria_Gaetana_Agnesi
"agnesi",
# Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https:#en.wikipedia.org/wiki/Archimedes
"archimedes",
# Maria Ardinghelli - Italian translator, mathematician and physicist - https:#en.wikipedia.org/wiki/Maria_Ardinghelli
"ardinghelli",
# Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https:#en.wikipedia.org/wiki/Aryabhata
"aryabhata",
# Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https:#en.wikipedia.org/wiki/Wanda_Austin
"austin",
# Charles Babbage invented the concept of a programmable computer. https:#en.wikipedia.org/wiki/Charles_Babbage.
"babbage",
# Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https:#en.wikipedia.org/wiki/Stefan_Banach
"banach",
# John Bardeen co-invented the transistor - https:#en.wikipedia.org/wiki/John_Bardeen
"bardeen",
# Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https:#en.wikipedia.org/wiki/Jean_Bartik
"bartik",
# Laura Bassi, the world's first female professor https:#en.wikipedia.org/wiki/Laura_Bassi
"bassi",
# Hugh Beaver, British engineer, founder of the Guinness Book of World Records https:#en.wikipedia.org/wiki/Hugh_Beaver
"beaver",
# Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https:#en.wikipedia.org/wiki/Alexander_Graham_Bell
"bell",
# Karl Friedrich Benz - a German automobile engineer. Inventor of the first practical motorcar. https:#en.wikipedia.org/wiki/Karl_Benz
"benz",
# Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https:#en.wikipedia.org/wiki/Homi_J._Bhabha
"bhabha",
# Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https:#en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus
"bhaskara",
# Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https:#en.wikipedia.org/wiki/Elizabeth_Blackwell
"blackwell",
# Niels Bohr is the father of quantum theory. https:#en.wikipedia.org/wiki/Niels_Bohr.
"bohr",
# Kathleen Booth, she's credited with writing the first assembly language. https:#en.wikipedia.org/wiki/Kathleen_Booth
"booth",
# Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https:#en.wikipedia.org/wiki/Anita_Borg
"borg",
# Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https:#en.wikipedia.org/wiki/Satyendra_Nath_Bose
"bose",
# Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https:#en.wikipedia.org/wiki/Evelyn_Boyd_Granville
"boyd",
# Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https:#en.wikipedia.org/wiki/Brahmagupta#Zero
"brahmagupta",
# Walter Houser Brattain co-invented the transistor - https:#en.wikipedia.org/wiki/Walter_Houser_Brattain
"brattain",
# Emmett Brown invented time travel. https:#en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff)
"brown",
# Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https:#en.wikipedia.org/wiki/Rachel_Carson
"carson",
# Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https:#en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar
"chandrasekhar",
#Claude Shannon - The father of information theory and founder of digital circuit design theory. (https:#en.wikipedia.org/wiki/Claude_Shannon)
"shannon",
# Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https:#en.wikipedia.org/wiki/Joan_Clarke
"clarke",
# Jane Colden - American botanist widely considered the first female American botanist - https:#en.wikipedia.org/wiki/Jane_Colden
"colden",
# Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https:#en.wikipedia.org/wiki/Gerty_Cori
"cori",
# Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https:#en.wikipedia.org/wiki/Seymour_Cray
"cray",
# This entry reflects a husband and wife team who worked together:
# Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https:#en.wikipedia.org/wiki/Joan_Curran
# Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https:#en.wikipedia.org/wiki/Samuel_Curran
"curran",
# Marie Curie discovered radioactivity. https:#en.wikipedia.org/wiki/Marie_Curie.
"curie",
# Charles Darwin established the principles of natural evolution. https:#en.wikipedia.org/wiki/Charles_Darwin.
"darwin",
# Leonardo Da Vinci invented too many things to list here. https:#en.wikipedia.org/wiki/Leonardo_da_Vinci.
"davinci",
# Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https:#en.wikipedia.org/wiki/Edsger_W._Dijkstra.
"dijkstra",
# Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https:#en.wikipedia.org/wiki/Donna_Dubinsky
"dubinsky",
# Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https:#en.wikipedia.org/wiki/Annie_Easley
"easley",
# Thomas Alva Edison, prolific inventor https:#en.wikipedia.org/wiki/Thomas_Edison
"edison",
# Albert Einstein invented the general theory of relativity. https:#en.wikipedia.org/wiki/Albert_Einstein
"einstein",
# Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https:#en.wikipedia.org/wiki/Gertrude_Elion
"elion",
# Douglas Engelbart gave the mother of all demos: https:#en.wikipedia.org/wiki/Douglas_Engelbart
"engelbart",
# Euclid invented geometry. https:#en.wikipedia.org/wiki/Euclid
"euclid",
# Leonhard Euler invented large parts of modern mathematics. https:#de.wikipedia.org/wiki/Leonhard_Euler
"euler",
# Pierre de Fermat pioneered several aspects of modern mathematics. https:#en.wikipedia.org/wiki/Pierre_de_Fermat
"fermat",
# Enrico Fermi invented the first nuclear reactor. https:#en.wikipedia.org/wiki/Enrico_Fermi.
"fermi",
# Richard Feynman was a key contributor to quantum mechanics and particle physics. https:#en.wikipedia.org/wiki/Richard_Feynman
"feynman",
# Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod.
"franklin",
# Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https:#en.wikipedia.org/wiki/Galileo_Galilei
"galileo",
# William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https:#en.wikipedia.org/wiki/Bill_Gates
"gates",
# Adele Goldberg, was one of the designers and developers of the Smalltalk language. https:#en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist)
"goldberg",
# Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https:#en.wikipedia.org/wiki/Adele_Goldstine
"goldstine",
# Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https:#en.wikipedia.org/wiki/Shafi_Goldwasser
"goldwasser",
# James Golick, all around gangster.
"golick",
# Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https:#en.wikipedia.org/wiki/Jane_Goodall
"goodall",
# Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https:#en.wikipedia.org/wiki/Lois_Haibt
"haibt",
# Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https:#en.wikipedia.org/wiki/Margaret_Hamilton_(scientist)
"hamilton",
# Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https:#en.wikipedia.org/wiki/Stephen_Hawking
"hawking",
# Werner Heisenberg was a founding father of quantum mechanics. https:#en.wikipedia.org/wiki/Werner_Heisenberg
"heisenberg",
# Grete Hermann was a German philosopher noted for her philosophical work on the foundations of quantum mechanics. https:#en.wikipedia.org/wiki/Grete_Hermann
"hermann",
# Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https:#en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD
"heyrovsky",
# Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https:#en.wikipedia.org/wiki/Dorothy_Hodgkin
"hodgkin",
# Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https:#en.wikipedia.org/wiki/Erna_Schneider_Hoover
"hoover",
# Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https:#en.wikipedia.org/wiki/Grace_Hopper
"hopper",
# Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https:#en.wikipedia.org/wiki/Frances_Hugle
"hugle",
# Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https:#en.wikipedia.org/wiki/Hypatia
"hypatia",
# Mary Jackson, American mathematician and aerospace engineer who earned the highest title within NASA's engineering department - https:#en.wikipedia.org/wiki/Mary_Jackson_(engineer)
"jackson",
# Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https:#en.wikipedia.org/wiki/Jang_Yeong-sil
"jang",
# Betty Jennings - one of the original programmers of the ENIAC. https:#en.wikipedia.org/wiki/ENIAC - https:#en.wikipedia.org/wiki/Jean_Bartik
"jennings",
# Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https:#en.wikipedia.org/wiki/Mary_Lou_Jepsen
"jepsen",
# Katherine Coleman Goble Johnson - American physicist and mathematician contributed to the NASA. https:#en.wikipedia.org/wiki/Katherine_Johnson
"johnson",
# Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https:#en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie
"joliot",
# Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https:#en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones
"jones",
# A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https:#en.wikipedia.org/wiki/A._P._J._Abdul_Kalam
"kalam",
# Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https:#en.wikipedia.org/wiki/Susan_Kare
"kare",
# Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https:#en.wikipedia.org/wiki/Mary_Kenneth_Keller
"keller",
# Johannes Kepler, German astronomer known for his three laws of planetary motion - https:#en.wikipedia.org/wiki/Johannes_Kepler
"kepler",
# Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https:#en.wikipedia.org/wiki/Har_Gobind_Khorana
"khorana",
# Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https:#en.wikipedia.org/wiki/Jack_Kilby
"kilby",
# Maria Kirch - German astronomer and first woman to discover a comet - https:#en.wikipedia.org/wiki/Maria_Margarethe_Kirch
"kirch",
# Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https:#en.wikipedia.org/wiki/Donald_Knuth
"knuth",
# Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https:#en.wikipedia.org/wiki/Sofia_Kovalevskaya
"kowalevski",
# Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https:#en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande
"lalande",
# Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https:#en.wikipedia.org/wiki/Hedy_Lamarr
"lamarr",
# Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https:#en.wikipedia.org/wiki/Leslie_Lamport
"lamport",
# Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https:#en.wikipedia.org/wiki/Mary_Leakey
"leakey",
# Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https:#en.wikipedia.org/wiki/Henrietta_Swan_Leavitt
"leavitt",
#Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim-- Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9-11 hijackers. https:#en.wikipedia.org/wiki/Daniel_Lewin
"lewin",
# Ruth Lichterman - one of the original programmers of the ENIAC. https:#en.wikipedia.org/wiki/ENIAC - https:#en.wikipedia.org/wiki/Ruth_Teitelbaum
"lichterman",
# Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https:#en.wikipedia.org/wiki/Barbara_Liskov
"liskov",
# Ada Lovelace invented the first algorithm. https:#en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull)
"lovelace",
# Auguste and Louis Lumière - the first filmmakers in history - https:#en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re
"lumiere",
# Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https:#en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician)
"mahavira",
# Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https:#en.wikipedia.org/wiki/Maria_Mayer
"mayer",
# John McCarthy invented LISP: https:#en.wikipedia.org/wiki/John_McCarthy_(computer_scientist)
"mccarthy",
# Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https:#en.wikipedia.org/wiki/Barbara_McClintock
"mcclintock",
# Malcolm McLean invented the modern shipping container: https:#en.wikipedia.org/wiki/Malcom_McLean
"mclean",
# Kay McNulty - one of the original programmers of the ENIAC. https:#en.wikipedia.org/wiki/ENIAC - https:#en.wikipedia.org/wiki/Kathleen_Antonelli
"mcnulty",
# Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https:#en.wikipedia.org/wiki/Lise_Meitner
"meitner",
# Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https:#en.wikipedia.org/wiki/Carla_Meninsky
"meninsky",
# Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https:#en.wikipedia.org/wiki/Johanna_Mestorf
"mestorf",
# Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https:#en.wikipedia.org/wiki/Marvin_Minsky
"minsky",
# Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https:#en.wikipedia.org/wiki/Maryam_Mirzakhani
"mirzakhani",
# Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https:#en.wikipedia.org/wiki/Samuel_Morse
"morse",
# Ian Murdock - founder of the Debian project - https:#en.wikipedia.org/wiki/Ian_Murdock
"murdock",
# John von Neumann - todays computer architectures are based on the von Neumann architecture. https:#en.wikipedia.org/wiki/Von_Neumann_architecture
"neumann",
# Isaac Newton invented classic mechanics and modern optics. https:#en.wikipedia.org/wiki/Isaac_Newton
"newton",
# Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https:#en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform
"nightingale",
# Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https:#en.wikipedia.org/wiki/Alfred_Nobel
"nobel",
# Emmy Noether, German mathematician. Noether's Theorem is named after her. https:#en.wikipedia.org/wiki/Emmy_Noether
"noether",
# Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http:#www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1
"northcutt",
# Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. - https:#en.wikipedia.org/wiki/Robert_Noyce
"noyce",
# Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https:#en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems
"panini",
# Ambroise Pare invented modern surgery. https:#en.wikipedia.org/wiki/Ambroise_Par%C3%A9
"pare",
# Louis Pasteur discovered vaccination, fermentation and pasteurization. https:#en.wikipedia.org/wiki/Louis_Pasteur.
"pasteur",
# Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https:#en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin
"payne",
# Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https:#en.wikipedia.org/wiki/Radia_Perlman
"perlman",
# Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https:#en.wikipedia.org/wiki/Rob_Pike
"pike",
# Henri Poincaré made fundamental contributions in several fields of mathematics. https:#en.wikipedia.org/wiki/Henri_Poincar%C3%A9
"poincare",
# Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https:#en.wikipedia.org/wiki/Laura_Poitras
"poitras",
# Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https:#en.wikipedia.org/wiki/Ptolemy
"ptolemy",
# C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https:#en.wikipedia.org/wiki/C._V._Raman
"raman",
# Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https:#en.wikipedia.org/wiki/Srinivasa_Ramanujan
"ramanujan",
# Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https:#en.wikipedia.org/wiki/Sally_Ride
"ride",
# Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https:#en.wikipedia.org/wiki/Rita_Levi-Montalcini)
"montalcini",
# Dennis Ritchie - co-creator of UNIX and the C programming language. - https:#en.wikipedia.org/wiki/Dennis_Ritchie
"ritchie",
# Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https:#en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen
"roentgen",
# Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https:#en.wikipedia.org/wiki/Rosalind_Franklin
"rosalind",
# Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https:#en.wikipedia.org/wiki/Meghnad_Saha
"saha",
# Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https:#en.wikipedia.org/wiki/Jean_E._Sammet
"sammet",
# Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https:#en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer)
"shaw",
# Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https:#en.wikipedia.org/wiki/Steve_Shirley
"shirley",
# William Shockley co-invented the transistor - https:#en.wikipedia.org/wiki/William_Shockley
"shockley",
# Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https:#en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi
"sinoussi",
# Betty Snyder - one of the original programmers of the ENIAC. https:#en.wikipedia.org/wiki/ENIAC - https:#en.wikipedia.org/wiki/Betty_Holberton
"snyder",
# Frances Spence - one of the original programmers of the ENIAC. https:#en.wikipedia.org/wiki/ENIAC - https:#en.wikipedia.org/wiki/Frances_Spence
"spence",
# Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https:#en.wikiquote.org/wiki/Richard_Stallman
"stallman",
# Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https:#en.wikipedia.org/wiki/Michael_Stonebraker
"stonebraker",
# Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https:#en.wikipedia.org/wiki/Janese_Swanson
"swanson",
# Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https:#en.wikiquote.org/wiki/Aaron_Swartz
"swartz",
# Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https:#en.wikipedia.org/wiki/Bertha_Swirles
"swirles",
# Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https:#en.wikipedia.org/wiki/Nikola_Tesla
"tesla",
# Ken Thompson - co-creator of UNIX and the C programming language - https:#en.wikipedia.org/wiki/Ken_Thompson
"thompson",
# Linus Torvalds invented Linux and Git. https:#en.wikipedia.org/wiki/Linus_Torvalds
"torvalds",
# Alan Turing was a founding father of computer science. https:#en.wikipedia.org/wiki/Alan_Turing.
"turing",
# Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https:#en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions
"varahamihira",
# Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https:#en.wikipedia.org/wiki/Visvesvaraya
"visvesvaraya",
# Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https:#en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard
"volhard",
# Marlyn Wescoff - one of the original programmers of the ENIAC. https:#en.wikipedia.org/wiki/ENIAC - https:#en.wikipedia.org/wiki/Marlyn_Meltzer
"wescoff",
# Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https:#en.wikipedia.org/wiki/Andrew_Wiles
"wiles",
# Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https:#en.wikipedia.org/wiki/Roberta_Williams
"williams",
# Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https:#en.wikipedia.org/wiki/Sophie_Wilson
"wilson",
# Jeannette Wing - co-developed the Liskov substitution principle. - https:#en.wikipedia.org/wiki/Jeannette_Wing
"wing",
# Steve Wozniak invented the Apple I and Apple II. https:#en.wikipedia.org/wiki/Steve_Wozniak
"wozniak",
# The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https:#en.wikipedia.org/wiki/Wright_brothers
"wright",
# Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https:#en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow
"yalow",
# Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https:#en.wikipedia.org/wiki/Ada_Yonath
"yonath",
]
def get_random_name(retry=0):
"""
get_random_name generates a random name from the list of adjectives and surnames in this package
formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random
integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3`
"""
name = "%s_%s" % (random.choice(left), random.choice(right))
if retry > 0:
name = "%s%d" % (name, random.randint(0, 10))
return name
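# Illustrative usage sketch (not part of the original module); it assumes the
# `random` import and the `left` adjective list defined earlier in this file.
if __name__ == '__main__':
    # Prints names such as "focused_turing"; the retry variant may append a
    # digit, e.g. "focused_turing3".
    for _ in range(3):
        print(get_random_name())
    print(get_random_name(retry=1))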
| gpl-3.0 | 235,119,602,138,753,730 | 51.257143 | 378 | 0.740713 | false |
dheerajgopi/google-python | basic/wordcount.py | 1 | 2942 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
def file_read(filename):
# generator function for yielding lines
with open(filename) as f:
for lines in f:
yield lines.split()
def build_dict(filename):
# building dict with word as key and count as value
res_dict = {}
for lines in file_read(filename):
for words in lines:
word = words.lower()
res_dict[word] = res_dict.setdefault(word, 0) + 1
return res_dict
def build_list(word_dict):
# for building a list containing tuples of word and count
word_list = []
for k, v in word_dict.items():
word_list.append((k, v))
return word_list
def print_words(filename):
# printing the words and its count in alphabetic order
word_dict = build_dict(filename)
word_list = build_list(word_dict)
word_list.sort()
for word, count in word_list:
print word, '---', count
def print_top(filename):
# printing 20 most commonly used words
word_dict = build_dict(filename)
word_list = build_list(word_dict)
word_list.sort(key = lambda x:x[1], reverse = True)
    for i in xrange(min(20, len(word_list))):
print word_list[i][0],'----', word_list[i][1]
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print 'unknown option: ' + option
sys.exit(1)
# Standard Boilerplate
if __name__ == '__main__':
main()
| apache-2.0 | 7,231,735,229,345,965,000 | 28.717172 | 79 | 0.705303 | false |
FinalAngel/django-cms | cms/utils/placeholder.py | 1 | 12108 | # -*- coding: utf-8 -*-
import operator
import warnings
from collections import namedtuple
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.query_utils import Q
from django.template import TemplateSyntaxError, NodeList, Variable, Context, Template, engines
from django.template.base import VariableNode
from django.template.loader import get_template
from django.template.loader_tags import BlockNode, ExtendsNode, IncludeNode
from django.utils import six
from sekizai.helpers import get_varname, is_variable_extend_node
from cms.exceptions import DuplicatePlaceholderWarning
from cms.utils import get_cms_setting
DeclaredPlaceholder = namedtuple('DeclaredPlaceholder', ['slot', 'inherit'])
def _get_nodelist(tpl):
if hasattr(tpl, 'template'):
return tpl.template.nodelist
else:
return tpl.nodelist
def get_context():
if engines is not None:
context = Context()
context.template = Template('')
return context
else:
return {}
def get_placeholder_conf(setting, placeholder, template=None, default=None):
"""
Returns the placeholder configuration for a given setting. The key would for
example be 'plugins' or 'name'.
Resulting value will be the last from:
CMS_PLACEHOLDER_CONF[None] (global)
CMS_PLACEHOLDER_CONF['template'] (if template is given)
CMS_PLACEHOLDER_CONF['placeholder']
CMS_PLACEHOLDER_CONF['template placeholder'] (if template is given)
"""
if placeholder:
keys = []
placeholder_conf = get_cms_setting('PLACEHOLDER_CONF')
# 1st level
if template:
keys.append(u'%s %s' % (template, placeholder))
# 2nd level
keys.append(placeholder)
# 3rd level
if template:
keys.append(template)
# 4th level
keys.append(None)
for key in keys:
try:
conf = placeholder_conf[key]
value = conf.get(setting, None)
if value is not None:
return value
inherit = conf.get('inherit')
if inherit:
if ' ' in inherit:
inherit = inherit.split(' ')
else:
inherit = (None, inherit)
value = get_placeholder_conf(setting, inherit[1], inherit[0], default)
if value is not None:
return value
except KeyError:
continue
return default
def get_toolbar_plugin_struct(plugins, slot=None, page=None):
"""
Return the list of plugins to render in the toolbar.
The dictionary contains the label, the classname and the module for the
plugin.
Names and modules can be defined on a per-placeholder basis using
'plugin_modules' and 'plugin_labels' attributes in CMS_PLACEHOLDER_CONF
:param plugins: list of plugins
:param slot: placeholder slot name
:param page: the page
:return: list of dictionaries
"""
template = None
if page:
template = page.template
modules = get_placeholder_conf("plugin_modules", slot, template, default={})
names = get_placeholder_conf("plugin_labels", slot, template, default={})
main_list = []
# plugin.value points to the class name of the plugin
# It's added on registration. TIL.
for plugin in plugins:
main_list.append({'value': plugin.value,
'name': names.get(plugin.value, plugin.name),
'module': modules.get(plugin.value, plugin.module)})
return sorted(main_list, key=operator.itemgetter("module"))
def validate_placeholder_name(name):
if not isinstance(name, six.string_types):
raise ImproperlyConfigured("Placeholder identifier names need to be of type string. ")
if not all(ord(char) < 128 for char in name):
raise ImproperlyConfigured("Placeholder identifiers names may not "
"contain non-ascii characters. If you wish your placeholder "
"identifiers to contain non-ascii characters when displayed to "
"users, please use the CMS_PLACEHOLDER_CONF setting with the 'name' "
"key to specify a verbose name.")
class PlaceholderNoAction(object):
can_copy = False
def copy(self, **kwargs):
return False
def get_copy_languages(self, **kwargs):
return []
class MLNGPlaceholderActions(PlaceholderNoAction):
can_copy = True
def copy(self, target_placeholder, source_language, fieldname, model, target_language, **kwargs):
from cms.utils.copy_plugins import copy_plugins_to
trgt = model.objects.get(**{fieldname: target_placeholder})
src = model.objects.get(master=trgt.master, language_code=source_language)
source_placeholder = getattr(src, fieldname, None)
if not source_placeholder:
return False
return copy_plugins_to(source_placeholder.get_plugins_list(),
target_placeholder, target_language)
def get_copy_languages(self, placeholder, model, fieldname, **kwargs):
manager = model.objects
src = manager.get(**{fieldname: placeholder})
query = Q(master=src.master)
query &= Q(**{'%s__cmsplugin__isnull' % fieldname: False})
query &= ~Q(pk=src.pk)
language_codes = manager.filter(query).values_list('language_code', flat=True).distinct()
return [(lc, dict(settings.LANGUAGES)[lc]) for lc in language_codes]
def restore_sekizai_context(context, changes):
varname = get_varname()
sekizai_container = context.get(varname)
for key, values in changes.items():
sekizai_namespace = sekizai_container[key]
for value in values:
sekizai_namespace.append(value)
def _scan_placeholders(nodelist, current_block=None, ignore_blocks=None):
from cms.templatetags.cms_tags import Placeholder
placeholders = []
if ignore_blocks is None:
# List of BlockNode instances to ignore.
        # This is important to avoid processing overridden block nodes.
ignore_blocks = []
for node in nodelist:
# check if this is a placeholder first
if isinstance(node, Placeholder):
placeholders.append(node)
elif isinstance(node, IncludeNode):
# if there's an error in the to-be-included template, node.template becomes None
if node.template:
# Check if it quacks like a template object, if not
# presume is a template path and get the object out of it
if not callable(getattr(node.template, 'render', None)):
# If it's a variable there is no way to expand it at this stage so we
# need to skip it
if isinstance(node.template.var, Variable):
continue
else:
template = get_template(node.template.var)
else:
template = node.template
placeholders += _scan_placeholders(_get_nodelist(template), current_block)
# handle {% extends ... %} tags
elif isinstance(node, ExtendsNode):
placeholders += _extend_nodelist(node)
# in block nodes we have to scan for super blocks
elif isinstance(node, VariableNode) and current_block:
if node.filter_expression.token == 'block.super':
if not hasattr(current_block.super, 'nodelist'):
raise TemplateSyntaxError("Cannot render block.super for blocks without a parent.")
placeholders += _scan_placeholders(_get_nodelist(current_block.super), current_block.super)
# ignore nested blocks which are already handled
elif isinstance(node, BlockNode) and node.name in ignore_blocks:
continue
# if the node has the newly introduced 'child_nodelists' attribute, scan
# those attributes for nodelists and recurse them
elif hasattr(node, 'child_nodelists'):
for nodelist_name in node.child_nodelists:
if hasattr(node, nodelist_name):
subnodelist = getattr(node, nodelist_name)
if isinstance(subnodelist, NodeList):
if isinstance(node, BlockNode):
current_block = node
placeholders += _scan_placeholders(subnodelist, current_block, ignore_blocks)
# else just scan the node for nodelist instance attributes
else:
for attr in dir(node):
obj = getattr(node, attr)
if isinstance(obj, NodeList):
if isinstance(node, BlockNode):
current_block = node
placeholders += _scan_placeholders(obj, current_block, ignore_blocks)
return placeholders
def get_placeholders(template):
compiled_template = get_template(template)
placeholders = []
placeholder_nodes = _scan_placeholders(_get_nodelist(compiled_template))
clean_placeholders = []
for node in placeholder_nodes:
slot = node.get_name()
inherit = node.get_inherit_status()
if slot in clean_placeholders:
warnings.warn("Duplicate {{% placeholder \"{0}\" %}} "
"in template {1}."
.format(slot, template, slot),
DuplicatePlaceholderWarning)
else:
validate_placeholder_name(slot)
placeholders.append(DeclaredPlaceholder(slot=slot, inherit=inherit))
clean_placeholders.append(slot)
return placeholders
def _extend_nodelist(extend_node):
"""
Returns a list of placeholders found in the parent template(s) of this
ExtendsNode
"""
# we don't support variable extensions
if is_variable_extend_node(extend_node):
return []
# This is a dictionary mapping all BlockNode instances found in the template that contains extend_node
blocks = dict(extend_node.blocks)
_extend_blocks(extend_node, blocks)
placeholders = []
for block in blocks.values():
placeholders += _scan_placeholders(_get_nodelist(block), block, blocks.keys())
# Scan topmost template for placeholder outside of blocks
parent_template = _find_topmost_template(extend_node)
placeholders += _scan_placeholders(_get_nodelist(parent_template), None, blocks.keys())
return placeholders
def _find_topmost_template(extend_node):
parent_template = extend_node.get_parent(get_context())
for node in _get_nodelist(parent_template).get_nodes_by_type(ExtendsNode):
        # There can only be one extends block in a template, otherwise django raises an exception
return _find_topmost_template(node)
# No ExtendsNode
return extend_node.get_parent(get_context())
def _extend_blocks(extend_node, blocks):
"""
Extends the dictionary `blocks` with *new* blocks in the parent node (recursive)
"""
# we don't support variable extensions
if is_variable_extend_node(extend_node):
return
parent = extend_node.get_parent(get_context())
# Search for new blocks
for node in _get_nodelist(parent).get_nodes_by_type(BlockNode):
if not node.name in blocks:
blocks[node.name] = node
else:
# set this node as the super node (for {{ block.super }})
block = blocks[node.name]
seen_supers = []
while hasattr(block.super, 'nodelist') and block.super not in seen_supers:
seen_supers.append(block.super)
block = block.super
block.super = node
# search for further ExtendsNodes
for node in _get_nodelist(parent).get_nodes_by_type(ExtendsNode):
_extend_blocks(node, blocks)
break
| bsd-3-clause | -7,211,856,277,109,119,000 | 37.683706 | 110 | 0.623472 | false |
jbutler42/pynet_class | pyclass/week4/shell.py | 1 | 2967 | #!/usr/bin/env python
import paramiko
from getpass import getpass
from pyclass.common import config
from pyclass.common.entities import entities
(device_ip, device_port, device_type) = entities.ssh_devices['rtr1']
username = entities.entities['ssh_user']
print device_ip, device_port, username
class shellConnection(object):
def __init__(self, **kwargs):
self.common = {
'client': kwargs.get('client', 'paramiko'),
'username': kwargs.get('username'),
'password': kwargs.get('password', getpass()),
'ip': kwargs.get('ip'),
'port': kwargs.get('port'),
'dev_name': kwargs.get('dev_name', kwargs.get('ip')),
}
self.paramiko_option = {
'add_host_keys': kwargs.get('add_host_keys', True),
'look_for_keys': kwargs.get('look_for_keys', False),
'allow_agent': kwargs.get('allow_agent', False),
'load_host_keys': kwargs.get('load_host_keys', False),
'read_timeout': kwargs.get('read_timeout', 6.0),
'buffer_size': kwargs.get('buffer_size', 65535),
}
if 'paramiko' in self.common['client'].lower():
self.connection = self.get_paramiko_connection()
elif 'pexpect' in self.common['client'].lower():
self.connection = self.get_pexpect_connection()
else:
self.connection = None
raise "Invalid client type requested: ", self.common['client']
def get_paramiko_connection(self):
self.remote_conn_pre = paramiko.SSHClient()
if self.paramiko_option['add_host_keys']:
self.remote_conn_pre.set_missing_host_key_policy(
paramiko.AutoAddPolicy()
)
if self.paramiko_option['load_host_keys']:
self.remote_conn_pre.load_system_host_keys()
self.remote_conn_pre.connect(
self.common['ip'],
username=self.common['username'],
password=self.common['password'],
look_for_keys=self.paramiko_option['look_for_keys'],
allow_agent=self.paramiko_option['allow_agent'],
port=self.common['port']
)
try:
remote_conn = self.remote_conn_pre.invoke_shell()
remote_conn.settimeout(self.paramiko_option['read_timeout'])
return remote_conn
except Exception as e:
print "Got execption, could not invoke shell connection:", e
def get_pexpect_connection(self):
pass
def get_buffer(self, buffer_size=None):
if buffer_size is None:
buffer_size = self.paramiko_option['buffer_size']
raw_output = ""
while self.connection.recv_ready():
raw_output += self.connection.recv(buffer_size)
return raw_output.split('\n')
def send(self, command):
if not command.endswith('\n'):
command = command + '\n'
self.connection.send(command)
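# Illustrative usage sketch (assumes the rtr1 device loaded above is reachable
# and that 'show ip int brief' is a valid command on it).
if __name__ == '__main__':
    import time
    conn = shellConnection(ip=device_ip, port=device_port, username=username)
    conn.send('show ip int brief')
    time.sleep(2)  # give the device a moment to produce output
    for line in conn.get_buffer():
        print line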
| apache-2.0 | -5,937,187,709,868,411,000 | 38.039474 | 74 | 0.587799 | false |
hachreak/invenio-records | examples/app.py | 1 | 2795 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Minimal Invenio-Records application example for development.
Create database and tables::
$ cd examples
$ export FLASK_APP=app.py
$ export FLASK_DEBUG=1
$ mkdir -p instance
$ flask db init
$ flask db create
Create test record::
$ echo '{"title": "Test title"}' | flask records create \
-i deadbeef-9fe4-43d3-a08f-38c2b309afba
Run the development server::
$ flask run
Retrieve record via web::
$ curl http://127.0.0.1:5000/deadbeef-9fe4-43d3-a08f-38c2b309afba
Retrieve record via shell::
$ flask shell
>>> from invenio_records.api import Record
>>> Record.get_record('deadbeef-9fe4-43d3-a08f-38c2b309afba')
"""
from __future__ import absolute_import, print_function
import os
import pkg_resources
from flask import Flask, jsonify, render_template
from flask_celeryext import create_celery_app
from invenio_db import InvenioDB
from invenio_records import InvenioRecords
try:
pkg_resources.get_distribution('invenio_pidstore')
except pkg_resources.DistributionNotFound:
HAS_PIDSTORE = False
else:
HAS_PIDSTORE = True
from invenio_pidstore import InvenioPIDStore
# Create Flask application
app = Flask(__name__)
app.config.update(
CELERY_ALWAYS_EAGER=True,
CELERY_CACHE_BACKEND="memory",
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_RESULT_BACKEND="cache",
SECRET_KEY="CHANGE_ME",
SECURITY_PASSWORD_SALT="CHANGE_ME_ALSO",
)
db_uri = os.environ.get('SQLALCHEMY_DATABASE_URI')
if db_uri is not None:
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
InvenioDB(app)
InvenioRecords(app)
if HAS_PIDSTORE:
InvenioPIDStore(app)
celery = create_celery_app(app)
@app.route("/<uuid>")
def index(uuid):
"""Retrieve record."""
from invenio_records.api import Record
return jsonify(Record.get_record(uuid))
| gpl-2.0 | 6,225,988,424,922,670,000 | 26.135922 | 76 | 0.732379 | false |
ligo-cbc/pycbc | pycbc/types/timeseries.py | 1 | 42880 | # Copyright (C) 2014 Tito Dal Canton, Josh Willis, Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Provides a class representing a time series.
"""
from __future__ import division
import os as _os, h5py
from pycbc.types.array import Array, _convert, complex_same_precision_as, zeros
from pycbc.types.array import _nocomplex
from pycbc.types.frequencyseries import FrequencySeries
import lal as _lal
import numpy as _numpy
from scipy.io.wavfile import write as write_wav
class TimeSeries(Array):
"""Models a time series consisting of uniformly sampled scalar values.
Parameters
----------
initial_array : array-like
Array containing sampled data.
delta_t : float
Time between consecutive samples in seconds.
epoch : {None, lal.LIGOTimeGPS}, optional
Time of the first sample in seconds.
dtype : {None, data-type}, optional
Sample data type.
copy : boolean, optional
If True, samples are copied to a new array.
Attributes
----------
delta_t
duration
start_time
end_time
sample_times
sample_rate
"""
def __init__(self, initial_array, delta_t=None,
epoch=None, dtype=None, copy=True):
if len(initial_array) < 1:
raise ValueError('initial_array must contain at least one sample.')
if delta_t is None:
try:
delta_t = initial_array.delta_t
except AttributeError:
raise TypeError('must provide either an initial_array with a delta_t attribute, or a value for delta_t')
if not delta_t > 0:
raise ValueError('delta_t must be a positive number')
# Get epoch from initial_array if epoch not given (or is None)
# If initialy array has no epoch, set epoch to 0.
# If epoch is provided, use that.
if not isinstance(epoch, _lal.LIGOTimeGPS):
if epoch is None:
if isinstance(initial_array, TimeSeries):
epoch = initial_array._epoch
else:
epoch = _lal.LIGOTimeGPS(0)
elif epoch is not None:
try:
epoch = _lal.LIGOTimeGPS(epoch)
except:
raise TypeError('epoch must be either None or a lal.LIGOTimeGPS')
Array.__init__(self, initial_array, dtype=dtype, copy=copy)
self._delta_t = delta_t
self._epoch = epoch
def epoch_close(self, other):
""" Check if the epoch is close enough to allow operations """
dt = abs(float(self.start_time - other.start_time))
return dt <= 1e-7
def sample_rate_close(self, other):
""" Check if the sample rate is close enough to allow operations """
# compare our delta_t either to a another time series' or
# to a given sample rate (float)
if isinstance(other, TimeSeries):
odelta_t = other.delta_t
else:
odelta_t = 1.0/other
if (odelta_t - self.delta_t) / self.delta_t > 1e-4:
return False
if abs(1 - odelta_t / self.delta_t) * len(self) > 0.5:
return False
return True
def _return(self, ary):
return TimeSeries(ary, self._delta_t, epoch=self._epoch, copy=False)
def _typecheck(self, other):
if isinstance(other, TimeSeries):
if not self.sample_rate_close(other):
raise ValueError('different delta_t, {} vs {}'.format(
self.delta_t, other.delta_t))
if not self.epoch_close(other):
raise ValueError('different epoch, {} vs {}'.format(
self.start_time, other.start_time))
def _getslice(self, index):
# Set the new epoch---note that index.start may also be None
if index.start is None:
new_epoch = self._epoch
else:
if index.start < 0:
raise ValueError(('Negative start index ({})'
' not supported').format(index.start))
new_epoch = self._epoch + index.start * self._delta_t
if index.step is not None:
new_delta_t = self._delta_t * index.step
else:
new_delta_t = self._delta_t
return TimeSeries(Array._getslice(self, index), new_delta_t,
new_epoch, copy=False)
def prepend_zeros(self, num):
"""Prepend num zeros onto the beginning of this TimeSeries. Update also
epoch to include this prepending.
"""
self.resize(len(self) + num)
self.roll(num)
self._epoch = self._epoch - num * self._delta_t
def append_zeros(self, num):
"""Append num zeros onto the end of this TimeSeries.
"""
self.resize(len(self) + num)
def get_delta_t(self):
"""Return time between consecutive samples in seconds.
"""
return self._delta_t
delta_t = property(get_delta_t,
doc="Time between consecutive samples in seconds.")
def get_duration(self):
"""Return duration of time series in seconds.
"""
return len(self) * self._delta_t
duration = property(get_duration,
doc="Duration of time series in seconds.")
def get_sample_rate(self):
"""Return the sample rate of the time series.
"""
return 1.0/self.delta_t
sample_rate = property(get_sample_rate,
doc="The sample rate of the time series.")
def time_slice(self, start, end, mode='floor'):
"""Return the slice of the time series that contains the time range
in GPS seconds.
"""
if start < self.start_time:
raise ValueError('Time series does not contain a time as early as %s' % start)
if end > self.end_time:
raise ValueError('Time series does not contain a time as late as %s' % end)
start_idx = float(start - self.start_time) * self.sample_rate
end_idx = float(end - self.start_time) * self.sample_rate
if _numpy.isclose(start_idx, round(start_idx)):
start_idx = round(start_idx)
if _numpy.isclose(end_idx, round(end_idx)):
end_idx = round(end_idx)
if mode == 'floor':
start_idx = int(start_idx)
end_idx = int(end_idx)
elif mode == 'nearest':
start_idx = int(round(start_idx))
end_idx = int(round(end_idx))
else:
raise ValueError("Invalid mode: {}".format(mode))
return self[start_idx:end_idx]
@property
def delta_f(self):
"""Return the delta_f this ts would have in the frequency domain
"""
return 1.0 / self.duration
@property
def start_time(self):
"""Return time series start time as a LIGOTimeGPS.
"""
return self._epoch
@start_time.setter
def start_time(self, time):
""" Set the start time
"""
self._epoch = _lal.LIGOTimeGPS(time)
def get_end_time(self):
"""Return time series end time as a LIGOTimeGPS.
"""
return self._epoch + self.get_duration()
end_time = property(get_end_time,
doc="Time series end time as a LIGOTimeGPS.")
def get_sample_times(self):
"""Return an Array containing the sample times.
"""
if self._epoch is None:
return Array(range(len(self))) * self._delta_t
else:
return Array(range(len(self))) * self._delta_t + float(self._epoch)
sample_times = property(get_sample_times,
doc="Array containing the sample times.")
def at_time(self, time, nearest_sample=False):
""" Return the value at the specified gps time
"""
if nearest_sample:
time += self.delta_t / 2.0
return self[int((time-self.start_time)*self.sample_rate)]
def __eq__(self,other):
"""
This is the Python special method invoked whenever the '=='
comparison is used. It will return true if the data of two
time series are identical, and all of the numeric meta-data
are identical, irrespective of whether or not the two
instances live in the same memory (for that comparison, the
Python statement 'a is b' should be used instead).
Thus, this method returns 'True' if the types of both 'self'
and 'other' are identical, as well as their lengths, dtypes,
epochs, delta_ts and the data in the arrays, element by element.
It will always do the comparison on the CPU, but will *not* move
either object to the CPU if it is not already there, nor change
the scheme of either object. It is possible to compare a CPU
object to a GPU object, and the comparison should be true if the
data and meta-data of the two objects are the same.
Note in particular that this function returns a single boolean,
and not an array of booleans as Numpy does. If the numpy
behavior is instead desired it can be obtained using the numpy()
method of the PyCBC type to get a numpy instance from each
object, and invoking '==' on those two instances.
Parameters
----------
other: another Python object, that should be tested for equality
with 'self'.
Returns
-------
boolean: 'True' if the types, dtypes, lengths, epochs, delta_ts
and data of the two objects are each identical.
"""
if super(TimeSeries,self).__eq__(other):
return (self._epoch == other._epoch and self._delta_t == other._delta_t)
else:
return False
def almost_equal_elem(self,other,tol,relative=True,dtol=0.0):
"""
Compare whether two time series are almost equal, element
by element.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(self[i]-other[i]) <= tol*abs(self[i])
for all elements of the series.
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(self[i]-other[i]) <= tol
for all elements of the series.
The method also checks that self.delta_t is within 'dtol' of
other.delta_t; if 'dtol' has its default value of 0 then exact
equality between the two is required.
Other meta-data (type, dtype, length, and epoch) must be exactly
equal. If either object's memory lives on the GPU it will be
copied to the CPU for the comparison, which may be slow. But the
original object itself will not have its memory relocated nor
scheme changed.
Parameters
----------
other: another Python object, that should be tested for
almost-equality with 'self', element-by-element.
tol: a non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative: A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
is omitted) or as an absolute tolerance (if tol is False).
dtol: a non-negative number, the tolerance for delta_t. Like 'tol',
it is interpreted as relative or absolute based on the value of
'relative'. This parameter defaults to zero, enforcing exact
equality between the delta_t values of the two TimeSeries.
Returns
-------
boolean: 'True' if the data and delta_ts agree within the tolerance,
as interpreted by the 'relative' keyword, and if the types,
lengths, dtypes, and epochs are exactly the same.
"""
# Check that the delta_t tolerance is non-negative; raise an exception
# if needed.
if (dtol < 0.0):
raise ValueError("Tolerance in delta_t cannot be negative")
if super(TimeSeries,self).almost_equal_elem(other,tol=tol,relative=relative):
if relative:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol*self._delta_t)
else:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol)
else:
return False
def almost_equal_norm(self,other,tol,relative=True,dtol=0.0):
"""
Compare whether two time series are almost equal, normwise.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(norm(self-other)) <= tol*abs(norm(self)).
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(norm(self-other)) <= tol
The method also checks that self.delta_t is within 'dtol' of
other.delta_t; if 'dtol' has its default value of 0 then exact
equality between the two is required.
Other meta-data (type, dtype, length, and epoch) must be exactly
equal. If either object's memory lives on the GPU it will be
copied to the CPU for the comparison, which may be slow. But the
original object itself will not have its memory relocated nor
scheme changed.
Parameters
----------
other: another Python object, that should be tested for
almost-equality with 'self', based on their norms.
tol: a non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative: A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
is omitted) or as an absolute tolerance (if tol is False).
dtol: a non-negative number, the tolerance for delta_t. Like 'tol',
it is interpreted as relative or absolute based on the value of
'relative'. This parameter defaults to zero, enforcing exact
equality between the delta_t values of the two TimeSeries.
Returns
-------
boolean: 'True' if the data and delta_ts agree within the tolerance,
as interpreted by the 'relative' keyword, and if the types,
lengths, dtypes, and epochs are exactly the same.
"""
# Check that the delta_t tolerance is non-negative; raise an exception
# if needed.
if (dtol < 0.0):
raise ValueError("Tolerance in delta_t cannot be negative")
if super(TimeSeries,self).almost_equal_norm(other,tol=tol,relative=relative):
if relative:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol*self._delta_t)
else:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol)
else:
return False
@_convert
def lal(self):
"""Produces a LAL time series object equivalent to self.
Returns
-------
lal_data : {lal.*TimeSeries}
LAL time series object containing the same data as self.
The actual type depends on the sample's dtype. If the epoch of
self is 'None', the epoch of the returned LAL object will be
LIGOTimeGPS(0,0); otherwise, the same as that of self.
Raises
------
TypeError
If time series is stored in GPU memory.
"""
lal_data = None
ep = self._epoch
if self._data.dtype == _numpy.float32:
lal_data = _lal.CreateREAL4TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.float64:
lal_data = _lal.CreateREAL8TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.complex64:
lal_data = _lal.CreateCOMPLEX8TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.complex128:
lal_data = _lal.CreateCOMPLEX16TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
lal_data.data.data[:] = self.numpy()
return lal_data
def crop(self, left, right):
""" Remove given seconds from either end of time series
Parameters
----------
left : float
Number of seconds of data to remove from the left of the time series.
right : float
Number of seconds of data to remove from the right of the time series.
Returns
-------
cropped : pycbc.types.TimeSeries
The reduced time series
"""
if left + right > self.duration:
raise ValueError('Cannot crop more data than we have')
s = int(left * self.sample_rate)
e = len(self) - int(right * self.sample_rate)
return self[s:e]
def save_to_wav(self, file_name):
""" Save this time series to a wav format audio file.
Parameters
----------
file_name : string
The output file name
"""
scaled = _numpy.int16(self.numpy()/max(abs(self)) * 32767)
write_wav(file_name, int(self.sample_rate), scaled)
def psd(self, segment_duration, **kwds):
""" Calculate the power spectral density of this time series.
Use the `pycbc.psd.welch` method to estimate the psd of this time segment.
For more complete options, please see that function.
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
kwds : keywords
Additional keyword arguments are passed on to the `pycbc.psd.welch` method.
Returns
-------
psd : FrequencySeries
Frequency series containing the estimated PSD.
"""
from pycbc.psd import welch
seg_len = int(segment_duration * self.sample_rate)
seg_stride = int(seg_len / 2)
return welch(self, seg_len=seg_len,
seg_stride=seg_stride,
**kwds)
def gate(self, time, window=0.25, method='taper', copy=True,
taper_width=0.25, invpsd=None):
""" Gate out portion of time series
Parameters
----------
time: float
Central time of the gate in seconds
window: float
Half-length in seconds to remove data around gate time.
method: str
Method to apply gate, options are 'hard', 'taper', and 'paint'.
copy: bool
If False, do operations inplace to this time series, else return
new time series.
taper_width: float
            Length of tapering region on either side of excised data. Only
applies to the taper gating method.
invpsd: pycbc.types.FrequencySeries
The inverse PSD to use for painting method. If not given,
a PSD is generated using default settings.
Returns
-------
        data: pycbc.types.TimeSeries
Gated time series
"""
data = self.copy() if copy else self
if method == 'taper':
from pycbc.strain import gate_data
return gate_data(data, [(time, window, taper_width)])
elif method == 'paint':
# Uses the hole-filling method of
# https://arxiv.org/pdf/1908.05644.pdf
from pycbc.strain.gate import gate_and_paint
if invpsd is None:
# These are some bare minimum settings, normally you
# should probably provide a psd
invpsd = 1. / self.filter_psd(self.duration/32, self.delta_f, 0)
lindex = int((time - window - self.start_time) / self.delta_t)
rindex = lindex + int(2 * window / self.delta_t)
lindex = lindex if lindex >= 0 else 0
rindex = rindex if rindex <= len(self) else len(self)
return gate_and_paint(data, lindex, rindex, invpsd, copy=False)
elif method == 'hard':
tslice = data.time_slice(time - window, time + window)
tslice[:] = 0
return data
else:
raise ValueError('Invalid method name: {}'.format(method))
def filter_psd(self, segment_duration, delta_f, flow):
""" Calculate the power spectral density of this time series.
Use the `pycbc.psd.welch` method to estimate the psd of this time segment.
The psd is then truncated in the time domain to the segment duration
and interpolated to the requested sample frequency.
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
delta_f : float
Frequency spacing to return psd at.
flow : float
The low frequency cutoff to apply when truncating the inverse
spectrum.
Returns
-------
psd : FrequencySeries
Frequency series containing the estimated PSD.
"""
from pycbc.psd import interpolate, inverse_spectrum_truncation
p = self.psd(segment_duration)
samples = int(p.sample_rate * segment_duration)
p = interpolate(p, delta_f)
return inverse_spectrum_truncation(p, samples,
low_frequency_cutoff=flow,
trunc_method='hann')
def whiten(self, segment_duration, max_filter_duration, trunc_method='hann',
remove_corrupted=True, low_frequency_cutoff=None,
return_psd=False, **kwds):
""" Return a whitened time series
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
max_filter_duration : int
Maximum length of the time-domain filter in seconds.
trunc_method : {None, 'hann'}
Function used for truncating the time-domain filter.
None produces a hard truncation at `max_filter_len`.
remove_corrupted : {True, boolean}
If True, the region of the time series corrupted by the whitening
is excised before returning. If false, the corrupted regions
are not excised and the full time series is returned.
low_frequency_cutoff : {None, float}
Low frequency cutoff to pass to the inverse spectrum truncation.
This should be matched to a known low frequency cutoff of the
data if there is one.
return_psd : {False, Boolean}
Return the estimated and conditioned PSD that was used to whiten
the data.
kwds : keywords
Additional keyword arguments are passed on to the `pycbc.psd.welch` method.
Returns
-------
whitened_data : TimeSeries
The whitened time series
"""
from pycbc.psd import inverse_spectrum_truncation, interpolate
# Estimate the noise spectrum
psd = self.psd(segment_duration, **kwds)
psd = interpolate(psd, self.delta_f)
max_filter_len = int(max_filter_duration * self.sample_rate)
# Interpolate and smooth to the desired corruption length
psd = inverse_spectrum_truncation(psd,
max_filter_len=max_filter_len,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method=trunc_method)
# Whiten the data by the asd
white = (self.to_frequencyseries() / psd**0.5).to_timeseries()
if remove_corrupted:
white = white[int(max_filter_len/2):int(len(self)-max_filter_len/2)]
if return_psd:
return white, psd
return white
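    # Illustrative whitening call (a sketch; the parameter values are examples
    # only):
    #   >>> white = strain.whiten(4, 4, low_frequency_cutoff=20.0)
    # This estimates the PSD from 4 s Welch segments, truncates the inverse
    # filter to 4 s, and trims the corrupted half-filter length from each end.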
def qtransform(self, delta_t=None, delta_f=None, logfsteps=None,
frange=None, qrange=(4,64), mismatch=0.2, return_complex=False):
""" Return the interpolated 2d qtransform of this data
Parameters
----------
delta_t : {self.delta_t, float}
The time resolution to interpolate to
delta_f : float, Optional
The frequency resolution to interpolate to
logfsteps : int
Do a log interpolation (incompatible with delta_f option) and set
the number of steps to take.
frange : {(30, nyquist*0.8), tuple of ints}
frequency range
qrange : {(4, 64), tuple}
q range
mismatch : float
Mismatch between frequency tiles
return_complex: {False, bool}
return the raw complex series instead of the normalized power.
Returns
-------
times : numpy.ndarray
The time that the qtransform is sampled.
freqs : numpy.ndarray
The frequencies that the qtransform is sampled.
qplane : numpy.ndarray (2d)
The two dimensional interpolated qtransform of this time series.
"""
from pycbc.filter.qtransform import qtiling, qplane
from scipy.interpolate import interp2d
if frange is None:
frange = (30, int(self.sample_rate / 2 * 8))
q_base = qtiling(self, qrange, frange, mismatch)
_, times, freqs, q_plane = qplane(q_base, self.to_frequencyseries(),
return_complex=return_complex)
if logfsteps and delta_f:
raise ValueError("Provide only one (or none) of delta_f and logfsteps")
# Interpolate if requested
if delta_f or delta_t or logfsteps:
if return_complex:
interp_amp = interp2d(times, freqs, abs(q_plane))
interp_phase = interp2d(times, freqs, _numpy.angle(q_plane))
else:
interp = interp2d(times, freqs, q_plane)
if delta_t:
times = _numpy.arange(float(self.start_time),
float(self.end_time), delta_t)
if delta_f:
freqs = _numpy.arange(int(frange[0]), int(frange[1]), delta_f)
if logfsteps:
freqs = _numpy.logspace(_numpy.log10(frange[0]),
_numpy.log10(frange[1]),
logfsteps)
if delta_f or delta_t or logfsteps:
if return_complex:
q_plane = _numpy.exp(1.0j * interp_phase(times, freqs))
q_plane *= interp_amp(times, freqs)
else:
q_plane = interp(times, freqs)
return times, freqs, q_plane
def notch_fir(self, f1, f2, order, beta=5.0, remove_corrupted=True):
""" notch filter the time series using an FIR filtered generated from
the ideal response passed through a time-domain kaiser
window (beta = 5.0)
The suppression of the notch filter is related to the bandwidth and
the number of samples in the filter length. For a few Hz bandwidth,
a length corresponding to a few seconds is typically
required to create significant suppression in the notched band.
Parameters
----------
Time Series: TimeSeries
The time series to be notched.
f1: float
The start of the frequency suppression.
f2: float
The end of the frequency suppression.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
"""
from pycbc.filter import notch_fir
ts = notch_fir(self, f1, f2, order, beta=beta)
if remove_corrupted:
ts = ts[order:len(ts)-order]
return ts
def lowpass_fir(self, frequency, order, beta=5.0, remove_corrupted=True):
""" Lowpass filter the time series using an FIR filtered generated from
the ideal response passed through a kaiser window (beta = 5.0)
Parameters
----------
Time Series: TimeSeries
The time series to be low-passed.
frequency: float
The frequency below which is suppressed.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
remove_corrupted : {True, boolean}
If True, the region of the time series corrupted by the filtering
is excised before returning. If false, the corrupted regions
are not excised and the full time series is returned.
"""
from pycbc.filter import lowpass_fir
ts = lowpass_fir(self, frequency, order, beta=beta)
if remove_corrupted:
ts = ts[order:len(ts)-order]
return ts
def highpass_fir(self, frequency, order, beta=5.0, remove_corrupted=True):
""" Highpass filter the time series using an FIR filtered generated from
the ideal response passed through a kaiser window (beta = 5.0)
Parameters
----------
Time Series: TimeSeries
The time series to be high-passed.
frequency: float
The frequency below which is suppressed.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
remove_corrupted : {True, boolean}
If True, the region of the time series corrupted by the filtering
is excised before returning. If false, the corrupted regions
are not excised and the full time series is returned.
"""
from pycbc.filter import highpass_fir
ts = highpass_fir(self, frequency, order, beta=beta)
if remove_corrupted:
ts = ts[order:len(ts)-order]
return ts
def fir_zero_filter(self, coeff):
"""Filter the timeseries with a set of FIR coefficients
Parameters
----------
coeff: numpy.ndarray
            FIR coefficients. Should be an odd length and symmetric.
Returns
-------
filtered_series: pycbc.types.TimeSeries
Return the filtered timeseries, which has been properly shifted to account
for the FIR filter delay and the corrupted regions zeroed out.
"""
from pycbc.filter import fir_zero_filter
return self._return(fir_zero_filter(coeff, self))
def save(self, path, group = None):
"""
Save time series to a Numpy .npy, hdf, or text file. The first column
contains the sample times, the second contains the values.
In the case of a complex time series saved as text, the imaginary
part is written as a third column. When using hdf format, the data is stored
as a single vector, along with relevant attributes.
Parameters
----------
path: string
Destination file path. Must end with either .hdf, .npy or .txt.
group: string
Additional name for internal storage use. Ex. hdf storage uses
this as the key value.
Raises
------
ValueError
If path does not end in .npy or .txt.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
output = _numpy.vstack((self.sample_times.numpy(), self.numpy())).T
_numpy.save(path, output)
elif ext == '.txt':
if self.kind == 'real':
output = _numpy.vstack((self.sample_times.numpy(),
self.numpy())).T
elif self.kind == 'complex':
output = _numpy.vstack((self.sample_times.numpy(),
self.numpy().real,
self.numpy().imag)).T
_numpy.savetxt(path, output)
elif ext =='.hdf':
key = 'data' if group is None else group
with h5py.File(path, 'a') as f:
ds = f.create_dataset(key, data=self.numpy(),
compression='gzip',
compression_opts=9, shuffle=True)
ds.attrs['start_time'] = float(self.start_time)
ds.attrs['delta_t'] = float(self.delta_t)
else:
raise ValueError('Path must end with .npy, .txt or .hdf')
def to_timeseries(self):
""" Return time series"""
return self
@_nocomplex
def to_frequencyseries(self, delta_f=None):
""" Return the Fourier transform of this time series
Parameters
----------
delta_f : {None, float}, optional
The frequency resolution of the returned frequency series. By
default the resolution is determined by the duration of the timeseries.
Returns
-------
FrequencySeries:
The fourier transform of this time series.
"""
from pycbc.fft import fft
if not delta_f:
delta_f = 1.0 / self.duration
# add 0.5 to round integer
tlen = int(1.0 / delta_f / self.delta_t + 0.5)
flen = int(tlen / 2 + 1)
if tlen < len(self):
raise ValueError("The value of delta_f (%s) would be "
"undersampled. Maximum delta_f "
"is %s." % (delta_f, 1.0 / self.duration))
if not delta_f:
tmp = self
else:
tmp = TimeSeries(zeros(tlen, dtype=self.dtype),
delta_t=self.delta_t, epoch=self.start_time)
tmp[:len(self)] = self[:]
f = FrequencySeries(zeros(flen,
dtype=complex_same_precision_as(self)),
delta_f=delta_f)
fft(tmp, f)
return f
def inject(self, other, copy=True):
"""Return copy of self with other injected into it.
The other vector will be resized and time shifted with sub-sample
precision before adding. This assumes that one can assume zeros
outside of the original vector range.
"""
# only handle equal sample rate for now.
if not self.sample_rate_close(other):
raise ValueError('Sample rate must be the same')
# determine if we want to inject in place or not
if copy:
ts = self.copy()
else:
ts = self
# Other is disjoint
if ((other.start_time >= ts.end_time) or
(ts.start_time > other.end_time)):
return ts
other = other.copy()
dt = float((other.start_time - ts.start_time) * ts.sample_rate)
# This coaligns other to the time stepping of self
if not dt.is_integer():
diff = (dt - _numpy.floor(dt)) * ts.delta_t
# insert zeros at end
other.resize(len(other) + (len(other) + 1) % 2 + 1)
# fd shift to the right
other = other.cyclic_time_shift(diff)
# get indices of other with respect to self
        # this is already an integer to floating point precision
left = float(other.start_time - ts.start_time) * ts.sample_rate
left = int(round(left))
right = left + len(other)
oleft = 0
oright = len(other)
# other overhangs on left so truncate
if left < 0:
oleft = -left
left = 0
# other overhangs on right so truncate
if right > len(ts):
oright = len(other) - (right - len(ts))
right = len(ts)
ts[left:right] += other[oleft:oright]
return ts
add_into = inject # maintain backwards compatibility for now
@_nocomplex
def cyclic_time_shift(self, dt):
"""Shift the data and timestamps by a given number of seconds
Shift the data and timestamps in the time domain a given number of
seconds. To just change the time stamps, do ts.start_time += dt.
The time shift may be smaller than the intrinsic sample rate of the data.
Note that data will be cyclically rotated, so if you shift by 2
seconds, the final 2 seconds of your data will now be at the
beginning of the data set.
Parameters
----------
dt : float
Amount of time to shift the vector.
Returns
-------
data : pycbc.types.TimeSeries
The time shifted time series.
"""
# We do this in the frequency domain to allow us to do sub-sample
# time shifts. This also results in the shift being circular. It
        # is left to a future update to do a faster implementation in the case
# where the time shift can be done with an exact number of samples.
return self.to_frequencyseries().cyclic_time_shift(dt).to_timeseries()
def match(self, other, psd=None,
low_frequency_cutoff=None, high_frequency_cutoff=None):
""" Return the match between the two TimeSeries or FrequencySeries.
Return the match between two waveforms. This is equivalent to the overlap
maximized over time and phase. By default, the other vector will be
resized to match self. This may remove high frequency content or the
end of the vector.
Parameters
----------
other : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : Frequency Series
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the match.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the match.
Returns
-------
match: float
index: int
The number of samples to shift to get the match.
"""
return self.to_frequencyseries().match(other, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff)
def detrend(self, type='linear'):
""" Remove linear trend from the data
Remove a linear trend from the data to improve the approximation that
the data is circularly convolved, this helps reduce the size of filter
transients from a circular convolution / filter.
Parameters
----------
type: str
The choice of detrending. The default ('linear') removes a linear
least squares fit. 'constant' removes only the mean of the data.
"""
from scipy.signal import detrend
return self._return(detrend(self.numpy(), type=type))
def plot(self, **kwds):
""" Basic plot of this time series
"""
from matplotlib import pyplot
if self.kind == 'real':
plot = pyplot.plot(self.sample_times, self, **kwds)
return plot
elif self.kind == 'complex':
plot1 = pyplot.plot(self.sample_times, self.real(), **kwds)
plot2 = pyplot.plot(self.sample_times, self.imag(), **kwds)
return plot1, plot2
def load_timeseries(path, group=None):
"""Load a TimeSeries from an HDF5, ASCII or Numpy file. The file type is
inferred from the file extension, which must be `.hdf`, `.txt` or `.npy`.
For ASCII and Numpy files, the first column of the array is assumed to
contain the sample times. If the array has two columns, a real-valued time
series is returned. If the array has three columns, the second and third
ones are assumed to contain the real and imaginary parts of a complex time
series.
For HDF files, the dataset is assumed to contain the attributes `delta_t`
and `start_time`, which should contain respectively the sampling period in
seconds and the start GPS time of the data.
The default data types will be double precision floating point.
Parameters
----------
path: string
Input file path. Must end with either `.npy`, `.txt` or `.hdf`.
group: string
Additional name for internal storage use. When reading HDF files, this
is the path to the HDF dataset to read.
Raises
------
ValueError
If path does not end in a supported extension.
For Numpy and ASCII input files, this is also raised if the array
        does not have 2 or 3 columns.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
data = _numpy.load(path)
elif ext == '.txt':
data = _numpy.loadtxt(path)
elif ext == '.hdf':
key = 'data' if group is None else group
with h5py.File(path, 'r') as f:
data = f[key][:]
series = TimeSeries(data, delta_t=f[key].attrs['delta_t'],
epoch=f[key].attrs['start_time'])
return series
else:
raise ValueError('Path must end with .npy, .hdf, or .txt')
delta_t = (data[-1][0] - data[0][0]) / (len(data) - 1)
epoch = _lal.LIGOTimeGPS(data[0][0])
    if data.ndim == 2 and data.shape[1] == 2:
        return TimeSeries(data[:,1], delta_t=delta_t, epoch=epoch)
    elif data.ndim == 2 and data.shape[1] == 3:
        return TimeSeries(data[:,1] + 1j*data[:,2],
                          delta_t=delta_t, epoch=epoch)
    raise ValueError('File must have 2 (real) or 3 (complex) columns, '
                     'cannot convert to TimeSeries')
| gpl-3.0 | -2,155,293,972,368,629,800 | 38.411765 | 120 | 0.587803 | false |
vsilent/smarty-bot | core/lib/julius/connect.py | 1 | 2602 | #! /usr/bin/python -u
# (Note: The -u disables buffering, as else we don't get Julius's output.)
#
# Command and Control Application for Julius
#
# How to use it:
# julius -quiet -input mic -C julian.jconf 2>/dev/null | ./command.py
#
# Copyright (C) 2008, 2009 Siegfried-Angel Gevatter Pujals <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Supported commands:
#
# This file is provided as an example, and should be modified to suit
# your needs. As is, it only supports a few commands and executes them on
# either Rhythmbox or Banshee.
from core.config import settings
from core.config.settings import logger
import sys
import os
import zmq
import pyjulius
import Queue
SERVICE_NAME = 'julius'
#prepare socket for smarty brain listener
context = zmq.Context()
sock = context.socket(zmq.REP)
sock.bind('ipc:///tmp/smarty-julius')
# Initialize and try to connect
client = pyjulius.Client('localhost', 10500)
try:
client.connect()
except pyjulius.ConnectionError:
print 'Start julius as module first!'
sys.exit(1)
# Start listening to the server
client.start()
try:
while 1:
#listen to command from main thread
try:
result = client.results.get(False)
if isinstance(result, pyjulius.Sentence):
logger.info('Julius connector got : %s' % result)
#print 'Sentence "%s" recognized with score %.2f' % (result, result.score)
req = sock.recv_json()
if req.get('read', None):
logger.info('Julius connector got : %s' % req)
sock.send_json({'request': str(result), 'from': SERVICE_NAME})
except Queue.Empty:
continue
#print repr(result)
except KeyboardInterrupt:
print 'Exiting...'
client.stop() # send the stop signal
client.join() # wait for the thread to die
client.disconnect() # disconnect from julius
| mit | -8,112,321,757,459,421,000 | 33.693333 | 90 | 0.664873 | false |
xxi511/CKFinder | CKCrawler.py | 1 | 3689 | from bs4 import BeautifulSoup as bs
from urllib.request import urlopen
from urllib.request import Request
import threading
import webbrowser
from collections import OrderedDict
class CKCrawler(object):
    def __init__(self, tid, keyword, p1, p2):
homeData = self.getpageData(tid, 1)
lastPage = int(homeData.find('a', class_="last").string[3:])
p2 = lastPage if p2 == 99999 else p2
self.err = None
if p1 > lastPage or p2 > lastPage:
            self.err = '好像沒這麼多頁喔'  # "It looks like there aren't that many pages"
return
self.donelist = []
self.findlist = {}
self.total = p2 - p1 + 1
self.tid = tid
        self.th1, self.th2, self.th3 = self.createThread(p1, p2, tid, keyword)
def getpageData(self, tid, page):
url = 'https://ck101.com/thread-{}-{}-1.html'.format(tid, page)
req = Request(url, headers={'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'})
data = bs(urlopen(req).read(), 'lxml')
return data
def createThread(self, p1, p2, tid, keyword):
total = p2 - p1 + 1
def search(start, end):
for i in range(start, end):
data = self.getpageData(tid, i)
articles = data.find_all('td', class_="t_f")
for article in articles:
pid = article.attrs['id'].split('_')[-1]
content = article.text.replace('\r\n', '')
idx = content.find(keyword)
if idx == -1:
continue
num = 100
grabs = max(idx - num, 0)
grage = idx + len(keyword) + num
self.findlist[pid] = content[grabs:grage]
self.donelist.append(i)
if total <= 3:
th1 = threading.Thread(target=search, args=(p1, p2 + 1))
th2 = threading.Thread(target=search, args=(p1, p1))
th3 = threading.Thread(target=search, args=(p1, p1))
else:
gap = self.total // 3
s1, s2, s3, s4 = p1, p1 + gap, p1 + 2 * gap, p2 + 1
th1 = threading.Thread(target=search, args=(s1, s2))
th2 = threading.Thread(target=search, args=(s2, s3))
th3 = threading.Thread(target=search, args=(s3, s4))
return th1, th2, th3
def startThread(self):
self.th1.start()
self.th2.start()
self.th3.start()
def openHtml(self):
message = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<style>
body {background-color: #bcbcbc; font-family: "Microsoft JhengHei", "Times New Roman";}
a {background-color: #ceecec; display:block; width: 50%;
padding: 20px; border-radius: 15px; -moz-border-radius: 15px;
text-decoration:none;color:black; white-space: pre-line; margin: auto;}
a:visited {background-color: #ececec;}
</style>
</head>
<body>
"""
sortedDict = OrderedDict(sorted(self.findlist.items()))
for key, val in sortedDict.items():
message += self.herfModule(key, val)
message += """
</body>
</html>
"""
        with open('result.html', 'w', encoding='utf-8') as f:  # match the charset declared in the HTML <meta>
f.write(message)
webbrowser.open_new_tab('result.html')
def herfModule(self, pid, world):
url = 'https://ck101.com/forum.php?mod=redirect&goto=findpost&ptid={}&pid={}'.format(self.tid, pid)
return """<a href="{}" target="_blank">{}</a>
<br>""".format(url, world) | mit | 2,669,349,828,524,851,000 | 35.74 | 118 | 0.531718 | false |
google/compynator | tests/test_niceties.py | 1 | 5249 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import string
import unittest
from compynator import *
class NicetiesTest(unittest.TestCase):
def _test_class(self, p, char_class):
self.assertFalse(p(''))
for c in (chr(x) for x in range(256)):
if c in char_class:
self.assertEqual(p(c), Succeed(c)(''))
self.assertEqual(p(c + 'abc'), Succeed(c)('abc'))
else:
self.assertFalse(p(c))
def test_digit(self):
self._test_class(Digit, string.digits)
def test_hexdigit(self):
self._test_class(HexDigit, string.digits + 'abcdefABCDEF')
def test_octdigit(self):
self._test_class(OctDigit, string.octdigits)
def test_lower(self):
self._test_class(Lower, string.ascii_lowercase)
def test_upper(self):
self._test_class(Upper, string.ascii_uppercase)
def test_alpha(self):
self._test_class(Alpha, string.ascii_letters)
def test_alnum(self):
self._test_class(Alnum, string.ascii_letters + string.digits)
def test_regex(self):
r = Regex(re.compile('a+'))
self.assertFalse(r('b'))
self.assertEqual(r('ab'), Succeed('a')('b'))
self.assertEqual(r('aab'), Succeed('aa')('b'))
def test_iterminal(self):
true = ITerminal('t')
self.assertEqual(true('t'), Succeed('t')(''))
self.assertEqual(true('T'), Succeed('T')(''))
self.assertEqual(true('ta'), Succeed('t')('a'))
self.assertEqual(true('Ta'), Succeed('T')('a'))
ab = ITerminal('ab')
self.assertFalse(ab(''))
self.assertFalse(ab('a'))
self.assertFalse(ab('Aa'))
self.assertEqual(ab('ab'), Succeed('ab')(''))
self.assertEqual(ab('aB'), Succeed('aB')(''))
self.assertEqual(ab('Ab'), Succeed('Ab')(''))
self.assertEqual(ab('AB'), Succeed('AB')(''))
self.assertEqual(ab('aBc'), Succeed('aB')('c'))
def test_collect_okay(self):
zero = Terminal('0')
one = Terminal('1')
bit = zero | one
p = Collect(bit, bit, bit)
for a in '01':
for b in '01':
for c in '01':
bits = a + b + c
self.assertEqual(set(p(bits)), {Result((a, b, c), '')})
def test_collect_nondet(self):
bit = Terminal('a').repeat(0, 1, take_all=True)
p = Collect(bit, bit, bit)
self.assertEqual(len(p('')), 1) # -/-/-
self.assertEqual(len(p('a')), 4) # a/-/-, -/a/-, -/-/a
self.assertEqual(len(p('aa')), 7) # a/a/-, a/-/a, -/a/a
self.assertEqual(len(p('aaa')), 8) # a/a/a
def test_collect_fail(self):
zero = Terminal('0').repeat(1, 2, take_all=True)
p = Collect(zero, zero, zero)
rs = p('00a')
self.assertFalse(rs)
def test_context_sensitive(self):
start_tag = Terminal('<').then(Alpha.repeat(1)).skip('>')
def end_tag(name):
return Terminal('</').then(Terminal(name)).skip('>')
tag = start_tag.then(lambda tag: body.skip(end_tag(tag)))
body = tag | Alnum.repeat()
self.assertEqual(tag('<b>bold</b>'), Succeed('bold')(''))
self.assertEqual(tag('<b><i>bi</i></b>'), Succeed('bi')(''))
self.assertFalse(tag('<b><i>fail</b></i>'))
self.assertFalse(tag('<b><i>fail</i>'))
self.assertFalse(tag('<b><i>fail</b>'))
def test_lookahead_true(self):
# Taken from https://en.wikipedia.org/wiki/Parsing_expression_grammar.
B = Forward()
B.is_(Terminal('b') + B.repeat(0, 1) + 'c')
A = Forward()
A.is_(Terminal('a') + A.repeat(0, 1) + 'b')
S = (Lookahead(A + 'c') + Terminal('a').repeat(1) + B).filter(
lambda rs: not rs.remain)
self.assertFalse(S('aabc'))
self.assertFalse(S('abbc'))
self.assertFalse(S('abcc'))
self.assertEqual(S('abc'), Succeed('abc')(''))
self.assertEqual(S('aabbcc'), Succeed('aabbcc')(''))
self.assertEqual(S('aaabbbccc'), Succeed('aaabbbccc')(''))
self.assertFalse(S('aaabbccc'))
self.assertFalse(S('aaabbbcc'))
self.assertFalse(S('aabbbccc'))
def test_lookahead_false(self):
# Taken from https://en.wikipedia.org/wiki/Parsing_expression_grammar.
a = Terminal('a')
b = Terminal('b')
a_but_not_ab = Lookahead(a + b, take_if=False) + a
self.assertEqual(a_but_not_ab('a'), Succeed('a')(''))
self.assertFalse(a_but_not_ab('ab'))
self.assertEqual(a_but_not_ab('ac'), Succeed('a')('c'))
self.assertFalse(a_but_not_ab('ba'))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,231,996,535,187,309,600 | 35.706294 | 78 | 0.563726 | false |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/errors/types/conversion_custom_variable_error.py | 1 | 1244 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.errors',
marshal='google.ads.googleads.v7',
manifest={
'ConversionCustomVariableErrorEnum',
},
)
class ConversionCustomVariableErrorEnum(proto.Message):
r"""Container for enum describing possible conversion custom
variable errors.
"""
class ConversionCustomVariableError(proto.Enum):
r"""Enum describing possible conversion custom variable errors."""
UNSPECIFIED = 0
UNKNOWN = 1
DUPLICATE_NAME = 2
DUPLICATE_TAG = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 7,988,404,719,535,612,000 | 30.1 | 74 | 0.704984 | false |
PanDAWMS/panda-bigmon-lsst | lsst/settings/config.py | 1 | 7652 |
from os.path import dirname, join
import core
import lsst
import filebrowser
import pbm
from lsst import admin
#from local import defaultDatabase, MY_SECRET_KEY
from local import dbaccess, MY_SECRET_KEY
### VIRTUALENV
#VIRTUALENV_PATH = '/data/virtualenv/django1.6.1__python2.6.6'
#VIRTUALENV_PATH = '/data/virtualenv/django1.6.1__python2.6.6__lsst'
#VIRTUALENV_PATH = '/data/wenaus/virtualenv/twdev__django1.6.1__python2.6.6__lsst'
VIRTUALENV_PATH = '/data/wenaus/virtualenv/twrpm'
### WSGI
WSGI_PATH = VIRTUALENV_PATH + '/pythonpath'
### DB_ROUTERS for atlas's prodtask
DATABASE_ROUTERS = [\
'atlas.dbrouter.ProdMonDBRouter', \
'pbm.dbrouter.PandaBrokerageMonDBRouter', \
]
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
join(dirname(core.common.__file__), 'static'),
# join(dirname(lsst.__file__), 'static'),
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
join(dirname(lsst.__file__), 'templates'),
join(dirname(admin.__file__), 'templates'),
join(dirname(core.common.__file__), 'templates'),
join(dirname(filebrowser.__file__), 'templates'),
join(dirname(pbm.__file__), 'templates'),
)
STATIC_ROOT = join(dirname(lsst.__file__), 'static')
#STATIC_ROOT = None
MEDIA_ROOT = join(dirname(lsst.__file__), 'media')
STATIC_URL_BASE = '/static/'
MEDIA_URL_BASE = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = MY_SECRET_KEY
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# DATABASES = {
# # 'default': {
# # 'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
# # 'NAME': '', # Or path to database file if using sqlite3.
# # 'USER': '', # Not used with sqlite3.
# # 'PASSWORD': '', # Not used with sqlite3.
# # 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
# # 'PORT': '', # Set to empty string for default. Not used with sqlite3.
# # }
# 'default': defaultDatabase
# }
DATABASES = dbaccess
### URL_PATH_PREFIX for multi-developer apache/wsgi instance
### on EC2: URL_PATH_PREFIX = '/bigpandamon' or URL_PATH_PREFIX = '/developersprefix'
#URL_PATH_PREFIX = '/lsst'
#URL_PATH_PREFIX = '/twrpmlsst'
URL_PATH_PREFIX = '/lsst'
#URL_PATH_PREFIX = ''
### on localhost:8000: URL_PATH_PREFIX = '/.'
#URL_PATH_PREFIX = ''
MEDIA_URL = URL_PATH_PREFIX + MEDIA_URL_BASE
STATIC_URL = URL_PATH_PREFIX + STATIC_URL_BASE
#LOG_ROOT = '/data/bigpandamon_virtualhosts/lsst/logs'
#LOG_ROOT = '/data/wenaus/logs'
LOG_ROOT = '/data/wenaus/bigpandamon_virtualhosts/twrpm/logs'
LOG_SIZE = 1000000000
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
# 'disable_existing_loggers': True,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'logfile-bigpandamon': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_ROOT + "/logfile.bigpandamon",
'maxBytes': LOG_SIZE,
'backupCount': 2,
'formatter': 'verbose',
},
'logfile-django': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_ROOT + "/logfile.django",
'maxBytes': LOG_SIZE,
'backupCount': 2,
'formatter': 'verbose',
},
'logfile-viewdatatables': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_ROOT + "/logfile.viewdatatables",
'maxBytes': LOG_SIZE,
'backupCount': 2,
'formatter': 'verbose',
},
'logfile-rest': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_ROOT + "/logfile.rest",
'maxBytes': LOG_SIZE,
'backupCount': 2,
'formatter': 'verbose',
},
'logfile-api_reprocessing': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_ROOT + "/logfile.api_reprocessing",
'maxBytes': LOG_SIZE,
'backupCount': 2,
'formatter': 'verbose',
},
'logfile-filebrowser': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_ROOT + "/logfile.filebrowser",
'maxBytes': LOG_SIZE,
'backupCount': 2,
'formatter': 'verbose',
},
'logfile-pbm': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_ROOT + "/logfile.pbm",
'maxBytes': LOG_SIZE,
'backupCount': 2,
'formatter': 'verbose',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
# 'class': 'django.utils.log.AdminEmailHandler'
'class':'logging.StreamHandler',
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
# 'level': 'ERROR',
'level': 'DEBUG',
'propagate': True,
},
'django': {
'handlers':['logfile-django'],
'propagate': True,
'level':'DEBUG',
},
'django_datatables_view': {
'handlers':['logfile-viewdatatables'],
'propagate': True,
'level':'DEBUG',
},
'rest_framework': {
'handlers':['logfile-rest'],
'propagate': True,
'level':'DEBUG',
},
'bigpandamon': {
'handlers': ['logfile-bigpandamon'],
'level': 'DEBUG',
},
'api_reprocessing':{
'handlers': ['logfile-api_reprocessing'],
'level': 'DEBUG',
},
'bigpandamon-filebrowser':{
'handlers': ['logfile-filebrowser'],
'level': 'DEBUG',
},
'bigpandamon-pbm':{
'handlers': ['logfile-pbm'],
'level': 'DEBUG',
}
},
'formatters': {
'verbose': {
# 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
'format': '%(asctime)s %(module)s %(name)-12s:%(lineno)d %(levelname)-5s %(message)s'
},
'simple': {
'format': '%(levelname)s %(name)-12s:%(lineno)d %(message)s'
},
},
'logfile': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_ROOT + "/logfile",
'maxBytes': 10000000,
'backupCount': 5,
'formatter': 'verbose',
},
}
ENV = {
### Application name
'APP_NAME': "PanDA Monitor", \
### Page title default
'PAGE_TITLE': "PanDA Monitor", \
### Menu item separator
'SEPARATOR_MENU_ITEM': " ", \
### Navigation chain item separator
    'SEPARATOR_NAVIGATION_ITEM': " » ", \
}
| apache-2.0 | 4,741,140,407,640,507,000 | 31.841202 | 104 | 0.542473 | false |
flavour/eden | models/000_1st_run.py | 5 | 3942 | # -*- coding: utf-8 -*-
"""
1st RUN:
- Run update_check if needed.
- Import the S3 Framework Extensions
- If needed, copy deployment templates to the live installation.
"""
# Debug why Eclipse breakpoints are ignored
# http://stackoverflow.com/questions/29852426/pydev-ignoring-breakpoints
#import sys
#def trace_func(frame, event, arg):
# print 'Context: ', frame.f_code.co_name, '\tFile:', frame.f_code.co_filename, '\tLine:', frame.f_lineno, '\tEvent:', event
# return trace_func
#sys.settrace(trace_func)
# -----------------------------------------------------------------------------
# Perform update checks - will happen in 1st_run or on those upgrades when new
# dependencies have been added.
# Increment this when new dependencies are added
# This will be compared to the version in the 0000_update_check.py 'canary' file.
CURRENT_UPDATE_CHECK_ID = 4
update_check_needed = False
try:
if CANARY_UPDATE_CHECK_ID != CURRENT_UPDATE_CHECK_ID:
update_check_needed = True
except NameError:
update_check_needed = True
# shortcut
appname = request.application
if update_check_needed:
# @ToDo: Load deployment_settings so that we can configure the update_check
# - need to rework so that 000_config.py is parsed 1st
import s3cfg
settings = s3cfg.S3Config()
# Run update checks
from s3_update_check import update_check
errors = []
warnings = []
messages = update_check(settings)
errors.extend(messages.get("error_messages", []))
warnings.extend(messages.get("warning_messages", []))
# Catch-all check for dependency errors.
# NB This does not satisfy the goal of calling out all the setup errors
# at once - it will die on the first fatal error encountered.
try:
import s3 as s3base
except Exception as e:
errors.append(e.message)
import sys
if warnings:
# Report (non-fatal) warnings.
prefix = "\n%s: " % T("WARNING")
sys.stderr.write("%s%s\n" % (prefix, prefix.join(warnings)))
if errors:
# Report errors and stop.
actionrequired = T("ACTION REQUIRED")
prefix = "\n%s: " % actionrequired
sys.stderr.write("%s%s\n" % (prefix, prefix.join(errors)))
htmlprefix = "\n<br /><b>%s</b>: " % actionrequired
html = "<errors>" + htmlprefix + htmlprefix.join(errors) + "\n</errors>"
raise HTTP(500, body=html)
# Create or update the canary file.
from s3dal import portalocker
canary = open("applications/%s/models/0000_update_check.py" % appname, "w")
portalocker.lock(canary, portalocker.LOCK_EX)
statement = "CANARY_UPDATE_CHECK_ID = %s" % CURRENT_UPDATE_CHECK_ID
canary.write(statement)
canary.close()
# -----------------------------------------------------------------------------
import os
from collections import OrderedDict
from gluon import current
from gluon.storage import Storage
# Keep all S3 framework-level elements stored in response.s3, so as to avoid
# polluting global namespace & to make it clear which part of the framework is
# being interacted with.
# Avoid using this where a method parameter could be used:
# http://en.wikipedia.org/wiki/Anti_pattern#Programming_anti-patterns
response.s3 = Storage()
s3 = response.s3
s3.gis = Storage() # Defined early for use by S3Config.
current.cache = cache
# Limit for filenames on filesystem:
# https://en.wikipedia.org/wiki/Comparison_of_file_systems#Limits
# NB This takes effect during the file renaming algorithm - the length of uploaded filenames is unaffected
current.MAX_FILENAME_LENGTH = 255 # Defined early for use by S3Config.
# Common compat imports (for controllers)
from s3compat import basestring, long, reduce, xrange
# Import S3Config
import s3cfg
settings = s3cfg.S3Config()
current.deployment_settings = deployment_settings = settings
# END =========================================================================
| mit | -5,285,280,050,453,982,000 | 35.5 | 127 | 0.658803 | false |
iABC2XYZ/abc | Epics/DataAna8.py | 1 | 5336 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 27 15:44:34 2017
@author: p
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
plt.close('all')
def GenWeight(shape):
initial = tf.truncated_normal(shape, stddev=1.)
return tf.Variable(initial)
def GenBias(shape):
initial = tf.constant(1., shape=shape)
return tf.Variable(initial)
def getDataRow(exData,sizeRow):
numEx=np.shape(exData)[0]
idChoose=np.random.randint(0,high=numEx,size=(sizeRow))
yCHV=np.reshape(exData[idChoose,0:14],(sizeRow,7,2))
xBPM=np.reshape(exData[idChoose,14:24],(sizeRow,5,2))
return xBPM,yCHV
def conv1d(x, W):
return tf.nn.conv1d(x, W, stride=1, padding="SAME")
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding="SAME")
exData=np.loadtxt('/home/e/ABC/abc/Epics/Rec.dat')
bpm=tf.placeholder(tf.float32,shape=(None,5,2))
cHV=tf.placeholder(tf.float32,shape=(None,7,2))
xInput=bpm
yInput=cHV
#
nChan1=200
w1= GenWeight([1,2,nChan1])
b1=GenBias([nChan1])
x1=tf.nn.relu(conv1d(xInput, w1)+b1)
#
nChan2=1
n2=nChan1/nChan2
x2=tf.reshape(x1,(-1,5,n2,nChan2))
#
nChan3=13
w3= GenWeight([1,1,nChan2,nChan3])
b3=GenBias([nChan3])
x3=tf.nn.relu(conv2d(x2, w3)+b3)
#
nChan4=13
w4= GenWeight([2,2,nChan2,nChan4])
b4=GenBias([nChan4])
x4=tf.nn.relu(conv2d(x2, w4)+b4)
#
nChan5=13
w5= GenWeight([3,3,nChan2,nChan5])
b5=GenBias([nChan5])
x5=tf.nn.relu(conv2d(x2, w5)+b5)
#
x6=tf.concat((tf.concat((x3,x4),axis=3),x5),axis=3)
#
nChan7=5
w7= GenWeight([3,3,nChan3+nChan4+nChan5,nChan7])
b7=GenBias([nChan7])
x7=tf.nn.relu(conv2d(x6, w7)+b7)
#
x8=tf.reshape(x7,(-1,5*n2*nChan7))
#
w9=GenWeight([5*n2*nChan7,14])
b9=GenBias([14])
x9=tf.matmul(x8,w9)+b9
#
n9_2=250
w9_2=GenWeight([5*n2*nChan7,n9_2])
b9_2=GenBias([n9_2])
x9_2=tf.nn.relu(tf.matmul(x8,w9_2)+b9_2)
#
w10_2=GenWeight([n9_2,14])
b10_2=GenBias([14])
x10_2=tf.matmul(x9_2,w10_2)+b10_2
##
xFinal=x10_2
xOutput=tf.reshape(xFinal,(-1,14))
yOutput=tf.reshape(yInput,(-1,14))
lossFn=tf.reduce_mean(tf.square(xOutput-yOutput))
trainBPM=tf.train.AdamOptimizer(0.005)
optBPM=trainBPM.minimize(lossFn)
iniBPM=tf.global_variables_initializer()
try:
if vars().has_key('se'):
se.close()
except:
pass
se= tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
se.run(iniBPM)
nIt=2e4
sizeRow=100
stepLossRec=50
nLossRec=np.int32(nIt/stepLossRec+1)
lossRec=np.zeros((nLossRec))
iRec=0
for i in range(np.int32(nIt)):
xBPM,yCHV=getDataRow(exData,sizeRow)
se.run(optBPM,feed_dict={bpm:xBPM,cHV:yCHV})
if i % stepLossRec==0:
lossRecTmp=se.run(lossFn,feed_dict={bpm:xBPM,cHV:yCHV})
lossRec[iRec]=lossRecTmp
iRec+=1
print lossRecTmp
'''
plt.figure(1)
plt.hold
plt.plot(iRec,lossRecTmp,'*b')
if iRec==15:
plt.close(plt.figure(1))
#if iRec>6:
# plt.plot(iRec,np.mean(lossRec[5:iRec]),'*r')
if iRec>15:
plt.plot(iRec,np.mean(lossRec[iRec-15:iRec]),'go')
'''
plt.figure('lossRec')
numPlot=30
plt.clf()
if iRec<=numPlot:
xPlot=np.linspace(0,iRec-1,iRec)
yPlot=lossRec[0:iRec:]
yPlotMean=np.cumsum(yPlot)/(xPlot+1)
else:
xPlot=np.linspace(iRec-numPlot,iRec-1,numPlot)
yPlot=lossRec[iRec-numPlot:iRec:]
yPlotMean[0:-1:]=yPlotMean[1::]
yPlotMean[-1]=np.mean(yPlot)
plt.hold
plt.plot(xPlot,yPlot,'*b')
plt.plot(xPlot,yPlotMean,'go')
plt.grid('on')
plt.title(i)
plt.pause(0.05)
xBPM,yCHV=getDataRow(exData,5)
yCHV_Cal=se.run(xFinal,feed_dict={bpm:xBPM})
plt.figure(2)
plt.clf()
plt.hold
#plt.plot(yCHV[0,:],'b*')
#plt.plot(yCHV_Cal[0,:],'r*')
#plt.plot(yCHV[1,:],'bo')
#plt.plot(yCHV_Cal[1,:],'ro')
#plt.plot(yCHV[2,:],'b^')
#plt.plot(yCHV_Cal[2,:],'r^')
#plt.plot(yCHV[3,:],'bs')
#plt.plot(yCHV_Cal[3,:],'rs')
plt.plot(np.reshape(yCHV[4,:],(14)),'bd')
plt.plot(yCHV_Cal[4,:],'rd')
plt.title(i)
plt.pause(0.05)
#se.close()
xBPMReal_1=np.ones((5,2))*0.
xBPMReal_2=np.ones((5,2))*3.
xBPMReal_3=np.ones((5,2))*(-3.)
xBPMReal_4=np.ones((5,2))
xBPMReal_4[:,0]=xBPMReal_4[:,0]*3.
xBPMReal_4[:,1]=xBPMReal_4[:,1]*(-3.)
xBPMReal=np.zeros((4,5,2))
xBPMReal[0,:,:]=xBPMReal_1
xBPMReal[1,:,:]=xBPMReal_2
xBPMReal[2,:,:]=xBPMReal_3
xBPMReal[3,:,:]=xBPMReal_4
yCHV_Cal4Real=se.run(xFinal,feed_dict={bpm:xBPMReal})
yCHV_Cal4Real_1=np.reshape(yCHV_Cal4Real[0,::],(7,2))
yCHV_Cal4Real_2=np.reshape(yCHV_Cal4Real[1,::],(7,2))
yCHV_Cal4Real_3=np.reshape(yCHV_Cal4Real[2,::],(7,2))
yCHV_Cal4Real_4=np.reshape(yCHV_Cal4Real[3,::],(7,2))
print '----------------- yCHV_Cal4Real_1 --------------------------'
print yCHV_Cal4Real_1
print '----------------- yCHV_Cal4Real_2 --------------------------'
print yCHV_Cal4Real_2
print '----------------- yCHV_Cal4Real_3 --------------------------'
print yCHV_Cal4Real_3
print '----------------- yCHV_Cal4Real_4 --------------------------'
print yCHV_Cal4Real_4
| gpl-3.0 | -3,398,072,958,445,177,300 | 19.288973 | 75 | 0.589018 | false |
Paricitoi/python_4_eng | python_week2/solution_week2_ex2a.py | 1 | 1823 | #!/usr/bin/env python
'''
Write a script that connects to the lab pynet-rtr1, logins, and executes the
'show ip int brief' command.
'''
import telnetlib
import time
import socket
import sys
import getpass
TELNET_PORT = 23
TELNET_TIMEOUT = 6
def send_command(remote_conn, cmd):
'''
Send a command down the telnet channel
Return the response
'''
cmd = cmd.rstrip()
remote_conn.write(cmd + '\n')
time.sleep(1)
return remote_conn.read_very_eager()
def login(remote_conn, username, password):
'''
Login to network device
'''
output = remote_conn.read_until("sername:", TELNET_TIMEOUT)
remote_conn.write(username + '\n')
output += remote_conn.read_until("ssword:", TELNET_TIMEOUT)
remote_conn.write(password + '\n')
return output
def disable_paging(remote_conn, paging_cmd='terminal length 0'):
'''
Disable the paging of output (i.e. --More--)
'''
return send_command(remote_conn, paging_cmd)
def telnet_connect(ip_addr):
'''
Establish telnet connection
'''
try:
return telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT)
except socket.timeout:
sys.exit("Connection timed-out")
def main():
'''
Write a script that connects to the lab pynet-rtr1, logins, and executes the
'show ip int brief' command.
'''
ip_addr = raw_input("IP address: ")
ip_addr = ip_addr.strip()
username = 'pyclass'
password = getpass.getpass()
remote_conn = telnet_connect(ip_addr)
output = login(remote_conn, username, password)
time.sleep(1)
remote_conn.read_very_eager()
disable_paging(remote_conn)
output = send_command(remote_conn, 'show ip int brief')
print "\n\n"
print output
print "\n\n"
remote_conn.close()
if __name__ == "__main__":
main()
| gpl-3.0 | -1,532,054,509,643,778,300 | 22.371795 | 80 | 0.643993 | false |
cbertinato/pandas | pandas/tests/tseries/frequencies/test_freq_code.py | 1 | 4666 | import pytest
from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs.frequencies import (
FreqGroup, _period_code_map, get_freq, get_freq_code)
import pandas.tseries.offsets as offsets
@pytest.fixture(params=list(_period_code_map.items()))
def period_code_item(request):
return request.param
@pytest.mark.parametrize("freqstr,expected", [
("A", 1000), ("3A", 1000), ("-1A", 1000),
("Y", 1000), ("3Y", 1000), ("-1Y", 1000),
("W", 4000), ("W-MON", 4001), ("W-FRI", 4005)
])
def test_freq_code(freqstr, expected):
assert get_freq(freqstr) == expected
def test_freq_code_match(period_code_item):
freqstr, code = period_code_item
assert get_freq(freqstr) == code
@pytest.mark.parametrize("freqstr,expected", [
("A", 1000), ("3A", 1000), ("-1A", 1000), ("A-JAN", 1000),
("A-MAY", 1000), ("Y", 1000), ("3Y", 1000), ("-1Y", 1000),
("Y-JAN", 1000), ("Y-MAY", 1000), (offsets.YearEnd(), 1000),
(offsets.YearEnd(month=1), 1000), (offsets.YearEnd(month=5), 1000),
("W", 4000), ("W-MON", 4000), ("W-FRI", 4000), (offsets.Week(), 4000),
(offsets.Week(weekday=1), 4000), (offsets.Week(weekday=5), 4000),
("T", FreqGroup.FR_MIN),
])
def test_freq_group(freqstr, expected):
assert resolution.get_freq_group(freqstr) == expected
def test_freq_group_match(period_code_item):
freqstr, code = period_code_item
str_group = resolution.get_freq_group(freqstr)
code_group = resolution.get_freq_group(code)
assert str_group == code_group == code // 1000 * 1000
@pytest.mark.parametrize("freqstr,exp_freqstr", [
("D", "D"), ("W", "D"), ("M", "D"),
("S", "S"), ("T", "S"), ("H", "S")
])
def test_get_to_timestamp_base(freqstr, exp_freqstr):
tsb = libfrequencies.get_to_timestamp_base
assert tsb(get_freq_code(freqstr)[0]) == get_freq_code(exp_freqstr)[0]
_reso = resolution.Resolution
@pytest.mark.parametrize("freqstr,expected", [
("A", "year"), ("Q", "quarter"), ("M", "month"),
("D", "day"), ("H", "hour"), ("T", "minute"),
("S", "second"), ("L", "millisecond"),
("U", "microsecond"), ("N", "nanosecond")
])
def test_get_str_from_freq(freqstr, expected):
assert _reso.get_str_from_freq(freqstr) == expected
@pytest.mark.parametrize("freq", ["A", "Q", "M", "D", "H",
"T", "S", "L", "U", "N"])
def test_get_freq_roundtrip(freq):
result = _reso.get_freq(_reso.get_str_from_freq(freq))
assert freq == result
@pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U"])
def test_get_freq_roundtrip2(freq):
result = _reso.get_freq(_reso.get_str(_reso.get_reso_from_freq(freq)))
assert freq == result
@pytest.mark.parametrize("args,expected", [
((1.5, "T"), (90, "S")), ((62.4, "T"), (3744, "S")),
((1.04, "H"), (3744, "S")), ((1, "D"), (1, "D")),
((0.342931, "H"), (1234551600, "U")), ((1.2345, "D"), (106660800, "L"))
])
def test_resolution_bumping(args, expected):
# see gh-14378
assert _reso.get_stride_from_decimal(*args) == expected
@pytest.mark.parametrize("args", [
(0.5, "N"),
# Too much precision in the input can prevent.
(0.3429324798798269273987982, "H")
])
def test_cat(args):
msg = "Could not convert to integer offset at any resolution"
with pytest.raises(ValueError, match=msg):
_reso.get_stride_from_decimal(*args)
@pytest.mark.parametrize("freq_input,expected", [
# Frequency string.
("A", (get_freq("A"), 1)),
("3D", (get_freq("D"), 3)),
("-2M", (get_freq("M"), -2)),
# Tuple.
(("D", 1), (get_freq("D"), 1)),
(("A", 3), (get_freq("A"), 3)),
(("M", -2), (get_freq("M"), -2)),
((5, "T"), (FreqGroup.FR_MIN, 5)),
# Numeric Tuple.
((1000, 1), (1000, 1)),
# Offsets.
(offsets.Day(), (get_freq("D"), 1)),
(offsets.Day(3), (get_freq("D"), 3)),
(offsets.Day(-2), (get_freq("D"), -2)),
(offsets.MonthEnd(), (get_freq("M"), 1)),
(offsets.MonthEnd(3), (get_freq("M"), 3)),
(offsets.MonthEnd(-2), (get_freq("M"), -2)),
(offsets.Week(), (get_freq("W"), 1)),
(offsets.Week(3), (get_freq("W"), 3)),
(offsets.Week(-2), (get_freq("W"), -2)),
(offsets.Hour(), (FreqGroup.FR_HR, 1)),
# Monday is weekday=0.
(offsets.Week(weekday=1), (get_freq("W-TUE"), 1)),
(offsets.Week(3, weekday=0), (get_freq("W-MON"), 3)),
(offsets.Week(-2, weekday=4), (get_freq("W-FRI"), -2)),
])
def test_get_freq_code(freq_input, expected):
assert get_freq_code(freq_input) == expected
def test_get_code_invalid():
with pytest.raises(ValueError, match="Invalid frequency"):
get_freq_code((5, "baz"))
| bsd-3-clause | -8,814,508,839,053,833,000 | 30.527027 | 75 | 0.576511 | false |
xflows/textflows | streams/views.py | 1 | 1151 | # helperji, context stvari
from django.shortcuts import render, get_object_or_404, redirect
from django.http import Http404, HttpResponse
from django.contrib import messages
from django.core import serializers
# from django.utils import simplejson
from workflows.urls import *
from workflows.helpers import *
import workflows.interaction_views
import workflows.visualization_views
import sys
import traceback
# models
from workflows.models import *
from django.contrib.auth.models import User
from workflows.utils import *
# auth stuff
from django.contrib.auth.decorators import login_required
#settings
from mothra.settings import DEBUG, FILES_FOLDER
from streams.models import Stream
import workflows.views
# miscellaneous
import os
def stream_widget_visualization(request,stream_id,widget_id):
stream = get_object_or_404(Stream,pk=stream_id)
widget = get_object_or_404(Widget,pk=widget_id)
if widget.abstract_widget.streaming_visualization_view == '':
        raise Http404
else:
view_to_call = getattr(workflows.views,widget.abstract_widget.streaming_visualization_view)
return view_to_call(request,widget,stream)
| mit | 3,006,428,727,042,375,700 | 27.073171 | 99 | 0.785404 | false |
franramirez688/Taric-Challange | taric_challange/gui/widgets/books_list.py | 1 | 1089 | from PyQt4 import QtGui
class BooksListWidget(QtGui.QWidget):
""" Uneditable list's books """
def __init__(self, label):
super(BooksListWidget, self).__init__()
# init label and table widgets
self.title_label = QtGui.QLabel(label)
self.books_list = QtGui.QListView()
# List settings
self.books_list.minimumHeight()
# Make the list uneditable
self.books_list.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
# Create a model for the list's books
self.model = QtGui.QStringListModel()
# Apply the model to the list view
self.books_list.setModel(self.model)
# Create the layout
self.main_layout = QtGui.QVBoxLayout()
self.main_layout.addWidget(self.title_label)
self.main_layout.addWidget(self.books_list)
# Set the layout
self.setLayout(self.main_layout)
def update_list(self, books_list):
""" Update the books list """
assert isinstance(books_list, list)
self.model.setStringList(books_list)
| mit | 8,205,318,752,501,356,000 | 28.432432 | 79 | 0.6382 | false |
bcroq/kansha | kansha/card_addons/vote/comp.py | 1 | 1757 | # -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
from nagare import security
from peak.rules import when
from nagare.security import common
from kansha.card import Card
from kansha.cardextension import CardExtension
from kansha.board import VOTES_PUBLIC, VOTES_MEMBERS
from .models import DataVote
class Votes(CardExtension):
'''Vote component'''
LOAD_PRIORITY = 70
@property
def allowed(self):
return self.configurator.votes_allowed
def count_votes(self):
'''Returns number of votes for a card'''
return DataVote.count_votes(self.card.data)
def toggle(self):
'''Add a vote to the current card.
Remove vote if user has already voted
'''
security.check_permissions('vote', self)
user = security.get_user()
if self.has_voted():
DataVote.get_vote(self.card.data, user.data).delete()
else:
DataVote(card=self.card.data, user=user.data)
def has_voted(self):
'''Check if the current user already vote for this card'''
user = security.get_user()
return DataVote.has_voted(self.card.data, user.data)
def delete(self):
DataVote.purge(self.card.data)
# FIXME: redesign security from scratch
@when(common.Rules.has_permission, "user and perm == 'vote' and isinstance(subject, Votes)")
def has_permission_Card_vote(self, user, perm, votes):
return ((security.has_permissions('edit', votes.card) and votes.allowed == VOTES_MEMBERS) or
(votes.allowed == VOTES_PUBLIC and user))
| bsd-3-clause | 5,426,557,955,261,706,000 | 28.283333 | 96 | 0.671599 | false |
siriuslee/pyoperant | pyoperant/tlab/local_chronic.py | 1 | 3078 | import datetime as dt
import os
import logging
import argparse
from functools import wraps
from pyoperant import hwio, components, panels, utils, InterfaceError, events
from pyoperant.interfaces import nidaq_
logger = logging.getLogger(__name__)
class Panel131(panels.BasePanel):
""" The chronic recordings box in room 131
The speaker should probably be the address of the nidaq card
Parameters
----------
name: string
Name of this box
speaker: string
Speaker device name for this box
channel: string
The channel name for the analog output
input_channel: string
The channel name for a boolean input (e.g. perch or peck-port)
Default None means no input configured
Attributes
----------
Examples
--------
"""
_default_sound_file = "C:/DATA/stimuli/stim_test/1.wav"
def __init__(self, speaker="Dev1", channel="ao0", input_channel=None, name=None, *args, **kwargs):
super(Panel131, self).__init__(self, *args, **kwargs)
self.name = name
# Initialize interfaces
speaker_out = nidaq_.NIDAQmxAudioInterface(device_name=speaker,
clock_channel="/Dev1/PFI0")
# Create a digital to analog event handler
analog_event_handler = events.EventDToAHandler(channel=speaker + "/" + "ao1",
scaling=3.3,
metadata_bytes=40)
# Create an audio output
audio_out = hwio.AudioOutput(interface=speaker_out,
params={"channel": speaker + "/" + channel,
"analog_event_handler": analog_event_handler})
# Add boolean hwios to inputs and outputs
self.inputs = []
self.outputs = [audio_out]
# Set up components
self.speaker = components.Speaker(output=audio_out)
if input_channel is not None:
boolean_input = hwio.BooleanInput(name="Button",
interface=speaker_out,
params={"channel": speaker + "/" + input_channel,
"invert": True})
self.inputs.append(boolean_input)
self.button = components.Button(IR=boolean_input)
def reset(self):
pass
def sleep(self):
pass
def ready(self):
pass
def idle(self):
pass
def poll_then_sound(self, timeout=None):
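        # Queue the default stimulus, wait for a button press (optionally with a timeout), then play it.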
if not hasattr(self, "button"):
raise AttributeError("This panel does not have a button")
self.speaker.queue(self._default_sound_file)
self.button.poll(timeout=timeout)
self.speaker.play()
class PanelWithInput(Panel131):
def __init__(self, *args, **kwargs):
super(PanelWithInput, self).__init__(name="Panel with input",
input_channel="port0/line5")
| gpl-3.0 | 9,138,150,092,646,161,000 | 29.475248 | 102 | 0.550682 | false |
mildass/tlsfuzzer | scripts/test-ecdhe-padded-shared-secret.py | 1 | 10145 | # Author: Hubert Kario, (c) 2018
# Released under Gnu GPL v2.0, see LICENSE file for details
"""Test for correct handling of zero-padded ECDHE shared secrets."""
from __future__ import print_function
import traceback
import sys
import getopt
from itertools import chain, islice
from tlsfuzzer.runner import Runner
from tlsfuzzer.messages import Connect, ClientHelloGenerator, \
ClientKeyExchangeGenerator, ChangeCipherSpecGenerator, \
FinishedGenerator, ApplicationDataGenerator, AlertGenerator, \
CopyVariables
from tlsfuzzer.expect import ExpectServerHello, ExpectCertificate, \
ExpectServerHelloDone, ExpectChangeCipherSpec, ExpectFinished, \
ExpectAlert, ExpectClose, ExpectServerKeyExchange, \
ExpectApplicationData
from tlsfuzzer.utils.lists import natural_sort_keys
from tlslite.constants import CipherSuite, AlertLevel, AlertDescription, \
ExtensionType, GroupName, ECPointFormat
from tlslite.extensions import ECPointFormatsExtension, \
SupportedGroupsExtension
version = 1
def help_msg():
print("Usage: <script-name> [-h hostname] [-p port] [[probe-name] ...]")
print(" -h hostname name of the host to run the test against")
print(" localhost by default")
print(" -p port port number to use for connection, 4433 by default")
print(" probe-name if present, will run only the probes with given")
print(" names and not all of them, e.g \"sanity\"")
print(" -e probe-name exclude the probe from the list of the ones run")
print(" may be specified multiple times")
print(" -n num only run `num` random tests instead of a full set")
print(" (excluding \"sanity\" tests)")
print(" --min-zeros m minimal number of zeros that have to be cut from")
print(" shared secret for test case to be valid,")
print(" 1 by default")
print(" --help this message")
def main():
"""Verify correct ECDHE shared secret handling."""
host = "localhost"
port = 4433
num_limit = None
run_exclude = set()
min_zeros = 1
argv = sys.argv[1:]
opts, args = getopt.getopt(argv, "h:p:e:n:", ["help", "min-zeros="])
for opt, arg in opts:
if opt == '-h':
host = arg
elif opt == '-p':
port = int(arg)
elif opt == '-e':
run_exclude.add(arg)
elif opt == '-n':
num_limit = int(arg)
elif opt == '--help':
help_msg()
sys.exit(0)
elif opt == '--min-zeros':
min_zeros = int(arg)
else:
raise ValueError("Unknown option: {0}".format(opt))
if args:
run_only = set(args)
else:
run_only = None
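    # Each conversation copies the negotiated premaster secret into this list so it can be checked for leading zero bytes.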
collected_premaster_secrets = []
variables_check = \
{'premaster_secret':
collected_premaster_secrets}
groups = [GroupName.x25519, GroupName.x448, GroupName.secp256r1,
GroupName.secp384r1, GroupName.secp521r1]
conversations = {}
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA]
groups_ext = SupportedGroupsExtension().create(groups)
points_ext = ECPointFormatsExtension().create([ECPointFormat.uncompressed])
exts = {ExtensionType.renegotiation_info: None,
ExtensionType.supported_groups: groups_ext,
ExtensionType.ec_point_formats: points_ext}
node = node.add_child(ClientHelloGenerator(ciphers, extensions=exts))
exts = {ExtensionType.renegotiation_info:None,
ExtensionType.ec_point_formats: None}
node = node.add_child(ExpectServerHello(extensions=exts))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerKeyExchange())
node = node.add_child(CopyVariables(variables_check))
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(ExpectChangeCipherSpec())
node = node.add_child(ExpectFinished())
node = node.add_child(ApplicationDataGenerator(
bytearray(b"GET / HTTP/1.0\n\n")))
node = node.add_child(ExpectApplicationData())
node = node.add_child(AlertGenerator(AlertLevel.warning,
AlertDescription.close_notify))
node = node.add_child(ExpectAlert())
node.next_sibling = ExpectClose()
conversations["sanity"] = conversation
for prot in [(3, 0), (3, 1), (3, 2), (3, 3)]:
for ssl2 in [True, False]:
for group in groups:
# with SSLv2 compatible or with SSLv3 we can't advertise
# curves so do just one check
if (ssl2 or prot == (3, 0)) and group != groups[0]:
continue
conversation = Connect(host, port,
version=(0, 2) if ssl2 else (3, 0))
node = conversation
ciphers = [CipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
if ssl2 or prot == (3, 0):
exts = None
else:
groups_ext = SupportedGroupsExtension().create([group])
exts = {ExtensionType.supported_groups: groups_ext,
ExtensionType.ec_point_formats: points_ext}
node = node.add_child(ClientHelloGenerator(ciphers,
version=prot,
extensions=exts,
ssl2=ssl2))
if prot > (3, 0):
if ssl2:
ext = {ExtensionType.renegotiation_info: None}
else:
ext = {ExtensionType.renegotiation_info: None,
ExtensionType.ec_point_formats: None}
else:
ext = None
node = node.add_child(ExpectServerHello(extensions=ext,
version=prot))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerKeyExchange())
node = node.add_child(CopyVariables(variables_check))
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(ExpectChangeCipherSpec())
node = node.add_child(ExpectFinished())
node = node.add_child(ApplicationDataGenerator(
bytearray(b"GET / HTTP/1.0\n\n")))
node = node.add_child(ExpectApplicationData())
if prot < (3, 2):
# 1/n-1 record splitting
node = node.add_child(ExpectApplicationData())
node = node.add_child(AlertGenerator(AlertLevel.warning,
AlertDescription.close_notify))
node = node.add_child(ExpectAlert())
node.next_sibling = ExpectClose()
conversations["Protocol {0}{1}{2}".format(
prot,
"" if ssl2 or prot < (3, 1)
else " with {0} group".format(GroupName.toStr(group)),
" in SSLv2 compatible ClientHello" if ssl2 else "")] = \
conversation
# run the conversation
good = 0
bad = 0
failed = []
if not num_limit:
num_limit = len(conversations)
# make sure that sanity test is run first and last
    # to verify that server was running and kept running throughout
sanity_test = ('sanity', conversations['sanity'])
ordered_tests = chain([sanity_test],
islice(filter(lambda x: x[0] != 'sanity',
conversations.items()), num_limit),
[sanity_test])
for c_name, c_test in ordered_tests:
if run_only and c_name not in run_only or c_name in run_exclude:
continue
i = 0
break_loop = False
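        # Re-run the conversation until a premaster secret with at least min_zeros leading zero bytes shows up ("sanity" runs only once).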
while True:
            # don't hog the memory unnecessarily
collected_premaster_secrets[:] = []
print("\"{1}\" repeat {0}...".format(i, c_name))
i += 1
if c_name == 'sanity':
break_loop = True
runner = Runner(c_test)
res = True
try:
runner.run()
except Exception:
print("Error while processing")
print(traceback.format_exc())
res = False
if res:
good += 1
if collected_premaster_secrets[-1][:min_zeros] == \
bytearray(min_zeros):
print("Got premaster secret with {0} most significant "
"bytes equal to zero."
.format(min_zeros))
break_loop = True
print("OK\n")
else:
bad += 1
failed.append(c_name)
break
if break_loop:
break
print('')
print("Check if the connections work when the calculated ECDH shared")
print("secret must be padded on the left with zeros")
print("version: {0}\n".format(version))
print("Test end")
print("successful: {0}".format(good))
print("failed: {0}".format(bad))
failed_sorted = sorted(failed, key=natural_sort_keys)
print(" {0}".format('\n '.join(repr(i) for i in failed_sorted)))
if bad > 0:
sys.exit(1)
if __name__ == "__main__":
main()
| gpl-2.0 | 3,597,437,850,988,840,400 | 39.418327 | 84 | 0.550025 | false |
francbartoli/geonode | geonode/geoserver/views.py | 1 | 34744 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
import re
import json
import logging
import traceback
from lxml import etree
from defusedxml import lxml as dlxml
from os.path import isfile
from urllib.parse import (
urlsplit,
urljoin,
unquote,
parse_qsl)
from django.contrib.auth import authenticate
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.http import require_POST
from django.shortcuts import render
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.template.loader import get_template
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.translation import ugettext as _
from guardian.shortcuts import get_objects_for_user
from geonode.base.models import ResourceBase
from geonode.compat import ensure_string
from geonode.base.auth import get_or_create_token
from geonode.decorators import logged_in_or_basicauth
from geonode.layers.forms import LayerStyleUploadForm
from geonode.layers.models import Layer, Style
from geonode.layers.views import _resolve_layer, _PERMISSION_MSG_MODIFY
from geonode.maps.models import Map
from geonode.proxy.views import proxy
from .tasks import geoserver_update_layers
from geonode.utils import (
json_response,
_get_basic_auth_info,
http_client,
get_layer_workspace)
from geoserver.catalog import FailedRequestError
from geonode.geoserver.signals import (
gs_catalog,
geoserver_post_save_local)
from .helpers import (
get_stores,
ogc_server_settings,
extract_name_from_sld,
set_styles,
style_update,
set_layer_style,
temp_style_name_regex,
_stylefilterparams_geowebcache_layer,
_invalidate_geowebcache_layer)
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.cache import cache_control
logger = logging.getLogger(__name__)
def stores(request, store_type=None):
stores = get_stores(store_type)
data = json.dumps(stores)
return HttpResponse(data)
@user_passes_test(lambda u: u.is_superuser)
def updatelayers(request):
params = request.GET
# Get the owner specified in the request if any, otherwise used the logged
# user
owner = params.get('owner', None)
owner = get_user_model().objects.get(
username=owner) if owner is not None else request.user
workspace = params.get('workspace', None)
store = params.get('store', None)
filter = params.get('filter', None)
result = geoserver_update_layers.delay(
ignore_errors=False, owner=owner, workspace=workspace,
store=store, filter=filter)
# Attempt to run task synchronously
result.get()
return HttpResponseRedirect(reverse('layer_browse'))
@login_required
@require_POST
def layer_style(request, layername):
layer = _resolve_layer(
request,
layername,
'base.change_resourcebase',
_PERMISSION_MSG_MODIFY)
style_name = request.POST.get('defaultStyle')
# would be nice to implement
# better handling of default style switching
# in layer model or deeper (gsconfig.py, REST API)
old_default = layer.default_style
if old_default.name == style_name:
return HttpResponse(
"Default style for %s remains %s" %
(layer.name, style_name), status=200)
# This code assumes without checking
# that the new default style name is included
# in the list of possible styles.
new_style = next(style for style in layer.styles if style.name == style_name)
# Does this change this in geoserver??
layer.default_style = new_style
layer.styles = [
s for s in layer.styles if s.name != style_name] + [old_default]
layer.save(notify=True)
# Invalidate GeoWebCache for the updated resource
try:
_stylefilterparams_geowebcache_layer(layer.alternate)
_invalidate_geowebcache_layer(layer.alternate)
except Exception:
pass
return HttpResponse(
"Default style for %s changed to %s" %
(layer.name, style_name), status=200)
@login_required
def layer_style_upload(request, layername):
def respond(*args, **kw):
kw['content_type'] = 'text/html'
return json_response(*args, **kw)
form = LayerStyleUploadForm(request.POST, request.FILES)
if not form.is_valid():
return respond(errors="Please provide an SLD file.")
data = form.cleaned_data
layer = _resolve_layer(
request,
layername,
'base.change_resourcebase',
_PERMISSION_MSG_MODIFY)
sld = request.FILES['sld'].read()
sld_name = None
try:
# Check SLD is valid
try:
if sld:
if isfile(sld):
with open(sld, "r") as sld_file:
sld = sld_file.read()
etree.XML(sld)
except Exception:
logger.exception("The uploaded SLD file is not valid XML")
raise Exception(
"The uploaded SLD file is not valid XML")
sld_name = extract_name_from_sld(
gs_catalog, sld, sld_file=request.FILES['sld'])
except Exception as e:
        return respond(errors="The uploaded SLD file is not valid XML: {}".format(e))
name = data.get('name') or sld_name
set_layer_style(layer, data.get('title') or name, sld)
return respond(
body={
'success': True,
'style': data.get('title') or name,
'updated': data['update']})
@login_required
def layer_style_manage(request, layername):
layer = _resolve_layer(
request,
layername,
'layers.change_layer_style',
_PERMISSION_MSG_MODIFY)
if request.method == 'GET':
try:
cat = gs_catalog
# First update the layer style info from GS to GeoNode's DB
try:
set_styles(layer, cat)
except AttributeError:
logger.warn(
'Unable to set the default style. Ensure Geoserver is running and that this layer exists.')
gs_styles = []
# Temporary Hack to remove GeoServer temp styles from the list
Style.objects.filter(name__iregex=r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}_(ms)_\d{13}').delete()
for style in Style.objects.values('name', 'sld_title'):
gs_styles.append((style['name'], style['sld_title']))
current_layer_styles = layer.styles.all()
layer_styles = []
for style in current_layer_styles:
sld_title = style.name
try:
if style.sld_title:
sld_title = style.sld_title
except Exception:
tb = traceback.format_exc()
logger.debug(tb)
layer_styles.append((style.name, sld_title))
# Render the form
def_sld_name = None # noqa
def_sld_title = None # noqa
default_style = None
if layer.default_style:
def_sld_name = layer.default_style.name # noqa
def_sld_title = layer.default_style.name # noqa
try:
if layer.default_style.sld_title:
def_sld_title = layer.default_style.sld_title
except Exception:
tb = traceback.format_exc()
logger.debug(tb)
default_style = (def_sld_name, def_sld_title)
return render(
request,
'layers/layer_style_manage.html',
context={
"layer": layer,
"gs_styles": gs_styles,
"layer_styles": layer_styles,
"layer_style_names": [s[0] for s in layer_styles],
"default_style": default_style
}
)
except (FailedRequestError, EnvironmentError):
tb = traceback.format_exc()
logger.debug(tb)
            msg = ('Could not connect to geoserver at "%s" '
'to manage style information for layer "%s"' % (
ogc_server_settings.LOCATION, layer.name)
)
logger.debug(msg)
# If geoserver is not online, return an error
return render(
request,
'layers/layer_style_manage.html',
context={
"layer": layer,
"error": msg
}
)
elif request.method in ('POST', 'PUT', 'DELETE'):
try:
workspace = get_layer_workspace(layer) or settings.DEFAULT_WORKSPACE
selected_styles = request.POST.getlist('style-select')
default_style = request.POST['default_style']
# Save to GeoServer
cat = gs_catalog
try:
gs_layer = cat.get_layer(layer.name)
except Exception:
gs_layer = None
if not gs_layer:
gs_layer = cat.get_layer(layer.alternate)
if gs_layer:
_default_style = cat.get_style(default_style) or \
cat.get_style(default_style, workspace=workspace)
if _default_style:
gs_layer.default_style = _default_style
elif cat.get_style(default_style, workspace=settings.DEFAULT_WORKSPACE):
gs_layer.default_style = cat.get_style(default_style, workspace=settings.DEFAULT_WORKSPACE)
styles = []
for style in selected_styles:
_gs_sld = cat.get_style(style) or cat.get_style(style, workspace=workspace)
if _gs_sld:
styles.append(_gs_sld)
elif cat.get_style(style, workspace=settings.DEFAULT_WORKSPACE):
styles.append(cat.get_style(style, workspace=settings.DEFAULT_WORKSPACE))
else:
Style.objects.filter(name=style).delete()
gs_layer.styles = styles
cat.save(gs_layer)
# Save to Django
set_styles(layer, cat)
# Invalidate GeoWebCache for the updated resource
try:
_stylefilterparams_geowebcache_layer(layer.alternate)
_invalidate_geowebcache_layer(layer.alternate)
except Exception:
pass
return HttpResponseRedirect(
reverse(
'layer_detail',
args=(
layer.service_typename,
)))
except (FailedRequestError, EnvironmentError, MultiValueDictKeyError):
tb = traceback.format_exc()
logger.debug(tb)
msg = ('Error Saving Styles for Layer "%s"' % (layer.name)
)
logger.warn(msg)
return render(
request,
'layers/layer_style_manage.html',
context={
"layer": layer,
"error": msg
}
)
def feature_edit_check(request, layername, permission='change_layer_data'):
"""
If the layer is not a raster and the user has edit permission, return a status of 200 (OK).
Otherwise, return a status of 401 (unauthorized).
"""
try:
layer = _resolve_layer(request, layername)
except Exception:
# Intercept and handle correctly resource not found exception
return HttpResponse(
json.dumps({'authorized': False}), content_type="application/json")
datastore = ogc_server_settings.DATASTORE
feature_edit = datastore
is_admin = False
is_staff = False
is_owner = False
is_manager = False
if request.user:
is_admin = request.user.is_superuser if request.user else False
is_staff = request.user.is_staff if request.user else False
is_owner = (str(request.user) == str(layer.owner))
try:
is_manager = request.user.groupmember_set.all().filter(
role='manager').exists()
except Exception:
is_manager = False
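    # NOTE: "and" binds tighter than "or", and the trailing "(... or True)" clause is always truthy,
    # so access is effectively granted to admins, staff, owners, group managers and users holding the permission.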
if is_admin or is_staff or is_owner or is_manager or request.user.has_perm(
permission,
obj=layer) and \
((permission == 'change_layer_data' and layer.storeType == 'dataStore' and feature_edit) or
True):
return HttpResponse(
json.dumps({'authorized': True}), content_type="application/json")
else:
return HttpResponse(
json.dumps({'authorized': False}), content_type="application/json")
def style_edit_check(request, layername):
"""
If the layer is not a raster and the user has edit permission, return a status of 200 (OK).
Otherwise, return a status of 401 (unauthorized).
"""
return feature_edit_check(request, layername, permission='change_layer_style')
def style_change_check(request, path):
"""
If the layer has not change_layer_style permission, return a status of
401 (unauthorized)
"""
# a new style is created with a POST and then a PUT,
# a style is updated with a PUT
# a layer is updated with a style with a PUT
# in both case we need to check permissions here
# for PUT path is /gs/rest/styles/san_andres_y_providencia_water_a452004b.xml
# or /ge/rest/layers/geonode:san_andres_y_providencia_coastline.json
# for POST path is /gs/rest/styles
# we will suppose that a user can create a new style only if he is an
# authenticated (we need to discuss about it)
authorized = True
if request.method in ('PUT', 'POST'):
if not request.user.is_authenticated:
authorized = False
elif path == 'rest/layers' and request.method == 'PUT':
# layer update, should be safe to always authorize it
authorized = True
else:
# style new/update
# we will iterate all layers (should be just one if not using GS)
# to which the posted style is associated
# and check if the user has change_style_layer permissions on each
# of them
style_name = os.path.splitext(request.path)[0].split('/')[-1]
if style_name == 'styles' and 'raw' in request.GET:
authorized = True
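        # Auto-generated temporary style names (matched by temp_style_name_regex) are always allowed.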
elif re.match(temp_style_name_regex, style_name):
authorized = True
else:
try:
style = Style.objects.get(name=style_name)
for layer in style.layer_styles.all():
if not request.user.has_perm(
'change_layer_style', obj=layer):
authorized = False
except Exception:
authorized = (request.method == 'POST') # The user is probably trying to create a new style
logger.warn(
'There is not a style with such a name: %s.' % style_name)
return authorized
@csrf_exempt
@logged_in_or_basicauth(realm="GeoNode")
def geoserver_protected_proxy(request,
proxy_path,
downstream_path,
workspace=None,
layername=None):
return geoserver_proxy(request,
proxy_path,
downstream_path,
workspace=workspace,
layername=layername)
@csrf_exempt
@cache_control(public=True, must_revalidate=True, max_age=30)
def geoserver_proxy(request,
proxy_path,
downstream_path,
workspace=None,
layername=None):
"""
WARNING: Decorators are applied in the order they appear in the source.
"""
# AF: No need to authenticate first. We will check if "access_token" is present
# or not on session
# @dismissed
# if not request.user.is_authenticated:
# return HttpResponse(
# "You must be logged in to access GeoServer",
# content_type="text/plain",
# status=401)
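    # Strip the GeoNode proxy prefix (plus optional layer/downstream segments) from the request path so it can be re-rooted onto the upstream GeoServer location.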
def strip_prefix(path, prefix):
if prefix not in path:
_s_prefix = prefix.split('/', 3)
_s_path = path.split('/', 3)
assert _s_prefix[1] == _s_path[1]
_prefix = f'/{_s_path[1]}/{_s_path[2]}'
else:
_prefix = prefix
assert _prefix in path
prefix_idx = path.index(_prefix)
_prefix = path[:prefix_idx] + _prefix
full_prefix = "%s/%s/%s" % (
_prefix, layername, downstream_path) if layername else _prefix
return path[len(full_prefix):]
path = strip_prefix(request.get_full_path(), proxy_path)
raw_url = str(
"".join([ogc_server_settings.LOCATION, downstream_path, path]))
if settings.DEFAULT_WORKSPACE or workspace:
ws = (workspace or settings.DEFAULT_WORKSPACE)
if ws and ws in path:
# Strip out WS from PATH
try:
path = "/%s" % strip_prefix(path, "/%s:" % (ws))
except Exception:
pass
if proxy_path == '/gs/%s' % settings.DEFAULT_WORKSPACE and layername:
import posixpath
raw_url = urljoin(ogc_server_settings.LOCATION,
posixpath.join(workspace, layername, downstream_path, path))
    if downstream_path == 'rest/styles' and len(request.body) > 0:
if ws:
# Lets try
# http://localhost:8080/geoserver/rest/workspaces/<ws>/styles/<style>.xml
_url = str("".join([ogc_server_settings.LOCATION,
'rest/workspaces/', ws, '/styles',
path]))
else:
_url = str("".join([ogc_server_settings.LOCATION,
'rest/styles',
path]))
raw_url = _url
    if downstream_path == 'ows' and (
'rest' in path or
re.match(r'/(w.*s).*$', path, re.IGNORECASE) or
re.match(r'/(ows).*$', path, re.IGNORECASE)):
_url = str("".join([ogc_server_settings.LOCATION, '', path[1:]]))
raw_url = _url
url = urlsplit(raw_url)
affected_layers = None
if '%s/layers' % ws in path:
downstream_path = 'rest/layers'
elif '%s/styles' % ws in path:
downstream_path = 'rest/styles'
if request.method in ("POST", "PUT", "DELETE"):
if downstream_path in ('rest/styles', 'rest/layers',
'rest/workspaces'):
if not style_change_check(request, downstream_path):
return HttpResponse(
_(
"You don't have permissions to change style for this layer"),
content_type="text/plain",
status=401)
elif downstream_path == 'rest/styles':
logger.debug(
"[geoserver_proxy] Updating Style ---> url %s" %
url.geturl())
_style_name, _style_ext = os.path.splitext(os.path.basename(urlsplit(url.geturl()).path))
if _style_name == 'styles.json' and request.method == "PUT":
_parsed_get_args = dict(parse_qsl(urlsplit(url.geturl()).query))
if 'name' in _parsed_get_args:
_style_name, _style_ext = os.path.splitext(_parsed_get_args['name'])
else:
_style_name, _style_ext = os.path.splitext(_style_name)
if _style_name != 'style-check' and _style_ext == '.json' and \
not re.match(temp_style_name_regex, _style_name):
affected_layers = style_update(request, raw_url)
elif downstream_path == 'rest/layers':
logger.debug(
"[geoserver_proxy] Updating Layer ---> url %s" %
url.geturl())
try:
_layer_name = os.path.splitext(os.path.basename(request.path))[0]
_layer = Layer.objects.get(name__icontains=_layer_name)
affected_layers = [_layer]
except Exception:
logger.warn("Could not find any Layer %s on DB" % os.path.basename(request.path))
kwargs = {'affected_layers': affected_layers}
raw_url = unquote(raw_url)
    timeout = getattr(ogc_server_settings, 'TIMEOUT', None) or 60
allowed_hosts = [urlsplit(ogc_server_settings.public_url).hostname, ]
response = proxy(request, url=raw_url, response_callback=_response_callback,
timeout=timeout, allowed_hosts=allowed_hosts, **kwargs)
return response
def _response_callback(**kwargs):
content = kwargs['content']
status = kwargs['status']
content_type = kwargs['content_type']
content_type_list = ['application/xml', 'text/xml', 'text/plain', 'application/json', 'text/json']
if content:
if not content_type:
if isinstance(content, bytes):
content = content.decode('UTF-8')
            if re.match(r'^<.+>$', content, re.DOTALL):
                content_type = 'application/xml'
            elif re.match(r'^(\{|\[).+(\}|\])$', content, re.DOTALL):
                content_type = 'application/json'
else:
content_type = 'text/plain'
# Replace Proxy URL
try:
if isinstance(content, bytes):
_content = content.decode('UTF-8')
else:
_content = content
if re.findall(r"(?=(\b" + '|'.join(content_type_list) + r"\b))", content_type):
_gn_proxy_url = urljoin(settings.SITEURL, '/gs/')
content = _content\
.replace(ogc_server_settings.LOCATION, _gn_proxy_url)\
.replace(ogc_server_settings.PUBLIC_LOCATION, _gn_proxy_url)
                for _ows_endpoint in list(dict.fromkeys(re.findall(rf'{re.escape(_gn_proxy_url)}w\ws', content, re.IGNORECASE))):
content = content.replace(_ows_endpoint, f'{_gn_proxy_url}ows')
except Exception as e:
logger.exception(e)
if 'affected_layers' in kwargs and kwargs['affected_layers']:
for layer in kwargs['affected_layers']:
geoserver_post_save_local(layer)
return HttpResponse(
content=content,
status=status,
content_type=content_type)
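# Example of the rewrite performed by _response_callback (URLs are illustrative
# assumptions, not values from this module): an upstream endpoint such as
#   http://geoserver:8080/geoserver/wms
# embedded in a capabilities body is first mapped onto the GeoNode proxy and
# then normalised to the generic OWS endpoint, e.g.
#   https://geonode.example.org/gs/ows
# assuming SITEURL is https://geonode.example.org/ and LOCATION is
# http://geoserver:8080/geoserver/.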
def resolve_user(request):
user = None
geoserver = False
superuser = False
acl_user = request.user
if 'HTTP_AUTHORIZATION' in request.META:
username, password = _get_basic_auth_info(request)
acl_user = authenticate(username=username, password=password)
if acl_user:
user = acl_user.username
superuser = acl_user.is_superuser
elif _get_basic_auth_info(request) == ogc_server_settings.credentials:
geoserver = True
superuser = True
else:
return HttpResponse(_("Bad HTTP Authorization Credentials."),
status=401,
content_type="text/plain")
if not any([user, geoserver, superuser]
) and not request.user.is_anonymous:
user = request.user.username
superuser = request.user.is_superuser
resp = {
'user': user,
'geoserver': geoserver,
'superuser': superuser,
}
if acl_user and acl_user.is_authenticated:
resp['fullname'] = acl_user.get_full_name()
resp['email'] = acl_user.email
return HttpResponse(json.dumps(resp), content_type="application/json")
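# Illustrative resolve_user payload for a basic-auth request (values are made up):
#   {"user": "bobby", "geoserver": false, "superuser": false,
#    "fullname": "Bobby Ross", "email": "bobby@example.org"}
# When the credentials match ogc_server_settings.credentials instead, "user" is
# null and both "geoserver" and "superuser" are true.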
@logged_in_or_basicauth(realm="GeoNode")
def layer_acls(request):
"""
returns json-encoded lists of layer identifiers that
represent the sets of read-write and read-only layers
for the currently authenticated user.
"""
    # The layer_acls view supports basic auth, plus a special user that
    # represents the GeoServer administrator and is not present in Django.
acl_user = request.user
if 'HTTP_AUTHORIZATION' in request.META:
try:
username, password = _get_basic_auth_info(request)
acl_user = authenticate(username=username, password=password)
# Nope, is it the special geoserver user?
if (acl_user is None and
username == ogc_server_settings.USER and
password == ogc_server_settings.PASSWORD):
# great, tell geoserver it's an admin.
result = {
'rw': [],
'ro': [],
'name': username,
'is_superuser': True,
'is_anonymous': False
}
return HttpResponse(
json.dumps(result),
content_type="application/json")
except Exception:
pass
if acl_user is None:
return HttpResponse(_("Bad HTTP Authorization Credentials."),
status=401,
content_type="text/plain")
    # Include permissions granted to the anonymous user.
    # Polymorphic selectors/functions are used to keep the queries performant.
resources_readable = get_objects_for_user(
acl_user, 'view_resourcebase',
ResourceBase.objects.filter(polymorphic_ctype__model='layer')).values_list('id', flat=True)
layer_writable = get_objects_for_user(
acl_user, 'change_layer_data',
Layer.objects.all())
_read = set(
Layer.objects.filter(
id__in=resources_readable).values_list(
'alternate',
flat=True))
_write = set(layer_writable.values_list('alternate', flat=True))
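    # NOTE: the symmetric difference below assumes that writable layers are
    # always readable; a layer writable but not readable would end up in 'ro'.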
read_only = _read ^ _write
read_write = _read & _write
result = {
'rw': list(read_write),
'ro': list(read_only),
'name': acl_user.username,
'is_superuser': acl_user.is_superuser,
'is_anonymous': acl_user.is_anonymous,
}
if acl_user.is_authenticated:
result['fullname'] = acl_user.get_full_name()
result['email'] = acl_user.email
return HttpResponse(json.dumps(result), content_type="application/json")
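# Typical layer_acls payload handed back to GeoServer (layer names are made up):
#   {"rw": ["geonode:roads"], "ro": ["geonode:coastline"], "name": "bobby",
#    "is_superuser": false, "is_anonymous": false,
#    "fullname": "Bobby Ross", "email": "bobby@example.org"}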
# capabilities
def get_layer_capabilities(layer, version='1.3.0', access_token=None, tolerant=False):
"""
Retrieve a layer-specific GetCapabilities document
"""
workspace, layername = layer.alternate.split(":") if ":" in layer.alternate else (None, layer.alternate)
if not layer.remote_service:
wms_url = '%s%s/%s/wms?service=wms&version=%s&request=GetCapabilities'\
% (ogc_server_settings.LOCATION, workspace, layername, version)
if access_token:
wms_url += ('&access_token=%s' % access_token)
else:
wms_url = '%s?service=wms&version=%s&request=GetCapabilities'\
% (layer.remote_service.service_url, version)
_user, _password = ogc_server_settings.credentials
req, content = http_client.get(wms_url, user=_user)
getcap = ensure_string(content)
if not getattr(settings, 'DELAYED_SECURITY_SIGNALS', False):
if tolerant and ('ServiceException' in getcap or req.status_code == 404):
            # WARNING: make sure the Django cache is enabled, as per
            # https://docs.djangoproject.com/en/2.0/topics/cache/#filesystem-caching
wms_url = '%s%s/ows?service=wms&version=%s&request=GetCapabilities&layers=%s'\
% (ogc_server_settings.public_url, workspace, version, layer)
if access_token:
wms_url += ('&access_token=%s' % access_token)
req, content = http_client.get(wms_url, user=_user)
getcap = ensure_string(content)
if 'ServiceException' in getcap or req.status_code == 404:
return None
return getcap.encode('UTF-8')
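# For a local layer the request built above is a per-layer "virtual service"
# URL, roughly (host and layer names are hypothetical):
#   http://geoserver:8080/geoserver/geonode/roads/wms?service=wms&version=1.3.0&request=GetCapabilities&access_token=...
# which keeps the returned document scoped to that single layer rather than
# the whole catalogue.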
def format_online_resource(workspace, layer, element, namespaces):
"""
Replace workspace/layer-specific OnlineResource links with the more
generic links returned by a site-wide GetCapabilities document
"""
layerName = element.find('.//wms:Capability/wms:Layer/wms:Layer/wms:Name',
namespaces)
if layerName is None:
return
layerName.text = workspace + ":" + layer if workspace else layer
layerresources = element.findall('.//wms:OnlineResource', namespaces)
if layerresources is None:
return
for resource in layerresources:
        href = resource.attrib['{http://www.w3.org/1999/xlink}href']
        replace_string = "/" + workspace + "/" + layer if workspace else "/" + layer
        resource.attrib['{http://www.w3.org/1999/xlink}href'] = href.replace(
            replace_string, "")
def get_capabilities(request, layerid=None, user=None,
mapid=None, category=None, tolerant=False):
"""
Compile a GetCapabilities document containing public layers
filtered by layer, user, map, or category
"""
rootdoc = None
layers = None
cap_name = ' Capabilities - '
if layerid is not None:
layer_obj = Layer.objects.get(id=layerid)
cap_name += layer_obj.title
layers = Layer.objects.filter(id=layerid)
elif user is not None:
layers = Layer.objects.filter(owner__username=user)
cap_name += user
elif category is not None:
layers = Layer.objects.filter(category__identifier=category)
cap_name += category
elif mapid is not None:
map_obj = Map.objects.get(id=mapid)
cap_name += map_obj.title
alternates = []
for layer in map_obj.layers:
if layer.local:
alternates.append(layer.name)
layers = Layer.objects.filter(alternate__in=alternates)
for layer in layers:
if request.user.has_perm('view_resourcebase',
layer.get_self_resource()):
access_token = get_or_create_token(request.user)
if access_token and not access_token.is_expired():
access_token = access_token.token
else:
access_token = None
try:
workspace, layername = layer.alternate.split(":") if ":" in layer.alternate else (None, layer.alternate)
layercap = get_layer_capabilities(layer,
access_token=access_token,
tolerant=tolerant)
if layercap is not None: # 1st one, seed with real GetCapabilities doc
try:
namespaces = {'wms': 'http://www.opengis.net/wms',
'xlink': 'http://www.w3.org/1999/xlink',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'}
layercap = dlxml.fromstring(layercap)
rootdoc = etree.ElementTree(layercap)
format_online_resource(workspace, layername, rootdoc, namespaces)
service_name = rootdoc.find('.//wms:Service/wms:Name', namespaces)
if service_name is not None:
service_name.text = cap_name
rootdoc = rootdoc.find('.//wms:Capability/wms:Layer/wms:Layer', namespaces)
except Exception as e:
import traceback
traceback.print_exc()
logger.error(
"Error occurred creating GetCapabilities for %s: %s" %
(layer.typename, str(e)))
rootdoc = None
if layercap is None or not len(layercap) or rootdoc is None or not len(rootdoc):
                    # Get the required info from the layer model
                    # TODO: store the time dimension in the DB as well
tpl = get_template("geoserver/layer.xml")
ctx = {
'layer': layer,
'geoserver_public_url': ogc_server_settings.public_url,
'catalogue_url': settings.CATALOGUE['default']['URL'],
}
gc_str = tpl.render(ctx)
gc_str = gc_str.encode("utf-8", "replace")
layerelem = etree.XML(gc_str)
rootdoc = etree.ElementTree(layerelem)
except Exception as e:
import traceback
traceback.print_exc()
logger.error(
"Error occurred creating GetCapabilities for %s:%s" %
(layer.typename, str(e)))
rootdoc = None
if rootdoc is not None:
capabilities = etree.tostring(
rootdoc,
xml_declaration=True,
encoding='UTF-8',
pretty_print=True)
return HttpResponse(capabilities, content_type="text/xml")
return HttpResponse(status=200)
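# get_capabilities is meant to be called with exactly one of layerid / user /
# category / mapid; a (hypothetical) URL pattern could expose it as, e.g.,
#   /capabilities/layer/<layerid>/ -> get_capabilities(request, layerid=...)
# and the resulting document only lists layers the requesting user may view.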
def server_online(request):
"""
    Returns {'online': <bool>} indicating whether the local GeoServer is up and running
"""
from .helpers import check_geoserver_is_up
try:
check_geoserver_is_up()
return HttpResponse(json.dumps({'online': True}), content_type="application/json")
except Exception:
return HttpResponse(json.dumps({'online': False}), content_type="application/json")
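# server_online responds with {"online": true} or {"online": false}, which makes
# it suitable for lightweight uptime probes (the URL it is mounted on is defined
# in the project's urls.py, not shown here).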
| gpl-3.0 | -7,559,356,352,611,287,000 | 38.126126 | 120 | 0.568213 | false |
google/makani | lib/python/batch_sim/scoring_functions/status.py | 1 | 1380 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scoring functions whose goal is not to score but to indicate status."""
from makani.lib.python.batch_sim import scoring_functions
class LoopAngleScoringFunction(scoring_functions.ScoringFunction):
"""Indicate loop angles for analysis/plotting purposes."""
def __init__(self, section_name):
super(LoopAngleScoringFunction, self).__init__(
'Loop Angle (%s)' % section_name, 'rad', severity=0)
def GetSystemLabels(self):
return ['controls']
def GetTimeSeries(self, params, sim, control):
flight_modes = ['kFlightModeCrosswindNormal',
'kFlightModeCrosswindPrepTransOut']
loop_angle = self._SelectTelemetry(
sim, control, ['loop_angle'],
flight_modes=flight_modes)
return {
'loop_angle': loop_angle,
}
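# Minimal usage sketch (the params/sim/control telemetry objects are assumed to
# follow the batch_sim conventions; this example is not part of the original file):
#   scorer = LoopAngleScoringFunction('TransIn')
#   scorer.GetTimeSeries(params, sim, control)['loop_angle']
# returns the loop angle [rad] sampled over the two crosswind flight modes.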
| apache-2.0 | -2,921,351,656,038,700,500 | 34.384615 | 74 | 0.714493 | false |