code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3–1.05M)
---|---|---|---|---|---
# chatpy
# Copyright 2013-2015 aqn
# The original source code was written by Joshua Roesslein (Tweepy).
# See LICENSE for details.
"""
Chatwork API library
"""
__version__ = '0.3.0'
__license__ = 'MIT'
from chatpy.models import Status, ModelFactory
from chatpy.error import ChatpyError
from chatpy.api import API
from chatpy.cache import Cache, MemoryCache, FileCache
from chatpy.auth import TokenAuthHandler
# Global, unauthenticated instance of API
api = API()
def debug(level=1):
from six.moves import http_client
http_client.HTTPConnection.debuglevel = level
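# Typical usage sketch (illustrative only; the TokenAuthHandler signature
# below is an assumption based on the Tweepy-style design noted above):
#
#   auth = TokenAuthHandler('<chatwork-api-token>')
#   api = API(auth)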
| aqn/chatpy | chatpy/__init__.py | Python | mit | 575 |
# (c) 2015, Robert Chady <rchady@sitepen.com>
# Based on `runner/lookup_plugins/file.py` for Ansible
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Debops.
# This file is NOT part of Ansible yet.
#
# Debops is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Debops is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Debops. If not, see <https://www.gnu.org/licenses/>.
import os
try:
from debops import *
from debops.cmds import *
except ImportError:
pass
try:
from ansible.plugins.lookup import LookupBase
except ImportError:
LookupBase = object
from distutils.version import LooseVersion
from ansible import __version__ as __ansible_version__
'''
This file implements the `template_src` lookup plugin for Ansible. Unlike
the built-in `template` lookup, it searches for templates in the paths
listed in the `template-paths` variable (colon-separated) as configured in
DebOps.
NOTE: This means this plugin relies on DebOps.
'''
__author__ = "Robert Chady <rchady@sitepen.com>"
__copyright__ = "Copyright 2015 by Robert Chady <rchady@sitepen.com>"
__license__ = "GNU General Public LIcense version 3 (GPL v3) or later"
conf_tpl_paths = 'template-paths'
class LookupModule(LookupBase):
def __new__(class_name, *args, **kwargs):
if LooseVersion(__ansible_version__) < LooseVersion("2.0"):
from ansible import utils, errors
class LookupModuleV1(object):
def __init__(self, basedir, *args, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(
terms, self.basedir, inject)
ret = []
config = {}
places = []
# this can happen if the variable contains a string,
# strictly not desired for lookup plugins, but users may
# try it, so make it work.
if not isinstance(terms, list):
terms = [terms]
try:
project_root = find_debops_project(required=False)
config = read_config(project_root)
except NameError:
pass
if 'paths' in config and conf_tpl_paths in config['paths']:
custom_places = (
config['paths'][conf_tpl_paths].split(':'))
for custom_path in custom_places:
if os.path.isabs(custom_path):
places.append(custom_path)
else:
places.append(os.path.join(
project_root, custom_path))
for term in terms:
if '_original_file' in inject:
relative_path = (
utils.path_dwim_relative(
inject['_original_file'], 'templates',
'', self.basedir, check=False))
places.append(relative_path)
for path in places:
template = os.path.join(path, term)
if template and os.path.exists(template):
ret.append(template)
break
else:
raise errors.AnsibleError(
"could not locate file in lookup: %s"
% term)
return ret
return LookupModuleV1(*args, **kwargs)
else:
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModuleV2(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
config = {}
places = []
# this can happen if the variable contains a string,
# strictly not desired for lookup plugins, but users may
# try it, so make it work.
if not isinstance(terms, list):
terms = [terms]
try:
project_root = find_debops_project(required=False)
config = read_config(project_root)
except NameError:
pass
if 'paths' in config and conf_tpl_paths in config['paths']:
custom_places = (
config['paths'][conf_tpl_paths].split(':'))
for custom_path in custom_places:
if os.path.isabs(custom_path):
places.append(custom_path)
else:
places.append(os.path.join(
project_root, custom_path))
for term in terms:
if 'role_path' in variables:
relative_path = (
self._loader.path_dwim_relative(
variables['role_path'], 'templates',
''))
places.append(relative_path)
for path in places:
template = os.path.join(path, term)
if template and os.path.exists(template):
ret.append(template)
break
else:
raise AnsibleError(
"could not locate file in lookup: %s"
% term)
return ret
return LookupModuleV2(*args, **kwargs)
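# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; config keys and paths are placeholders).
# With a DebOps project whose configuration defines, e.g.:
#
#   [paths]
#   template-paths = templates:/srv/custom-templates
#
# the lookup can be used from a task:
#
#   - template:
#       src: '{{ lookup("template_src", "etc/motd.j2") }}'
#       dest: /etc/motd
# ---------------------------------------------------------------------------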
| ganto/debops | ansible/roles/ansible_plugins/lookup_plugins/template_src.py | Python | gpl-3.0 | 6,642 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test of the HDF storage for the Tables API.
Copyright 2009-2014 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import time
import pytest
import omero.columns
import omero.tables
import logging
import tables
import threading
import Ice
from omero_ext.mox import Mox
from omero.rtypes import rint, rstring
from library import TestCase
from path import path
logging.basicConfig(level=logging.CRITICAL)
class MockAdapter(object):
def __init__(self, ic):
self.ic = ic
def getCommunicator(self):
return self.ic
class TestHdfStorage(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
self.ic = Ice.initialize()
self.current = Ice.Current()
self.current.adapter = MockAdapter(self.ic)
self.lock = threading.RLock()
for of in omero.columns.ObjectFactories.values():
of.register(self.ic)
def cols(self):
a = omero.columns.LongColumnI('a', 'first', None)
b = omero.columns.LongColumnI('b', 'first', None)
c = omero.columns.LongColumnI('c', 'first', None)
return [a, b, c]
def init(self, hdf, meta=False):
if meta:
m = {"analysisA": 1, "analysisB": "param", "analysisC": 4.1}
else:
m = None
hdf.initialize(self.cols(), m)
def append(self, hdf, map):
cols = self.cols()
for col in cols:
try:
col.values = [map[col.name]]
except KeyError:
col.values = []
hdf.append(cols)
def hdfpath(self):
tmpdir = self.tmpdir()
return path(tmpdir) / "test.h5"
def testInvalidFile(self):
pytest.raises(
omero.ApiUsageException, omero.tables.HdfStorage, None, None)
pytest.raises(
omero.ApiUsageException, omero.tables.HdfStorage, '', self.lock)
bad = path(self.tmpdir()) / "doesntexist" / "test.h5"
pytest.raises(
omero.ApiUsageException, omero.tables.HdfStorage, bad, self.lock)
def testValidFile(self):
hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
hdf.cleanup()
def testLocking(self):
tmp = str(self.hdfpath())
hdf1 = omero.tables.HdfStorage(tmp, self.lock)
with pytest.raises(omero.LockTimeout) as exc_info:
omero.tables.HdfStorage(tmp, self.lock)
assert exc_info.value.message.startswith('Path already in HdfList: ')
hdf1.cleanup()
hdf3 = omero.tables.HdfStorage(tmp, self.lock)
hdf3.cleanup()
def testSimpleCreation(self):
hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
self.init(hdf, False)
hdf.cleanup()
def testCreationWithMetadata(self):
hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
self.init(hdf, True)
hdf.cleanup()
def testAddSingleRow(self):
hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
self.init(hdf, True)
self.append(hdf, {"a": 1, "b": 2, "c": 3})
hdf.cleanup()
def testModifyRow(self):
hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
self.init(hdf, True)
self.append(hdf, {"a": 1, "b": 2, "c": 3})
self.append(hdf, {"a": 5, "b": 6, "c": 7})
data = hdf.readCoordinates(hdf._stamp, [0, 1], self.current)
data.columns[0].values[0] = 100
data.columns[0].values[1] = 200
data.columns[1].values[0] = 300
data.columns[1].values[1] = 400
hdf.update(hdf._stamp, data)
hdf.readCoordinates(hdf._stamp, [0, 1], self.current)
hdf.cleanup()
def testReadTicket1951(self):
hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
self.init(hdf, True)
self.append(hdf, {"a": 1, "b": 2, "c": 3})
hdf.readCoordinates(hdf._stamp, [0], self.current)
hdf.read(hdf._stamp, [0, 1, 2], 0, 1, self.current)
hdf.cleanup()
def testSorting(self): # Probably shouldn't work
hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
self.init(hdf, True)
self.append(hdf, {"a": 0, "b": 2, "c": 3})
self.append(hdf, {"a": 4, "b": 4, "c": 4})
self.append(hdf, {"a": 0, "b": 1, "c": 0})
self.append(hdf, {"a": 0, "b": 0, "c": 0})
self.append(hdf, {"a": 0, "b": 4, "c": 0})
self.append(hdf, {"a": 0, "b": 0, "c": 0})
hdf.getWhereList(time.time(), '(a==0)', None, 'b', None, None, None)
# Doesn't work yet.
hdf.cleanup()
def testInitializeInvalidColumnNames(self):
hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
with pytest.raises(omero.ApiUsageException) as exc:
hdf.initialize([omero.columns.LongColumnI('')], None)
assert exc.value.message.startswith('Column unnamed:')
with pytest.raises(omero.ApiUsageException) as exc:
hdf.initialize([omero.columns.LongColumnI('__a')], None)
assert exc.value.message == 'Reserved column name: __a'
hdf.initialize([omero.columns.LongColumnI('a')], None)
hdf.cleanup()
def testInitializationOnInitializedFileFails(self):
p = self.hdfpath()
hdf = omero.tables.HdfStorage(p, self.lock)
self.init(hdf, True)
hdf.cleanup()
hdf = omero.tables.HdfStorage(p, self.lock)
try:
self.init(hdf, True)
assert False
except omero.ApiUsageException:
pass
hdf.cleanup()
"""
Hard fails disabled. See #2067
def testAddColumn(self):
assert False, "NYI"
def testMergeFiles(self):
assert False, "NYI"
def testVersion(self):
assert False, "NYI"
"""
def testHandlesExistingDirectory(self):
t = path(self.tmpdir())
h = t / "test.h5"
assert t.exists()
hdf = omero.tables.HdfStorage(h, self.lock)
hdf.cleanup()
def testGetSetMetaMap(self):
hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
self.init(hdf, False)
hdf.add_meta_map({'a': rint(1)})
m1 = hdf.get_meta_map()
assert len(m1) == 3
assert m1['__initialized'].val > 0
assert m1['__version'] == rstring('2')
assert m1['a'] == rint(1)
with pytest.raises(omero.ApiUsageException) as exc:
hdf.add_meta_map({'b': rint(1), '__c': rint(2)})
assert exc.value.message == 'Reserved attribute name: __c'
assert hdf.get_meta_map() == m1
with pytest.raises(omero.ValidationException) as exc:
hdf.add_meta_map({'d': rint(None)})
assert exc.value.serverStackTrace.startswith('Unsupported type:')
assert hdf.get_meta_map() == m1
hdf.add_meta_map({}, replace=True)
m2 = hdf.get_meta_map()
assert len(m2) == 2
assert m2 == {
'__initialized': m1['__initialized'], '__version': rstring('2')}
hdf.add_meta_map({'__test': 1}, replace=True, init=True)
m3 = hdf.get_meta_map()
assert m3 == {'__test': rint(1)}
hdf.cleanup()
def testStringCol(self):
hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
cols = [omero.columns.StringColumnI("name", "description", 16, None)]
hdf.initialize(cols)
cols[0].settable(hdf._HdfStorage__mea) # Needed for size
cols[0].values = ["foo"]
hdf.append(cols)
rows = hdf.getWhereList(time.time(), '(name=="foo")', None, 'b', None,
None, None)
assert 1 == len(rows)
assert 16 == hdf.readCoordinates(time.time(), [0],
self.current).columns[0].size
# Doesn't work yet.
hdf.cleanup()
#
# ROIs
#
def testMaskColumn(self):
hdf = omero.tables.HdfStorage(self.hdfpath(), self.lock)
mask = omero.columns.MaskColumnI('mask', 'desc', None)
hdf.initialize([mask], None)
mask.imageId = [1, 2]
mask.theZ = [2, 2]
mask.theT = [3, 3]
mask.x = [4, 4]
mask.y = [5, 5]
mask.w = [6, 6]
mask.h = [7, 7]
mask.bytes = [[0], [0, 1, 2, 3, 4]]
hdf.append([mask])
data = hdf.readCoordinates(hdf._stamp, [0, 1], self.current)
test = data.columns[0]
assert 1 == test.imageId[0]
assert 2 == test.theZ[0]
assert 3 == test.theT[0]
assert 4 == test.x[0]
assert 5 == test.y[0]
assert 6 == test.w[0]
assert 7 == test.h[0]
assert [0] == test.bytes[0]
assert 2 == test.imageId[1]
assert 2 == test.theZ[1]
assert 3 == test.theT[1]
assert 4 == test.x[1]
assert 5 == test.y[1]
assert 6 == test.w[1]
assert 7 == test.h[1]
assert [0, 1, 2, 3, 4] == test.bytes[1]
hdf.cleanup()
class TestHdfList(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
self.mox = Mox()
def hdfpath(self):
tmpdir = self.tmpdir()
return path(tmpdir) / "test.h5"
def testLocking(self, monkeypatch):
lock1 = threading.RLock()
hdflist2 = omero.tables.HdfList()
lock2 = threading.RLock()
tmp = str(self.hdfpath())
# Using omero.tables.HDFLIST
hdf1 = omero.tables.HdfStorage(tmp, lock1)
# There are multiple guards against opening the same HDF5 file
# PyTables includes a check
monkeypatch.setattr(omero.tables, 'HDFLIST', hdflist2)
with pytest.raises(ValueError) as exc_info:
omero.tables.HdfStorage(tmp, lock2)
assert exc_info.value.message.startswith(
"The file '%s' is already opened. " % tmp)
monkeypatch.undo()
# HdfList uses portalocker, test by mocking tables.openFile
self.mox.StubOutWithMock(tables, 'openFile')
tables.openFile(tmp, mode='w', title='OMERO HDF Measurement Storage',
rootUEP='/').AndReturn(open(tmp))
self.mox.ReplayAll()
monkeypatch.setattr(omero.tables, 'HDFLIST', hdflist2)
with pytest.raises(omero.LockTimeout) as exc_info:
omero.tables.HdfStorage(tmp, lock2)
print exc_info.value
assert (exc_info.value.message ==
'Cannot acquire exclusive lock on: %s' % tmp)
monkeypatch.undo()
hdf1.cleanup()
self.mox.UnsetStubs()
self.mox.VerifyAll()
| tp81/openmicroscopy | components/tools/OmeroPy/test/unit/tablestest/test_hdfstorage.py | Python | gpl-2.0 | 10,650 |
# Project Euler 85: an a x b grid contains T(a) * T(b) rectangles, where
# T(n) = n*(n+1)/2 is the n-th triangular number. Find the area a*b of the
# grid whose rectangle count is closest to two million.
f = lambda n: n*(n+1)/2  # n-th triangular number T(n)
n = 2000000  # target rectangle count
ans = (0, n)  # (best area so far, distance from target)
i = 1
while f(i)**2 <= n*2:
    j = i
    s = f(i)*f(j)
    while s <= n*2:
        if ans[1] > abs(s-n):
            ans = (i*j, abs(s-n))
        j += 1
        s = f(i)*f(j)
    i += 1
print ans
| huangshenno1/algo | project_euler/85.py | Python | mit | 208 |
#!/usr/bin/env python
#
# APM - Another Python Mud
# Copyright (C) 2012 bdubyapee (BWP) p h i p p s b @ g m a i l . c o m
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Filename: color.py
File Description: Contains code to add, or remove, ANSI color capabilities for end users
using telnet clients. This will eventually become the 'wilderness' module for
providing an in-game colorized ASCII character map for players to move around in
between areas.
Public variables:
None
Public functions:
colorize(text) :
Accepts: text - string
Returns: string
decolorize(text) :
Accepts: text - string
Returns: string
Public classes:
None
Private variables:
_color_table : dictionary
Contains a mapping of 'special symbols' that equate to ANSI codes.
Private functions:
None
Private classes:
None
"""
# Mapping to convert APM type symbols to ANSI codes to send to telnet clients.
_color_table = {'{x': '0;0m', # Clear back to white on black
'{*': '\x07', # Beep code
'{d': '0;30m', # Set foreground color to black
'{r': '0;31m', # Set foreground color to red
'{g': '0;32m', # Set foreground color to green
'{y': '0;33m', # Set foreground color to yellow
'{b': '0;34m', # Set foreground color to blue
'{p': '0;35m', # Set foreground color to magenta (purple)
'{c': '0;36m', # Set foreground color to cyan
'{w': '0;37m', # Set foreground color to white
'{D': '1;30m', # Set foreground color to bright black
'{R': '1;31m', # Set foreground color to bright red
'{G': '1;32m', # Set foreground color to bright green
'{Y': '1;33m', # Set foreground color to bright yellow
'{B': '1;34m', # Set foreground color to bright blue
'{P': '1;35m', # Set foreground color to bright magenta (purple)
'{C': '1;36m', # Set foreground color to bright cyan
'{W': '1;37m', # Set foreground color to bright white
'{X': '0;40m', # Set background color to black
'{br': '0;41m', # Set background color to red
'{bg': '0;42m', # Set background color to green
'{by': '0;43m', # Set background color to yellow
'{bb': '0;44m', # Set background color to blue
'{bp': '0;45m', # Set background color to magenta
'{bc': '0;46m', # Set background color to cyan
'{bw': '0;47m', # Set background color to white
'{bD': '1;40m', # Set background color to bright black
'{bR': '1;41m', # Set background color to bright red
'{Z': '1;42m', # Set background color to bright green
'{bY': '1;43m', # Set background color to bright yellow
'{bB': '1;44m', # Set background color to bright blue
'{bP': '1;45m', # Set background color to bright magenta (purple)
'{bC': '1;46m', # Set background color to bright cyan
'{bW': '1;47m'} # Set background color to bright white
def colorize(text):
""" Accept a string, return a string with ANSI color codes embedded in place of special characters.
Keyword arguments:
text -- a string type
Return value:
return -- a string type
Example:
input = "I am a {W test string{x"
return value = "I am a 1;37m test string0;0m"
Additional notes:
Used to provide ANSI color codes to end-user telnet terminals that indicate they want color text.
See the color_table dictionary in the color.py file for the symbol to ANSI code conversions.
"""
if '{' in text:
for item, code in _color_table.items():
text = text.replace(item, '\x1b[{0}'.format(code))
return text
else:
return text
def decolorize(text):
""" Accept a string, return a string with with any special color codes removed.
Keyword arguments:
text -- a string type
Return value:
return -- a string type
Example:
input = "I am a {W test string{x"
return value = "I am a test string"
Additional notes:
Used to remove color code symbols from outgoing text for users who don't have color-capable
telnet terminals.
See the color_table dictionary in the color.py file for the symbol to ANSI code conversions.
"""
if '{' in text:
for item in _color_table.keys():
text = text.replace(item, '')
return text
else:
return text
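if __name__ == '__main__':
    # Minimal self-test sketch (not part of the original module): round-trip
    # a string through colorize() and decolorize().
    sample = "I am a {W test string{x"
    print(colorize(sample))    # "I am a \x1b[1;37m test string\x1b[0;0m"
    print(decolorize(sample))  # "I am a  test string"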
| bdubyapee/apm | src/color.py | Python | gpl-3.0 | 5,554 |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 7 18:14:49 2017
@author: Shabaka
"""
import pandas as pd
import numpy as np
# Define count_entries()
def count_entries(df, *args):
"""Return a dictionary with counts of
occurrences as value for each key."""
# Initialize an empty dictionary: cols_count
cols_count = {}
# Iterate over column names in args
for col_name in args:
# Extract column from DataFrame: col
col = df[col_name]
# Iterate over the column in dataframe
for entry in col:
# If entry is in cols_count, add 1
if entry in cols_count.keys():
cols_count[entry] += 1
# Else add the entry to cols_count, set the value to 1
else:
cols_count[entry] = 1
# Return the cols_count dictionary
return cols_count
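# NOTE: the original exercise assumes a pre-loaded DataFrame named
# 'tweets_df'; the CSV path below is a placeholder, not part of the source.
tweets_df = pd.read_csv('tweets.csv')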
# Call count_entries(): result1
result1 = count_entries(tweets_df, 'lang')
# Call count_entries(): result2
result2 = count_entries(tweets_df, 'lang', 'source')
# Print result1 and result2
print(result1)
print(result2)
| qalhata/Python-Scripts-Repo-on-Data-Science | General Multi_Column DataFrame Analysis.py | Python | gpl-3.0 | 1,145 |
#!/usr/bin/env python
from __future__ import division
import numpy as np
import pandas as pd
import time
import glob
import re
from collections import OrderedDict
import composition.support_functions.paths as paths
def load_sim(config='IT73', bintype='logdist', return_cut_dict=False):
# Load simulation dataframe
mypaths = paths.Paths()
infile = '{}/{}_sim/sim_dataframe.hdf5'.format(
mypaths.comp_data_dir, config)  # NOTE: bintype is currently unused here
df = pd.read_hdf(infile)
# Quality Cuts #
# Adapted from PHYSICAL REVIEW D 88, 042004 (2013)
cut_dict = OrderedDict()
# IT specific cuts
cut_dict['ShowerLLH_reco_exists'] = df['reco_exists']
cut_dict['MC_zenith'] = (np.cos(df['MC_zenith']) >= 0.8)
cut_dict['reco_zenith'] = (np.cos(df['reco_zenith']) >= 0.8)
# cut_dict['reco_zenith'] = (np.cos(np.pi - df['reco_zenith']) >= 0.8)
cut_dict['IT_containment'] = (df['IceTop_FractionContainment'] < 1.0)
cut_dict['reco_IT_containment'] = (df['reco_IT_containment'] < 1.0)
cut_dict['LLHlap_IT_containment'] = (df['LLHlap_IT_containment'] < 1.0)
cut_dict['IceTopMaxSignalInEdge'] = np.logical_not(
df['IceTopMaxSignalInEdge'].astype(bool))
cut_dict['IceTopMaxSignal'] = (df['IceTopMaxSignal'] >= 6)
cut_dict['IceTopNeighbourMaxSignal'] = (
df['IceTopNeighbourMaxSignal'] >= 4)
cut_dict['NStations'] = (df['NStations'] >= 5)
cut_dict['StationDensity'] = (df['StationDensity'] >= 0.2)
cut_dict['min_energy'] = (df['reco_energy'] > 10**6.2)
cut_dict['max_energy'] = (df['reco_energy'] < 10**8.0)
# InIce specific cuts
cut_dict['NChannels'] = (df['NChannels'] >= 8)
cut_dict['InIce_containment'] = (df['InIce_FractionContainment'] < 1.0)
cut_dict['reco_InIce_containment'] = (df['reco_InIce_containment'] < 1.0)
cut_dict['LLHlap_InIce_containment'] = (
df['LLHlap_InIce_containment'] < 1.0)
cut_dict['max_charge_frac'] = (df['max_charge_frac'] < 0.3)
# Some combined cuts
cut_dict['combined_reco_exists'] = df['combined_reco_exists']
cut_dict['reco_exists'] = cut_dict[
'ShowerLLH_reco_exists'] & cut_dict['combined_reco_exists']
cut_dict['num_hits'] = cut_dict['NChannels'] & cut_dict['NStations']
cut_dict['reco_containment'] = cut_dict[
'reco_IT_containment'] & cut_dict['LLHlap_InIce_containment']
# cut_dict['reco_containment'] = cut_dict[
# 'reco_IT_containment'] & cut_dict['reco_InIce_containment']
cut_dict['IT_signal'] = cut_dict['IceTopMaxSignalInEdge'] & cut_dict[
'IceTopMaxSignal'] & cut_dict['IceTopNeighbourMaxSignal']
cut_dict['energy_range'] = cut_dict['min_energy'] & cut_dict['max_energy']
# Add log-energy and log-charge columns to df
df['MC_log_energy'] = np.nan_to_num(np.log10(df['MC_energy']))
df['reco_log_energy'] = np.nan_to_num(np.log10(df['reco_energy']))
df['InIce_log_charge'] = np.nan_to_num(np.log10(df['InIce_charge']))
df['reco_cos_zenith'] = np.cos(np.pi - df['reco_zenith'])
df['ShowerPlane_cos_zenith'] = np.cos(df['ShowerPlane_zenith'])
df['log_s125'] = np.log10(df['s125'])
if return_cut_dict:
return df, cut_dict
else:
selection_mask = np.array([True] * len(df))
standard_cut_keys = ['reco_exists', 'reco_zenith', 'num_hits', 'IT_signal',
'StationDensity', 'reco_containment', 'max_charge_frac', 'energy_range']
for key in standard_cut_keys:
selection_mask *= cut_dict[key]
# Print cut event flow
n_total = len(df)
cut_eff = {}
cumulative_cut_mask = np.array([True] * n_total)
print('Cut event flow:')
for key in standard_cut_keys:
cumulative_cut_mask *= cut_dict[key]
print('{:>30}: {:>5.3} {:>5.3}'.format(key, np.sum(
cut_dict[key]) / n_total, np.sum(cumulative_cut_mask) / n_total))
print('\n')
return df[selection_mask]
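# Usage sketch (illustrative only): load the simulation sample with the
# standard quality cuts applied, or retrieve the raw frame plus cut masks.
#
#   df = load_sim(config='IT73', bintype='logdist')
#   df, cut_dict = load_sim(return_cut_dict=True)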
| jrbourbeau/composition | analysis/load_sim.py | Python | mit | 3,972 |
# coding=utf-8
from twisted.protocols import amp
from twisted.cred.error import UnauthorizedLogin
from errors import SlotErrorNotification
from errors import BadCredentials
"""
Copyright 2014, 2015 Xabier Crespo Álvarez
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
:Author:
Xabier Crespo Álvarez (xabicrespog@gmail.com)
"""
__author__ = 'xabicrespog@gmail.com'
class StartRemote(amp.Command):
# arguments = [('iSlotId', amp.Integer())]
response = [('iResult', amp.Integer())]
errors = {
SlotErrorNotification: 'SLOT_ERROR_NOTIFICATION'}
"""
Invoked when a client wants to connect to an N-server. This shall be
called right after invoking the login method.
:param iSlotId:
ID number of the slot which should have been previously reserved
through the web interface.
:type iSlotId:
L{int}
:returns iResult:
Raises an error if the slot is not available yet or if it isn't
assigned to the calling client. Otherwise, it may return one of
the following codes:
(0) REMOTE_READY: the remote client is already connected to the server
(-1) CLIENTS_COINCIDE: the remote client is the same as the calling
client
(-2) REMOTE_NOT_CONNECTED: indicates if the the remote client is not
connected
In case that any of the previous cases are detected, the slotId is
returned.
:rtype:
int or L{SlotNotAvailable}
"""
# Remote client ready
REMOTE_READY = 0
# Both MCC and GSS belong to the same client
CLIENTS_COINCIDE = -1
# Remote user not connected yet
REMOTE_NOT_CONNECTED = -2
class EndRemote(amp.Command):
arguments = []
response = [('bResult', amp.Boolean())]
"""
Invoked by a client when it wants to finalize the
remote operation.
"""
class SendMsg(amp.Command):
arguments = [('sMsg', amp.String()),
('iTimestamp', amp.Integer())]
response = [('bResult', amp.Boolean())]
errors = {
SlotErrorNotification: 'SLOT_ERROR_NOTIFICATION'}
"""
Invoked when a client wants to send a message to a remote entity.
To use it, the command StartRemote shall be invoked first.
:param sMsg:
String containing the message
:type sMsg:
L{String}
:param iTimestamp:
Integer indicating the UTC timestamp at reception.
If the command is called before StartRemote raises SlotNotAvailable.
:type iTimestamp:
L{Integer} or L{SlotNotAvailable}
:returns bResult:
True if the command is successfully run
:rtype:
Boolean
"""
# Commands implemented by G- or M-clients which will be invoked
# by an N-server.
class NotifyEvent(amp.Command):
arguments = [('iEvent', amp.Integer()),
('sDetails', amp.String(optional=True))]
requiresAnswer = False
"""
Used to inform a client about an event in the network.
:param iEvent:
Code indicating the event. There are four cases:
(-1) REMOTE_DISCONNECTED: notifies when the remote client has
been disconnected and it is not receiving the messages.
(-2) SLOT_END: notifies both clients about the slot end
(-3) END_REMOTE: notifies a client that the remote has finished
the connection
(-4) REMOTE_CONNECTED: notifies a client when the remote has just
connected
:type iEvent:
int
:param sDetails:
Details of the event. If it is REMOTE_CONNECTED this parameter
is equal to the username of the remote client. Otherwise the
parameter is None
:type sDetails:
L{String} or None
"""
# Remote client has been disconnected
REMOTE_DISCONNECTED = -1
# The reserved slot has ended
SLOT_END = -2
# Remote client finished the connection
END_REMOTE = -3
# Remote client has just connected
REMOTE_CONNECTED = -4
class NotifyMsg(amp.Command):
arguments = [('sMsg', amp.String())]
response = [('bResult', amp.Boolean())]
errors = {
SlotErrorNotification: 'SLOT_ERROR_NOTIFICATION'}
"""
Used to send a message to a remote client.
:param sMsg:
String containing the message for the remote client
:type sMsg:
L{String}
"""
class Login(amp.Command):
arguments = [('sUsername', amp.String()),
('sPassword', amp.String())]
response = [('bAuthenticated', amp.Boolean())]
errors = {
UnauthorizedLogin: 'UNAUTHORIZED_LOGIN',
BadCredentials: 'BAD_CREDENTIALS',
NotImplementedError: 'NOT_IMPLEMENTED_ERROR'}
"""
Command to authenticate an user. The server response is a boolean
granting or not the access to the client.
:param sUsername:
Client username for the SATNET network
:type sUsername:
String
:param sPassword:
Plain-text client password for the SATNET network
:type sPassword:
String
:returns bAuthenticated:
True if the user has been granted access and L{UnauthorizedLogin}
otherwise.
:rtype:
boolean or L{UnauthorizedLogin}
"""
| satnet-project/generic-client | ampCommands.py | Python | apache-2.0 | 5,800 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('open_humans', '0010_auto_20180611_2029'),
]
operations = [
migrations.AlterField(
model_name='blogpost',
name='image_url',
field=models.CharField(blank=True, max_length=2083),
),
]
| PersonalGenomesOrg/open-humans | open_humans/migrations/0011_auto_20180709_2220.py | Python | mit | 385 |
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants and helpers for reporting the success status of each benchmark."""
import os
SUCCEEDED = 'SUCCEEDED'
FAILED = 'FAILED'
SKIPPED = 'SKIPPED'
ALL = SUCCEEDED, FAILED, SKIPPED
_COL_SEPARATOR = ' '
def _CreateSummaryTable(run_status_tuples):
"""Converts statuses of benchmark runs into a formatted string table.
Args:
run_status_tuples: List of (benchmark_name, benchmark_uid, status) tuples.
benchmark_name and benchmark_uid are strings that identify one run of a
benchmark, and status is a value from ALL. List must contain at least
one element.
Returns:
string. Multi-line string summarizing benchmark success statuses. Example:
--------------------------------------
Name UID Status
--------------------------------------
iperf iperf0 SUCCEEDED
iperf iperf1 FAILED
cluster_boot cluster_boot0 SKIPPED
--------------------------------------
"""
assert run_status_tuples, (
'run_status_tuples must contain at least one element.')
col_headers = 'Name', 'UID', 'Status'
col_lengths = []
for col_header, col_entries in zip(col_headers, zip(*run_status_tuples)):
max_col_content_length = max(len(entry) for entry in col_entries)
col_lengths.append(max(len(col_header), max_col_content_length))
line_length = (len(col_headers) - 1) * len(_COL_SEPARATOR) + sum(col_lengths)
dash_line = '-' * line_length
line_format = _COL_SEPARATOR.join(
'{{{0}:<{1}s}}'.format(col_index, col_length)
for col_index, col_length in enumerate(col_lengths))
msg = [dash_line, line_format.format(*col_headers), dash_line]
msg.extend(line_format.format(*row_entries)
for row_entries in run_status_tuples)
msg.append(dash_line)
return os.linesep.join(msg)
def CreateSummary(run_status_tuples):
"""Logs a summary of benchmark run statuses.
Args:
run_status_tuples: List of (benchmark_name, benchmark_uid, status) tuples.
benchmark_name and benchmark_uid are strings that identify one run of a
benchmark, and status is a value from ALL. List must contain at least
one element.
Returns:
string. Multi-line string summarizing benchmark success statuses. Example:
Benchmark run statuses:
--------------------------------------
Name UID Status
--------------------------------------
iperf iperf0 SUCCEEDED
iperf iperf1 FAILED
cluster_boot cluster_boot0 SKIPPED
--------------------------------------
Success rate: 33.33% (1/3)
"""
assert run_status_tuples, (
'run_status_tuples must contain at least one element.')
benchmark_count = len(run_status_tuples)
successful_benchmark_count = sum(1 for _, _, status in run_status_tuples
if status == SUCCEEDED)
return os.linesep.join((
'Benchmark run statuses:',
_CreateSummaryTable(run_status_tuples),
'Success rate: {0:.2f}% ({1}/{2})'.format(
100. * successful_benchmark_count / benchmark_count,
successful_benchmark_count, benchmark_count)))
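if __name__ == '__main__':
    # Usage sketch (not part of the original module): print a summary table
    # for three hypothetical benchmark runs.
    print(CreateSummary([
        ('iperf', 'iperf0', SUCCEEDED),
        ('iperf', 'iperf1', FAILED),
        ('cluster_boot', 'cluster_boot0', SKIPPED),
    ]))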
| syed/PerfKitBenchmarker | perfkitbenchmarker/benchmark_status.py | Python | apache-2.0 | 3,856 |
from django.core.checks import Error, register, Warning
import dashboard
import pathlib
# these paths will be checked for existence. Warnings are raised if any are missing.
PO_PATHS = [
'locale/nl/LC_MESSAGES/django.po',
'locale/nl/LC_MESSAGES/djangojs.po',
'dashboard/locale/nl/LC_MESSAGES/django.po',
'dashboard/locale/nl/LC_MESSAGES/djangojs.po'
]
@register()
def translations_check(app_configs, **kwargs):
""" Checks if the given translation files are where they're expected to be. """
errors = []
root = pathlib.Path(dashboard.__file__).parents[1]
for path in PO_PATHS:
po_path = root / path
mo_path = root / (path[:-3] + ".mo")
if not po_path.is_file():
errors.append(
Warning(
'Translation sources are missing.',
hint="Translations went missing. This shouldn't happen.",
obj="'{!s}'".format(po_path),
id='dashboard.W001',
)
)
# Skip the .mo comparison when the .po source is absent; statting a
# missing file below would raise FileNotFoundError.
continue
if not mo_path.is_file() or mo_path.stat().st_mtime < po_path.stat().st_mtime:
errors.append(
Warning(
'Translation binaries are missing or outdated.',
hint='To solve this, run `manage.py compilemessages`',
obj="'{!s}'".format(po_path),
id='dashboard.W002',
)
)
return errors
| amcat/amcat-dashboard | dashboard/checks.py | Python | agpl-3.0 | 1,471 |
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("XGBRegressor" , "diabetes" , "oracle")
| antoinecarme/sklearn2sql_heroku | tests/regression/diabetes/ws_diabetes_XGBRegressor_oracle_code_gen.py | Python | bsd-3-clause | 128 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from oslo.utils import strutils
import six
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
def create_upload_form_attributes(prefix, input_type, name):
"""Creates attribute dicts for the switchable upload form
:type prefix: str
:param prefix: prefix (environment, template) of field
:type input_type: str
:param input_type: field type (file, raw, url)
:type name: str
:param name: translated text label to display to user
:rtype: dict
:return: an attribute set to pass to form build
"""
attributes = {'class': 'switched', 'data-switch-on': prefix + 'source'}
attributes['data-' + prefix + 'source-' + input_type] = name
return attributes
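# Example (illustrative) of the attribute dict this helper produces:
#
#   >>> create_upload_form_attributes('template', 'file', 'Template File')
#   {'class': 'switched', 'data-switch-on': 'templatesource',
#    'data-templatesource-file': 'Template File'}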
class TemplateForm(forms.SelfHandlingForm):
class Meta:
name = _('Select Template')
help_text = _('Select a template to launch a stack.')
# TODO(jomara) - update URL choice for template & environment files
# w/ client side download when applicable
base_choices = [('file', _('File')),
('raw', _('Direct Input'))]
url_choice = [('url', _('URL'))]
attributes = {'class': 'switchable', 'data-slug': 'templatesource'}
template_source = forms.ChoiceField(label=_('Template Source'),
choices=base_choices + url_choice,
widget=forms.Select(attrs=attributes))
attributes = create_upload_form_attributes(
'template',
'file',
_('Template File'))
template_upload = forms.FileField(
label=_('Template File'),
help_text=_('A local template to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'url',
_('Template URL'))
template_url = forms.URLField(
label=_('Template URL'),
help_text=_('An external (HTTP) URL to load the template from.'),
widget=forms.TextInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'raw',
_('Template Data'))
template_data = forms.CharField(
label=_('Template Data'),
help_text=_('The raw contents of the template.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
attributes = {'data-slug': 'envsource', 'class': 'switchable'}
environment_source = forms.ChoiceField(
label=_('Environment Source'),
choices=base_choices,
widget=forms.Select(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'file',
_('Environment File'))
environment_upload = forms.FileField(
label=_('Environment File'),
help_text=_('A local environment to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'raw',
_('Environment Data'))
environment_data = forms.CharField(
label=_('Environment Data'),
help_text=_('The raw contents of the environment file.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(TemplateForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned = super(TemplateForm, self).clean()
files = self.request.FILES
self.clean_uploaded_files('template', _('template'), cleaned, files)
self.clean_uploaded_files('environment',
_('environment'),
cleaned,
files)
# Validate the template and get back the params.
kwargs = {}
if cleaned['template_data']:
kwargs['template'] = cleaned['template_data']
else:
kwargs['template_url'] = cleaned['template_url']
if cleaned['environment_data']:
kwargs['environment'] = cleaned['environment_data']
try:
validated = api.heat.template_validate(self.request, **kwargs)
cleaned['template_validate'] = validated
except Exception as e:
raise forms.ValidationError(unicode(e))
return cleaned
def clean_uploaded_files(self, prefix, field_label, cleaned, files):
"""Cleans Template & Environment data from form upload.
Does some of the crunchy bits for processing uploads vs raw
data depending on what the user specified. Identical process
for environment data & template data.
:type prefix: str
:param prefix: prefix (environment, template) of field
:type field_label: str
:param field_label: translated prefix str for messages
:type input_type: dict
:param prefix: existing cleaned fields from form
:rtype: dict
:return: cleaned dict including environment & template data
"""
upload_str = prefix + "_upload"
data_str = prefix + "_data"
url = cleaned.get(prefix + '_url')
data = cleaned.get(prefix + '_data')
has_upload = upload_str in files
# Uploaded file handler
if has_upload and not url:
log_template_name = files[upload_str].name
LOG.info('got upload %s' % log_template_name)
tpl = files[upload_str].read()
if tpl.startswith('{'):
try:
json.loads(tpl)
except Exception as e:
msg = _('There was a problem parsing the'
' %(prefix)s: %(error)s')
msg = msg % {'prefix': prefix, 'error': e}
raise forms.ValidationError(msg)
cleaned[data_str] = tpl
# URL handler
elif url and (has_upload or data):
msg = _('Please specify a %s using only one source method.')
msg = msg % field_label
raise forms.ValidationError(msg)
elif prefix == 'template':
# Check for raw template input - blank environment allowed
if not url and not data:
msg = _('You must specify a template via one of the '
'available sources.')
raise forms.ValidationError(msg)
def create_kwargs(self, data):
kwargs = {'parameters': data['template_validate'],
'environment_data': data['environment_data'],
'template_data': data['template_data'],
'template_url': data['template_url']}
if data.get('stack_id'):
kwargs['stack_id'] = data['stack_id']
return kwargs
def handle(self, request, data):
kwargs = self.create_kwargs(data)
# NOTE (gabriel): This is a bit of a hack, essentially rewriting this
# request so that we can chain it as an input to the next view...
# but hey, it totally works.
request.method = 'GET'
return self.next_view.as_view()(request, **kwargs)
class ChangeTemplateForm(TemplateForm):
class Meta:
name = _('Edit Template')
help_text = _('Select a new template to re-launch a stack.')
stack_id = forms.CharField(
label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(
label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
class CreateStackForm(forms.SelfHandlingForm):
param_prefix = '__param_'
class Meta:
name = _('Create Stack')
template_data = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
template_url = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
environment_data = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
parameters = forms.CharField(
widget=forms.widgets.HiddenInput)
stack_name = forms.RegexField(
max_length=255,
label=_('Stack Name'),
help_text=_('Name of the stack to create.'),
regex=r"^[a-zA-Z][a-zA-Z0-9_.-]*$",
error_messages={'invalid':
_('Name must start with a letter and may '
'only contain letters, numbers, underscores, '
'periods and hyphens.')})
timeout_mins = forms.IntegerField(
initial=60,
label=_('Creation Timeout (minutes)'),
help_text=_('Stack creation timeout in minutes.'))
enable_rollback = forms.BooleanField(
label=_('Rollback On Failure'),
help_text=_('Enable rollback on create/update failure.'),
required=False)
def __init__(self, *args, **kwargs):
parameters = kwargs.pop('parameters')
# special case: load template data from API, not passed in params
if kwargs.get('validate_me'):
parameters = kwargs.pop('validate_me')
super(CreateStackForm, self).__init__(*args, **kwargs)
self._build_parameter_fields(parameters)
def _build_parameter_fields(self, template_validate):
self.fields['password'] = forms.CharField(
label=_('Password for user "%s"') % self.request.user.username,
help_text=_('This is required for operations to be performed '
'throughout the lifecycle of the stack'),
widget=forms.PasswordInput())
self.help_text = template_validate['Description']
params = template_validate.get('Parameters', {})
if template_validate.get('ParameterGroups'):
params_in_order = []
for group in template_validate['ParameterGroups']:
for param in group.get('parameters', []):
if param in params:
params_in_order.append((param, params[param]))
else:
# no parameter groups, so no way to determine order
params_in_order = params.items()
for param_key, param in params_in_order:
field = None
field_key = self.param_prefix + param_key
field_args = {
'initial': param.get('Default', None),
'label': param.get('Label', param_key),
'help_text': param.get('Description', ''),
'required': param.get('Default', None) is None
}
param_type = param.get('Type', None)
hidden = strutils.bool_from_string(param.get('NoEcho', 'false'))
if 'AllowedValues' in param:
choices = map(lambda x: (x, x), param['AllowedValues'])
field_args['choices'] = choices
field = forms.ChoiceField(**field_args)
elif param_type in ('CommaDelimitedList', 'String', 'Json'):
if 'MinLength' in param:
field_args['min_length'] = int(param['MinLength'])
field_args['required'] = param.get('MinLength', 0) > 0
if 'MaxLength' in param:
field_args['max_length'] = int(param['MaxLength'])
if hidden:
field_args['widget'] = forms.PasswordInput()
field = forms.CharField(**field_args)
elif param_type == 'Number':
if 'MinValue' in param:
field_args['min_value'] = int(param['MinValue'])
if 'MaxValue' in param:
field_args['max_value'] = int(param['MaxValue'])
field = forms.IntegerField(**field_args)
# heat-api currently returns the boolean type in lowercase
# (see https://bugs.launchpad.net/heat/+bug/1361448)
# so for better compatibility both are checked here
elif param_type in ('Boolean', 'boolean'):
field = forms.BooleanField(**field_args)
if field:
self.fields[field_key] = field
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'password': data.get('password')
}
if data.get('template_data'):
fields['template'] = data.get('template_data')
else:
fields['template_url'] = data.get('template_url')
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
try:
api.heat.stack_create(self.request, **fields)
messages.success(request, _("Stack creation started."))
return True
except Exception:
exceptions.handle(request)
class EditStackForm(CreateStackForm):
class Meta:
name = _('Update Stack Parameters')
stack_id = forms.CharField(
label=_('Stack ID'),
widget=forms.widgets.HiddenInput)
stack_name = forms.CharField(
label=_('Stack Name'),
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in six.iteritems(data)
if k.startswith(self.param_prefix)]
stack_id = data.get('stack_id')
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'password': data.get('password')
}
# if the user went directly to this form, resubmit the existing
# template data. otherwise, submit what they had from the first form
if data.get('template_data'):
fields['template'] = data.get('template_data')
elif data.get('template_url'):
fields['template_url'] = data.get('template_url')
elif data.get('parameters'):
fields['template'] = data.get('parameters')
try:
api.heat.stack_update(self.request, stack_id=stack_id, **fields)
messages.success(request, _("Stack update started."))
return True
except Exception:
exceptions.handle(request)
| CiscoSystems/avos | openstack_dashboard/dashboards/project/stacks/forms.py | Python | apache-2.0 | 15,580 |
#!/usr/bin/python
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
from spacewalk.common import rhnLog
from spacewalk.server import rhnImport
rhnLog.initLOG(level=4)
dir = "spacewalk/server/handlers"
for i in range(2):
for iface in ['rpcClasses', 'getHandler']:
m = rhnImport.load(dir, interface_signature=iface)
| colloquium/spacewalk | backend/server/test/test-rhn-import.py | Python | gpl-2.0 | 895 |
from Screens.Screen import Screen
from Screens.LocationBox import MovieLocationBox, TimeshiftLocationBox
from Screens.MessageBox import MessageBox
from Components.Label import Label
from Components.config import config, ConfigSelection, getConfigListEntry
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap
from Tools.Directories import fileExists
from Components.UsageConfig import preferredPath
class RecordPathsSettings(ConfigListScreen, Screen):
skin = """
<screen name="RecordPathsSettings" position="160,150" size="450,200" title="Recording paths">
<ePixmap pixmap="buttons/red.png" position="10,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="300,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="10,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="300,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="config" position="10,44" size="430,146" />
</screen>"""
def __init__(self, session):
from Components.Sources.StaticText import StaticText
Screen.__init__(self, session)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self.setTitle(_("Recording paths"))
ConfigListScreen.__init__(self, [])
self.initConfigList()
self["setupActions"] = ActionMap(["SetupActions", "ColorActions", "MenuActions"],
{
"green": self.save,
"red": self.keyCancel,
"cancel": self.keyCancel,
"ok": self.ok,
"menu": self.closeRecursive,
}, -2)
def checkReadWriteDir(self, configele):
value = configele.value
print "checkReadWrite: ", value
if not value or value in [x[0] for x in self.styles] or fileExists(value, "w"):
configele.last_value = value
return True
else:
configele.value = configele.last_value
self.session.open(
MessageBox,
_("The directory %s is not writable.\nMake sure you select a writable directory instead.") % value,
type=MessageBox.TYPE_ERROR
)
return False
def initConfigList(self):
self.styles = [("<default>", _("<Default movie location>")), ("<current>", _("<Current movielist location>")), ("<timer>", _("<Last timer location>"))]
styles_keys = [x[0] for x in self.styles]
tmp = config.movielist.videodirs.value
default = config.usage.default_path.value
if default and default not in tmp:
tmp = tmp[:]
tmp.append(default)
print "DefaultPath: ", default, tmp
self.default_dirname = ConfigSelection(default=default, choices=[("", _("<Default movie location>"))] + tmp)
tmp = config.movielist.videodirs.value
default = config.usage.timer_path.value
if default not in tmp and default not in styles_keys:
tmp = tmp[:]
tmp.append(default)
print "TimerPath: ", default, tmp
self.timer_dirname = ConfigSelection(default=default, choices=self.styles + tmp)
tmp = config.movielist.videodirs.value
default = config.usage.instantrec_path.value
if default not in tmp and default not in styles_keys:
tmp = tmp[:]
tmp.append(default)
print "InstantrecPath: ", default, tmp
self.instantrec_dirname = ConfigSelection(default=default, choices=self.styles + tmp)
default = config.usage.timeshift_path.value
tmp = config.usage.allowed_timeshift_paths.value
if default not in tmp:
tmp = tmp[:]
tmp.append(default)
print "TimeshiftPath: ", default, tmp
self.timeshift_dirname = ConfigSelection(default=default, choices=tmp)
self.default_dirname.addNotifier(self.checkReadWriteDir, initial_call=False, immediate_feedback=False)
self.timer_dirname.addNotifier(self.checkReadWriteDir, initial_call=False, immediate_feedback=False)
self.instantrec_dirname.addNotifier(self.checkReadWriteDir, initial_call=False, immediate_feedback=False)
self.timeshift_dirname.addNotifier(self.checkReadWriteDir, initial_call=False, immediate_feedback=False)
self.list = []
if config.usage.setup_level.index >= 2:
self.default_entry = getConfigListEntry(_("Default movie location"), self.default_dirname)
self.list.append(self.default_entry)
self.timer_entry = getConfigListEntry(_("Timer recording location"), self.timer_dirname)
self.list.append(self.timer_entry)
self.instantrec_entry = getConfigListEntry(_("Instant recording location"), self.instantrec_dirname)
self.list.append(self.instantrec_entry)
else:
self.default_entry = getConfigListEntry(_("Movie location"), self.default_dirname)
self.list.append(self.default_entry)
self.timeshift_entry = getConfigListEntry(_("Timeshift location"), self.timeshift_dirname)
self.list.append(self.timeshift_entry)
self["config"].setList(self.list)
def ok(self):
currentry = self["config"].getCurrent()
self.lastvideodirs = config.movielist.videodirs.value
self.lasttimeshiftdirs = config.usage.allowed_timeshift_paths.value
if config.usage.setup_level.index >= 2:
txt = _("Default movie location")
else:
txt = _("Movie location")
if currentry == self.default_entry:
self.entrydirname = self.default_dirname
self.session.openWithCallback(
self.dirnameSelected,
MovieLocationBox,
txt,
preferredPath(self.default_dirname.value)
)
elif currentry == self.timer_entry:
self.entrydirname = self.timer_dirname
self.session.openWithCallback(
self.dirnameSelected,
MovieLocationBox,
_("Initial location in new timers"),
preferredPath(self.timer_dirname.value)
)
elif currentry == self.instantrec_entry:
self.entrydirname = self.instantrec_dirname
self.session.openWithCallback(
self.dirnameSelected,
MovieLocationBox,
_("Location for instant recordings"),
preferredPath(self.instantrec_dirname.value)
)
elif currentry == self.timeshift_entry:
self.entrydirname = self.timeshift_dirname
config.usage.timeshift_path.value = self.timeshift_dirname.value
self.session.openWithCallback(
self.dirnameSelected,
TimeshiftLocationBox
)
def dirnameSelected(self, res):
if res is not None:
self.entrydirname.value = res
if config.movielist.videodirs.value != self.lastvideodirs:
styles_keys = [x[0] for x in self.styles]
tmp = config.movielist.videodirs.value
default = self.default_dirname.value
if default and default not in tmp:
tmp = tmp[:]
tmp.append(default)
self.default_dirname.setChoices([("", _("<Default movie location>"))] + tmp, default=default)
tmp = config.movielist.videodirs.value
default = self.timer_dirname.value
if default not in tmp and default not in styles_keys:
tmp = tmp[:]
tmp.append(default)
self.timer_dirname.setChoices(self.styles + tmp, default=default)
tmp = config.movielist.videodirs.value
default = self.instantrec_dirname.value
if default not in tmp and default not in styles_keys:
tmp = tmp[:]
tmp.append(default)
self.instantrec_dirname.setChoices(self.styles + tmp, default=default)
self.entrydirname.value = res
if config.usage.allowed_timeshift_paths.value != self.lasttimeshiftdirs:
tmp = config.usage.allowed_timeshift_paths.value
default = self.instantrec_dirname.value
if default not in tmp:
tmp = tmp[:]
tmp.append(default)
self.timeshift_dirname.setChoices(tmp, default=default)
self.entrydirname.value = res
if self.entrydirname.last_value != res:
self.checkReadWriteDir(self.entrydirname)
def save(self):
currentry = self["config"].getCurrent()
if self.checkReadWriteDir(currentry[1]):
config.usage.default_path.value = self.default_dirname.value
config.usage.timer_path.value = self.timer_dirname.value
config.usage.instantrec_path.value = self.instantrec_dirname.value
config.usage.timeshift_path.value = self.timeshift_dirname.value
config.usage.default_path.save()
config.usage.timer_path.save()
config.usage.instantrec_path.save()
config.usage.timeshift_path.save()
self.close()
| OpenPLi/enigma2 | lib/python/Screens/RecordPaths.py | Python | gpl-2.0 | 8,099 |
"""Support for displaying minimal, maximal, mean or median values."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
CONF_TYPE,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.reload import async_setup_reload_service
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
ATTR_MIN_VALUE = "min_value"
ATTR_MIN_ENTITY_ID = "min_entity_id"
ATTR_MAX_VALUE = "max_value"
ATTR_MAX_ENTITY_ID = "max_entity_id"
ATTR_COUNT_SENSORS = "count_sensors"
ATTR_MEAN = "mean"
ATTR_MEDIAN = "median"
ATTR_LAST = "last"
ATTR_LAST_ENTITY_ID = "last_entity_id"
ATTR_TO_PROPERTY = [
ATTR_COUNT_SENSORS,
ATTR_MAX_VALUE,
ATTR_MAX_ENTITY_ID,
ATTR_MEAN,
ATTR_MEDIAN,
ATTR_MIN_VALUE,
ATTR_MIN_ENTITY_ID,
ATTR_LAST,
ATTR_LAST_ENTITY_ID,
]
CONF_ENTITY_IDS = "entity_ids"
CONF_ROUND_DIGITS = "round_digits"
ICON = "mdi:calculator"
SENSOR_TYPES = {
ATTR_MIN_VALUE: "min",
ATTR_MAX_VALUE: "max",
ATTR_MEAN: "mean",
ATTR_MEDIAN: "median",
ATTR_LAST: "last",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_TYPE, default=SENSOR_TYPES[ATTR_MAX_VALUE]): vol.All(
cv.string, vol.In(SENSOR_TYPES.values())
),
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_ENTITY_IDS): cv.entity_ids,
vol.Optional(CONF_ROUND_DIGITS, default=2): vol.Coerce(int),
}
)
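# Example configuration.yaml entry (illustrative; entity ids are
# placeholders) matching the schema above:
#
#   sensor:
#     - platform: min_max
#       name: "Average temperature"
#       type: mean
#       round_digits: 1
#       entity_ids:
#         - sensor.kitchen_temperature
#         - sensor.living_room_temperature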
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the min/max/mean sensor."""
entity_ids = config.get(CONF_ENTITY_IDS)
name = config.get(CONF_NAME)
sensor_type = config.get(CONF_TYPE)
round_digits = config.get(CONF_ROUND_DIGITS)
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
async_add_entities([MinMaxSensor(entity_ids, name, sensor_type, round_digits)])
def calc_min(sensor_values):
"""Calculate min value, honoring unknown states."""
val = None
entity_id = None
for sensor_id, sensor_value in sensor_values:
if sensor_value not in [STATE_UNKNOWN, STATE_UNAVAILABLE] and (
val is None or val > sensor_value
):
entity_id, val = sensor_id, sensor_value
return entity_id, val
def calc_max(sensor_values):
"""Calculate max value, honoring unknown states."""
val = None
entity_id = None
for sensor_id, sensor_value in sensor_values:
if sensor_value not in [STATE_UNKNOWN, STATE_UNAVAILABLE] and (
val is None or val < sensor_value
):
entity_id, val = sensor_id, sensor_value
return entity_id, val
def calc_mean(sensor_values, round_digits):
"""Calculate mean value, honoring unknown states."""
result = [
sensor_value
for _, sensor_value in sensor_values
if sensor_value not in [STATE_UNKNOWN, STATE_UNAVAILABLE]
]
if not result:
return None
return round(sum(result) / len(result), round_digits)
def calc_median(sensor_values, round_digits):
"""Calculate median value, honoring unknown states."""
result = [
sensor_value
for _, sensor_value in sensor_values
if sensor_value not in [STATE_UNKNOWN, STATE_UNAVAILABLE]
]
if not result:
return None
result.sort()
if len(result) % 2 == 0:
median1 = result[len(result) // 2]
median2 = result[len(result) // 2 - 1]
median = (median1 + median2) / 2
else:
median = result[len(result) // 2]
return round(median, round_digits)
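# Illustrative sketch of the helpers above on plain Python data, assuming
# hypothetical entity ids and states already parsed to floats:
#
#   calc_max([('sensor.a', 2.0), ('sensor.b', STATE_UNKNOWN)])  -> ('sensor.a', 2.0)
#   calc_mean([('sensor.a', 1.0), ('sensor.b', 2.0)], 2)        -> 1.5
#   calc_median([('sensor.a', 1.0), ('sensor.b', 2.0),
#                ('sensor.c', 4.0)], 2)                         -> 2.0
#
# For an even-length input, calc_median averages the two middle values.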
class MinMaxSensor(SensorEntity):
"""Representation of a min/max sensor."""
def __init__(self, entity_ids, name, sensor_type, round_digits):
"""Initialize the min/max sensor."""
self._entity_ids = entity_ids
self._sensor_type = sensor_type
self._round_digits = round_digits
if name:
self._name = name
else:
self._name = f"{next(v for k, v in SENSOR_TYPES.items() if self._sensor_type == v)} sensor".capitalize()
self._unit_of_measurement = None
self._unit_of_measurement_mismatch = False
self.min_value = self.max_value = self.mean = self.last = self.median = None
self.min_entity_id = self.max_entity_id = self.last_entity_id = None
self.count_sensors = len(self._entity_ids)
self.states = {}
async def async_added_to_hass(self):
"""Handle added to Hass."""
self.async_on_remove(
async_track_state_change_event(
self.hass, self._entity_ids, self._async_min_max_sensor_state_listener
)
)
self._calc_values()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_value(self):
"""Return the state of the sensor."""
if self._unit_of_measurement_mismatch:
return None
return getattr(
self, next(k for k, v in SENSOR_TYPES.items() if self._sensor_type == v)
)
@property
def native_unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self._unit_of_measurement_mismatch:
return "ERR"
return self._unit_of_measurement
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
attr: getattr(self, attr)
for attr in ATTR_TO_PROPERTY
if getattr(self, attr) is not None
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@callback
def _async_min_max_sensor_state_listener(self, event):
"""Handle the sensor state changes."""
new_state = event.data.get("new_state")
entity = event.data.get("entity_id")
if new_state.state is None or new_state.state in [
STATE_UNKNOWN,
STATE_UNAVAILABLE,
]:
self.states[entity] = STATE_UNKNOWN
self._calc_values()
self.async_write_ha_state()
return
if self._unit_of_measurement is None:
self._unit_of_measurement = new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
)
if self._unit_of_measurement != new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
):
_LOGGER.warning(
"Units of measurement do not match for entity %s", self.entity_id
)
self._unit_of_measurement_mismatch = True
try:
self.states[entity] = float(new_state.state)
self.last = float(new_state.state)
self.last_entity_id = entity
except ValueError:
_LOGGER.warning(
"Unable to store state. Only numerical states are supported"
)
self._calc_values()
self.async_write_ha_state()
@callback
def _calc_values(self):
"""Calculate the values."""
sensor_values = [
(entity_id, self.states[entity_id])
for entity_id in self._entity_ids
if entity_id in self.states
]
self.min_entity_id, self.min_value = calc_min(sensor_values)
self.max_entity_id, self.max_value = calc_max(sensor_values)
self.mean = calc_mean(sensor_values, self._round_digits)
self.median = calc_median(sensor_values, self._round_digits)
| jawilson/home-assistant | homeassistant/components/min_max/sensor.py | Python | apache-2.0 | 7,886 |
#!/usr/bin/env python
"""
"""
import vtk
def view_frog(fileName, tissues):
colors = vtk.vtkNamedColors()
tissueMap = CreateTissueMap()
colorLut = CreateFrogLut()
# Setup render window, renderer, and interactor.
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
for tissue in tissues:
actor = CreateFrogActor(fileName, tissueMap[tissue])
        actor.GetProperty().SetDiffuseColor(colorLut.GetTableValue(tissueMap[tissue])[:3])
actor.GetProperty().SetSpecular(.5)
actor.GetProperty().SetSpecularPower(10)
renderer.AddActor(actor)
# print("Tissue:", tissue, ", Label:", tissueMap[tissue])
renderer.GetActiveCamera().SetViewUp(0, 0, -1)
renderer.GetActiveCamera().SetPosition(0, -1, 0)
renderer.GetActiveCamera().Azimuth(210)
renderer.GetActiveCamera().Elevation(30)
renderer.ResetCamera()
renderer.ResetCameraClippingRange()
renderer.GetActiveCamera().Dolly(1.5)
renderer.SetBackground(colors.GetColor3d("SlateGray"))
renderWindow.SetSize(640, 480)
renderWindow.Render()
renderWindowInteractor.Start()
def main():
fileName, tissues = get_program_parameters()
view_frog(fileName, tissues)
def get_program_parameters():
import argparse
description = 'The complete frog without skin.'
epilogue = '''
For Figure 12-9b in the VTK Book:
Specify these tissues as parameters after the file name:
blood brain duodenum eyeRetina eyeWhite heart ileum kidney intestine liver lung nerve skeleton spleen stomach
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('filename', help='frogtissue.mhd.')
parser.add_argument('tissues', nargs='+', help='List of one or more tissues.')
args = parser.parse_args()
return args.filename, args.tissues
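# Example invocation (illustrative; assumes the frog dataset has been
# downloaded as frogtissue.mhd):
#
#   python ViewFrog.py frogtissue.mhd brain heart liver skeleton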
def CreateFrogLut():
colors = vtk.vtkNamedColors()
colorLut = vtk.vtkLookupTable()
colorLut.SetNumberOfColors(17)
colorLut.SetTableRange(0, 16)
colorLut.Build()
colorLut.SetTableValue(0, 0, 0, 0, 0)
colorLut.SetTableValue(1, colors.GetColor4d("salmon")) # blood
colorLut.SetTableValue(2, colors.GetColor4d("beige")) # brain
colorLut.SetTableValue(3, colors.GetColor4d("orange")) # duodenum
colorLut.SetTableValue(4, colors.GetColor4d("misty_rose")) # eye_retina
colorLut.SetTableValue(5, colors.GetColor4d("white")) # eye_white
colorLut.SetTableValue(6, colors.GetColor4d("tomato")) # heart
colorLut.SetTableValue(7, colors.GetColor4d("raspberry")) # ileum
colorLut.SetTableValue(8, colors.GetColor4d("banana")) # kidney
colorLut.SetTableValue(9, colors.GetColor4d("peru")) # l_intestine
colorLut.SetTableValue(10, colors.GetColor4d("pink")) # liver
colorLut.SetTableValue(11, colors.GetColor4d("powder_blue")) # lung
colorLut.SetTableValue(12, colors.GetColor4d("carrot")) # nerve
colorLut.SetTableValue(13, colors.GetColor4d("wheat")) # skeleton
colorLut.SetTableValue(14, colors.GetColor4d("violet")) # spleen
colorLut.SetTableValue(15, colors.GetColor4d("plum")) # stomach
return colorLut
def CreateTissueMap():
tissueMap = dict()
tissueMap["blood"] = 1
tissueMap["brain"] = 2
tissueMap["duodenum"] = 3
tissueMap["eyeRetina"] = 4
tissueMap["eyeWhite"] = 5
tissueMap["heart"] = 6
tissueMap["ileum"] = 7
tissueMap["kidney"] = 8
tissueMap["intestine"] = 9
tissueMap["liver"] = 10
tissueMap["lung"] = 11
tissueMap["nerve"] = 12
tissueMap["skeleton"] = 13
tissueMap["spleen"] = 14
tissueMap["stomach"] = 15
return tissueMap
def CreateFrogActor(fileName, tissue):
reader = vtk.vtkMetaImageReader()
reader.SetFileName(fileName)
reader.Update()
selectTissue = vtk.vtkImageThreshold()
selectTissue.ThresholdBetween(tissue, tissue)
selectTissue.SetInValue(255)
selectTissue.SetOutValue(0)
selectTissue.SetInputConnection(reader.GetOutputPort())
gaussianRadius = 1
gaussianStandardDeviation = 2.0
gaussian = vtk.vtkImageGaussianSmooth()
gaussian.SetStandardDeviations(gaussianStandardDeviation, gaussianStandardDeviation, gaussianStandardDeviation)
gaussian.SetRadiusFactors(gaussianRadius, gaussianRadius, gaussianRadius)
gaussian.SetInputConnection(selectTissue.GetOutputPort())
isoValue = 127.5
mcubes = vtk.vtkMarchingCubes()
mcubes.SetInputConnection(gaussian.GetOutputPort())
mcubes.ComputeScalarsOff()
mcubes.ComputeGradientsOff()
mcubes.ComputeNormalsOff()
mcubes.SetValue(0, isoValue)
smoothingIterations = 5
passBand = 0.001
featureAngle = 60.0
smoother = vtk.vtkWindowedSincPolyDataFilter()
smoother.SetInputConnection(mcubes.GetOutputPort())
smoother.SetNumberOfIterations(smoothingIterations)
smoother.BoundarySmoothingOff()
smoother.FeatureEdgeSmoothingOff()
smoother.SetFeatureAngle(featureAngle)
smoother.SetPassBand(passBand)
smoother.NonManifoldSmoothingOn()
smoother.NormalizeCoordinatesOn()
smoother.Update()
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(smoother.GetOutputPort())
normals.SetFeatureAngle(featureAngle)
stripper = vtk.vtkStripper()
stripper.SetInputConnection(normals.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(stripper.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
if __name__ == '__main__':
main()
| lorensen/VTKExamples | src/Python/Visualization/ViewFrog.py | Python | apache-2.0 | 5,808 |
#!/usr/bin/python
# Exercise 8.4: Open the file romeo.txt and read it line by line. For each
# line, split the line into a list of words using the split() function.
# Build a list of words: for each word on each line, check whether the word
# is already in the list and, if not, append it. When the program completes,
# sort and print the resulting words in alphabetical order.
# Sample data: http://www.pythonlearn.com/code/romeo.txt
my_list = list()
with open('romeo.txt', 'r') as fh:
for line in fh:
words = line.strip().lower().split()
for word in words:
if word not in my_list:
my_list.append(word)
print(sorted(my_list)) | hiteshagrawal/python | romeo.py | Python | gpl-2.0 | 671 |
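# A minimal alternative sketch for the same exercise (assumes romeo.txt is
# in the working directory, as above): a set gives O(1) membership tests
# instead of scanning the list for every word.
unique_words = set()
with open('romeo.txt') as fh:
    for line in fh:
        unique_words.update(line.strip().lower().split())
print(sorted(unique_words))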
import unittest
from datetime import datetime
from django.test import SimpleTestCase, ignore_warnings
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango30Warning
from django.utils.http import (
base36_to_int, cookie_date, escape_leading_slashes, http_date,
int_to_base36, is_safe_url, is_same_domain, parse_etags, parse_http_date,
quote_etag, urlencode, urlquote, urlquote_plus, urlsafe_base64_decode,
urlsafe_base64_encode, urlunquote, urlunquote_plus,
)
class URLEncodeTests(SimpleTestCase):
cannot_encode_none_msg = (
'Cannot encode None in a query string. Did you mean to pass an '
'empty string or omit the value?'
)
def test_tuples(self):
self.assertEqual(urlencode((('a', 1), ('b', 2), ('c', 3))), 'a=1&b=2&c=3')
def test_dict(self):
result = urlencode({'a': 1, 'b': 2, 'c': 3})
# Dictionaries are treated as unordered.
self.assertIn(result, [
'a=1&b=2&c=3',
'a=1&c=3&b=2',
'b=2&a=1&c=3',
'b=2&c=3&a=1',
'c=3&a=1&b=2',
'c=3&b=2&a=1',
])
def test_dict_containing_sequence_not_doseq(self):
self.assertEqual(urlencode({'a': [1, 2]}, doseq=False), 'a=%5B%271%27%2C+%272%27%5D')
def test_dict_containing_sequence_doseq(self):
self.assertEqual(urlencode({'a': [1, 2]}, doseq=True), 'a=1&a=2')
def test_dict_containing_empty_sequence_doseq(self):
self.assertEqual(urlencode({'a': []}, doseq=True), '')
def test_multivaluedict(self):
result = urlencode(MultiValueDict({
'name': ['Adrian', 'Simon'],
'position': ['Developer'],
}), doseq=True)
# MultiValueDicts are similarly unordered.
self.assertIn(result, [
'name=Adrian&name=Simon&position=Developer',
'position=Developer&name=Adrian&name=Simon',
])
def test_dict_with_bytes_values(self):
self.assertEqual(urlencode({'a': b'abc'}, doseq=True), 'a=abc')
def test_dict_with_sequence_of_bytes(self):
self.assertEqual(urlencode({'a': [b'spam', b'eggs', b'bacon']}, doseq=True), 'a=spam&a=eggs&a=bacon')
def test_dict_with_bytearray(self):
self.assertEqual(urlencode({'a': bytearray(range(2))}, doseq=True), 'a=0&a=1')
self.assertEqual(urlencode({'a': bytearray(range(2))}, doseq=False), 'a=%5B%270%27%2C+%271%27%5D')
def test_generator(self):
def gen():
yield from range(2)
self.assertEqual(urlencode({'a': gen()}, doseq=True), 'a=0&a=1')
self.assertEqual(urlencode({'a': gen()}, doseq=False), 'a=%5B%270%27%2C+%271%27%5D')
def test_none(self):
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({'a': None})
def test_none_in_sequence(self):
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({'a': [None]}, doseq=True)
def test_none_in_generator(self):
def gen():
yield None
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({'a': gen()}, doseq=True)
class Base36IntTests(SimpleTestCase):
def test_roundtrip(self):
for n in [0, 1, 1000, 1000000]:
self.assertEqual(n, base36_to_int(int_to_base36(n)))
def test_negative_input(self):
with self.assertRaisesMessage(ValueError, 'Negative base36 conversion input.'):
int_to_base36(-1)
def test_to_base36_errors(self):
for n in ['1', 'foo', {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
int_to_base36(n)
def test_invalid_literal(self):
for n in ['#', ' ']:
with self.assertRaisesMessage(ValueError, "invalid literal for int() with base 36: '%s'" % n):
base36_to_int(n)
def test_input_too_large(self):
with self.assertRaisesMessage(ValueError, 'Base36 input too large'):
base36_to_int('1' * 14)
def test_to_int_errors(self):
for n in [123, {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
base36_to_int(n)
def test_values(self):
for n, b36 in [(0, '0'), (1, '1'), (42, '16'), (818469960, 'django')]:
self.assertEqual(int_to_base36(n), b36)
self.assertEqual(base36_to_int(b36), n)
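# For reference, a minimal encoder consistent with the values checked above
# (a sketch, not Django's actual implementation):
#
#   def int_to_base36_sketch(i):
#       chars = '0123456789abcdefghijklmnopqrstuvwxyz'
#       if i < 0:
#           raise ValueError('Negative base36 conversion input.')
#       encoded = ''
#       while True:
#           i, remainder = divmod(i, 36)
#           encoded = chars[remainder] + encoded
#           if i == 0:
#               return encoded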
class IsSafeURLTests(unittest.TestCase):
def test_bad_urls(self):
bad_urls = (
'http://example.com',
'http:///example.com',
'https://example.com',
'ftp://example.com',
r'\\example.com',
r'\\\example.com',
r'/\\/example.com',
r'\\\example.com',
r'\\example.com',
r'\\//example.com',
r'/\/example.com',
r'\/example.com',
r'/\example.com',
'http:///example.com',
r'http:/\//example.com',
r'http:\/example.com',
r'http:/\example.com',
'javascript:alert("XSS")',
'\njavascript:alert(x)',
'\x08//example.com',
r'http://otherserver\@example.com',
r'http:\\testserver\@example.com',
r'http://testserver\me:pass@example.com',
r'http://testserver\@example.com',
r'http:\\testserver\confirm\me@example.com',
'http:999999999',
'ftp:9999999999',
'\n',
'http://[2001:cdba:0000:0000:0000:0000:3257:9652/',
'http://2001:cdba:0000:0000:0000:0000:3257:9652]/',
)
for bad_url in bad_urls:
with self.subTest(url=bad_url):
self.assertIs(is_safe_url(bad_url, allowed_hosts={'testserver', 'testserver2'}), False)
def test_good_urls(self):
good_urls = (
'/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://example.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'http://testserver/confirm?email=me@example.com',
'/url%20with%20spaces/',
'path/http:2222222222',
)
for good_url in good_urls:
with self.subTest(url=good_url):
self.assertIs(is_safe_url(good_url, allowed_hosts={'otherserver', 'testserver'}), True)
def test_basic_auth(self):
# Valid basic auth credentials are allowed.
self.assertIs(is_safe_url(r'http://user:pass@testserver/', allowed_hosts={'user:pass@testserver'}), True)
def test_no_allowed_hosts(self):
# A path without host is allowed.
self.assertIs(is_safe_url('/confirm/me@example.com', allowed_hosts=None), True)
# Basic auth without host is not allowed.
self.assertIs(is_safe_url(r'http://testserver\@example.com', allowed_hosts=None), False)
def test_allowed_hosts_str(self):
self.assertIs(is_safe_url('http://good.com/good', allowed_hosts='good.com'), True)
self.assertIs(is_safe_url('http://good.co/evil', allowed_hosts='good.com'), False)
def test_secure_param_https_urls(self):
secure_urls = (
'https://example.com/p',
'HTTPS://example.com/p',
'/view/?param=http://example.com',
)
for url in secure_urls:
with self.subTest(url=url):
self.assertIs(is_safe_url(url, allowed_hosts={'example.com'}, require_https=True), True)
def test_secure_param_non_https_urls(self):
insecure_urls = (
'http://example.com/p',
'ftp://example.com/p',
'//example.com/p',
)
for url in insecure_urls:
with self.subTest(url=url):
self.assertIs(is_safe_url(url, allowed_hosts={'example.com'}, require_https=True), False)
class URLSafeBase64Tests(unittest.TestCase):
def test_roundtrip(self):
bytestring = b'foo'
encoded = urlsafe_base64_encode(bytestring)
decoded = urlsafe_base64_decode(encoded)
self.assertEqual(bytestring, decoded)
class URLQuoteTests(unittest.TestCase):
def test_quote(self):
self.assertEqual(urlquote('Paris & Orl\xe9ans'), 'Paris%20%26%20Orl%C3%A9ans')
self.assertEqual(urlquote('Paris & Orl\xe9ans', safe="&"), 'Paris%20&%20Orl%C3%A9ans')
def test_unquote(self):
self.assertEqual(urlunquote('Paris%20%26%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(urlunquote('Paris%20&%20Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
def test_quote_plus(self):
self.assertEqual(urlquote_plus('Paris & Orl\xe9ans'), 'Paris+%26+Orl%C3%A9ans')
self.assertEqual(urlquote_plus('Paris & Orl\xe9ans', safe="&"), 'Paris+&+Orl%C3%A9ans')
def test_unquote_plus(self):
self.assertEqual(urlunquote_plus('Paris+%26+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
self.assertEqual(urlunquote_plus('Paris+&+Orl%C3%A9ans'), 'Paris & Orl\xe9ans')
class IsSameDomainTests(unittest.TestCase):
def test_good(self):
for pair in (
('example.com', 'example.com'),
('example.com', '.example.com'),
('foo.example.com', '.example.com'),
('example.com:8888', 'example.com:8888'),
('example.com:8888', '.example.com:8888'),
('foo.example.com:8888', '.example.com:8888'),
):
self.assertIs(is_same_domain(*pair), True)
def test_bad(self):
for pair in (
('example2.com', 'example.com'),
('foo.example.com', 'example.com'),
('example.com:9999', 'example.com:8888'),
('foo.example.com:8888', ''),
):
self.assertIs(is_same_domain(*pair), False)
class ETagProcessingTests(unittest.TestCase):
def test_parsing(self):
self.assertEqual(
parse_etags(r'"" , "etag", "e\\tag", W/"weak"'),
['""', '"etag"', r'"e\\tag"', 'W/"weak"']
)
self.assertEqual(parse_etags('*'), ['*'])
# Ignore RFC 2616 ETags that are invalid according to RFC 7232.
self.assertEqual(parse_etags(r'"etag", "e\"t\"ag"'), ['"etag"'])
def test_quoting(self):
self.assertEqual(quote_etag('etag'), '"etag"') # unquoted
self.assertEqual(quote_etag('"etag"'), '"etag"') # quoted
self.assertEqual(quote_etag('W/"etag"'), 'W/"etag"') # quoted, weak
class HttpDateProcessingTests(unittest.TestCase):
def test_http_date(self):
t = 1167616461.0
self.assertEqual(http_date(t), 'Mon, 01 Jan 2007 01:54:21 GMT')
@ignore_warnings(category=RemovedInDjango30Warning)
def test_cookie_date(self):
t = 1167616461.0
self.assertEqual(cookie_date(t), 'Mon, 01-Jan-2007 01:54:21 GMT')
def test_parsing_rfc1123(self):
parsed = parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
def test_parsing_rfc850(self):
parsed = parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
def test_parsing_asctime(self):
parsed = parse_http_date('Sun Nov 6 08:49:37 1994')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))
def test_parsing_year_less_than_70(self):
parsed = parse_http_date('Sun Nov 6 08:49:37 0037')
self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(2037, 11, 6, 8, 49, 37))
class EscapeLeadingSlashesTests(unittest.TestCase):
def test(self):
tests = (
('//example.com', '/%2Fexample.com'),
('//', '/%2F'),
)
for url, expected in tests:
with self.subTest(url=url):
self.assertEqual(escape_leading_slashes(url), expected)
| nesdis/djongo | tests/django_tests/tests/v22/tests/utils_tests/test_http.py | Python | agpl-3.0 | 12,062 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes, os
from webnotes.modules import scrub, get_module_path, scrub_dt_dn
def import_files(module, dt=None, dn=None, force=False):
if type(module) is list:
out = []
for m in module:
out.append(import_file(m[0], m[1], m[2], force))
return out
else:
return import_file(module, dt, dn, force)
def import_file(module, dt, dn, force=False):
"""Sync a file from txt if modifed, return false if not updated"""
webnotes.in_import = True
dt, dn = scrub_dt_dn(dt, dn)
path = os.path.join(get_module_path(module),
os.path.join(dt, dn, dn + '.txt'))
ret = import_file_by_path(path, force)
webnotes.in_import = False
return ret
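# Illustrative call (module/doctype names are hypothetical):
#
#   import_file('core', 'DocType', 'User')
#
# syncs <module_path>/doctype/user/user.txt and returns False when the stored
# 'modified' timestamp already matches the database record.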
def import_file_by_path(path, force=False):
if os.path.exists(path):
from webnotes.modules.utils import peval_doclist
with open(path, 'r') as f:
doclist = peval_doclist(f.read())
if doclist:
doc = doclist[0]
if not force:
# check if timestamps match
if doc['modified']==str(webnotes.conn.get_value(doc['doctype'], doc['name'], 'modified')):
return False
original_modified = doc["modified"]
import_doclist(doclist)
			# since there is a new timestamp on the file, update the
			# timestamp in the database record as well
webnotes.conn.sql("update `tab%s` set modified=%s where name=%s" % \
(doc['doctype'], '%s', '%s'),
(original_modified, doc['name']))
return True
else:
		raise Exception('%s missing' % path)
ignore_values = {
"Report": ["disabled"],
}
ignore_doctypes = ["Page Role", "DocPerm"]
def import_doclist(doclist):
doctype = doclist[0]["doctype"]
name = doclist[0]["name"]
old_doc = None
doctypes = set([d["doctype"] for d in doclist])
ignore = list(doctypes.intersection(set(ignore_doctypes)))
if doctype in ignore_values:
if webnotes.conn.exists(doctype, name):
old_doc = webnotes.doc(doctype, name)
# delete old
webnotes.delete_doc(doctype, name, force=1, ignore_doctypes=ignore, for_reload=True)
# don't overwrite ignored docs
doclist1 = remove_ignored_docs_if_they_already_exist(doclist, ignore, name)
# update old values (if not to be overwritten)
if doctype in ignore_values and old_doc:
update_original_values(doclist1, doctype, old_doc)
# reload_new
new_bean = webnotes.bean(doclist1)
new_bean.ignore_children_type = ignore
new_bean.ignore_check_links = True
new_bean.ignore_validate = True
new_bean.ignore_permissions = True
new_bean.ignore_mandatory = True
if doctype=="DocType" and name in ["DocField", "DocType"]:
new_bean.ignore_fields = True
new_bean.insert()
def remove_ignored_docs_if_they_already_exist(doclist, ignore, name):
doclist1 = doclist
if ignore:
has_records = []
for d in ignore:
if webnotes.conn.get_value(d, {"parent":name}):
has_records.append(d)
if has_records:
doclist1 = filter(lambda d: d["doctype"] not in has_records, doclist)
return doclist1
def update_original_values(doclist, doctype, old_doc):
for key in ignore_values[doctype]:
doclist[0][key] = old_doc.fields[key]
| rohitw1991/latestadbwnf | webnotes/modules/import_file.py | Python | mit | 3,122 |
# ############################################################################
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
# ############################################################################
from django.conf import settings
from django.test import TestCase, RequestFactory, override_settings
from django.utils.translation import gettext_lazy
from rest_framework import exceptions
from backoffice.settings.rest_framework.authentication import ESBAuthentication
from base.models.person import Person
@override_settings(REST_FRAMEWORK_ESB_AUTHENTICATION_SECRET_KEY="401f7ac837da42b97f613d789819ff93537bee6a")
class TestESBAuthentication(TestCase):
@classmethod
def setUpTestData(cls):
cls.authentication = ESBAuthentication()
cls.request_factory = RequestFactory()
cls.extra_http_headers = {
'HTTP_AUTHORIZATION': 'ESB 401f7ac837da42b97f613d789819ff93537bee6a',
'HTTP_ACCEPT_LANGUAGE': 'en',
'HTTP_X_USER_FIRSTNAME': 'Durant',
'HTTP_X_USER_LASTNAME': 'Thomas',
'HTTP_X_USER_EMAIL': 'thomas@dummy.com',
'HTTP_X_USER_GLOBALID': '0123456789',
}
def test_assert_get_secret_key(self):
self.assertEqual(
self.authentication.get_secret_key(), settings.REST_FRAMEWORK_ESB_AUTHENTICATION_SECRET_KEY
)
def test_assert_mandatory_headers(self):
mandatory_headers = [
'HTTP_X_USER_GLOBALID', 'HTTP_X_USER_FIRSTNAME', 'HTTP_X_USER_LASTNAME', 'HTTP_X_USER_EMAIL'
]
self.assertEqual(
self.authentication.get_mandatory_headers(), mandatory_headers
)
def test_assert_raise_exception_if_esb_header_empty_str(self):
http_headers = {
**self.extra_http_headers,
'HTTP_AUTHORIZATION': 'ESB '
}
request = self.request_factory.get('/dummy_url', **http_headers)
with self.assertRaises(exceptions.AuthenticationFailed) as context:
self.authentication.authenticate(request)
error_msg = gettext_lazy('Invalid ESB header. No credentials provided.')
self.assertEqual(context.exception.detail, error_msg)
def test_assert_raise_exception_if_esb_header_contains_multiple_space(self):
http_headers = {
**self.extra_http_headers,
'HTTP_AUTHORIZATION': 'ESB 401f7ac837da42b97f613d789819ff93537bee6a 565655'
}
request = self.request_factory.get('/dummy_url', **http_headers)
with self.assertRaises(exceptions.AuthenticationFailed) as context:
self.authentication.authenticate(request)
error_msg = gettext_lazy('Invalid ESB header. Secret key string should not contain spaces.')
self.assertEqual(context.exception.detail, error_msg)
def test_assert_raise_exception_if_missing_one_mandatory_headers(self):
for mandatory_header in self.authentication.get_mandatory_headers():
with self.subTest(mandatory_header=mandatory_header):
http_headers = {
**self.extra_http_headers,
mandatory_header: ''
}
request = self.request_factory.get('/dummy_url', **http_headers)
with self.assertRaises(exceptions.AuthenticationFailed) as context:
self.authentication.authenticate(request)
error_msg = gettext_lazy('Missing mandatory headers. '
'%(mandatory_headers)s should be present and filled') % {
'mandatory_headers': ", ".join(self.authentication.get_mandatory_headers())
}
self.assertEqual(context.exception.detail, error_msg)
def test_assert_raise_exception_if_secret_key_provided_is_not_the_same_as_settings(self):
http_headers = {
**self.extra_http_headers,
'HTTP_AUTHORIZATION': 'ESB 6565656565656565'
}
request = self.request_factory.get('/dummy_url', **http_headers)
with self.assertRaises(exceptions.AuthenticationFailed) as context:
self.authentication.authenticate(request)
self.assertEqual(context.exception.detail, gettext_lazy('Invalid token.'))
def test_assert_create_person_and_user_if_not_exist(self):
request = self.request_factory.get('/dummy_url', **self.extra_http_headers)
user_created, secret_key = self.authentication.authenticate(request)
self.assertEqual(user_created.username, self.extra_http_headers['HTTP_X_USER_EMAIL'])
person_created = user_created.person
self.assertEqual(person_created.first_name, self.extra_http_headers['HTTP_X_USER_FIRSTNAME'])
self.assertEqual(person_created.last_name, self.extra_http_headers['HTTP_X_USER_LASTNAME'])
self.assertEqual(person_created.email, self.extra_http_headers['HTTP_X_USER_EMAIL'])
self.assertEqual(person_created.language, self.extra_http_headers['HTTP_ACCEPT_LANGUAGE'])
def test_assert_update_person_if_person_exist_but_user_not_exist(self):
existing_person = Person.objects.create(
global_id=self.extra_http_headers['HTTP_X_USER_GLOBALID'],
first_name="Paul",
last_name="Dutronc",
email="paul.dutronc@dummy.com"
)
request = self.request_factory.get('/dummy_url', **self.extra_http_headers)
user_created, secret_key = self.authentication.authenticate(request)
self.assertEqual(user_created.person.pk, existing_person.pk)
self.assertEqual(user_created.username, self.extra_http_headers['HTTP_X_USER_EMAIL'])
| uclouvain/osis | backoffice/tests/settings/rest_framework/test_authentication.py | Python | agpl-3.0 | 6,687 |
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import os
import re
from ansible.module_utils.facts.virtual.base import Virtual, VirtualCollector
from ansible.module_utils.facts.utils import get_file_content, get_file_lines
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
virtual_facts = {}
# lxc/docker
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
virtual_facts['virtualization_type'] = 'docker'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if re.search('/lxc/', line) or re.search('/machine.slice/machine-lxc', line):
virtual_facts['virtualization_type'] = 'lxc'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
# lxc does not always appear in cgroups anymore but sets 'container=lxc' environment var, requires root privs
if os.path.exists('/proc/1/environ'):
for line in get_file_lines('/proc/1/environ'):
if re.search('container=lxc', line):
virtual_facts['virtualization_type'] = 'lxc'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if os.path.exists('/proc/vz') and not os.path.exists('/proc/lve'):
virtual_facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
virtual_facts['virtualization_role'] = 'host'
else:
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
systemd_container = get_file_content('/run/systemd/container')
if systemd_container:
virtual_facts['virtualization_type'] = systemd_container
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if os.path.exists("/proc/xen"):
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'guest'
try:
for line in get_file_lines('/proc/xen/capabilities'):
if "control_d" in line:
virtual_facts['virtualization_role'] = 'host'
except IOError:
pass
return virtual_facts
# assume guest for this block
virtual_facts['virtualization_role'] = 'guest'
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
if product_name in ('KVM', 'Bochs'):
virtual_facts['virtualization_type'] = 'kvm'
return virtual_facts
if product_name == 'RHEV Hypervisor':
virtual_facts['virtualization_type'] = 'RHEV'
return virtual_facts
if product_name in ('VMware Virtual Platform', 'VMware7,1'):
virtual_facts['virtualization_type'] = 'VMware'
return virtual_facts
if product_name in ('OpenStack Compute', 'OpenStack Nova'):
virtual_facts['virtualization_type'] = 'openstack'
return virtual_facts
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
virtual_facts['virtualization_type'] = 'xen'
return virtual_facts
if bios_vendor == 'innotek GmbH':
virtual_facts['virtualization_type'] = 'virtualbox'
return virtual_facts
if bios_vendor in ('Amazon EC2', 'DigitalOcean', 'Hetzner'):
virtual_facts['virtualization_type'] = 'kvm'
return virtual_facts
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
KVM_SYS_VENDORS = ('QEMU', 'oVirt', 'Amazon EC2', 'DigitalOcean', 'Google', 'Scaleway')
if sys_vendor in KVM_SYS_VENDORS:
virtual_facts['virtualization_type'] = 'kvm'
return virtual_facts
# FIXME: This does also match hyperv
if sys_vendor == 'Microsoft Corporation':
virtual_facts['virtualization_type'] = 'VirtualPC'
return virtual_facts
if sys_vendor == 'Parallels Software International Inc.':
virtual_facts['virtualization_type'] = 'parallels'
return virtual_facts
if sys_vendor == 'OpenStack Foundation':
virtual_facts['virtualization_type'] = 'openstack'
return virtual_facts
# unassume guest
del virtual_facts['virtualization_role']
if os.path.exists('/proc/self/status'):
for line in get_file_lines('/proc/self/status'):
if re.match(r'^VxID:\s+\d+', line):
virtual_facts['virtualization_type'] = 'linux_vserver'
if re.match(r'^VxID:\s+0', line):
virtual_facts['virtualization_role'] = 'host'
else:
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
if os.path.exists('/proc/cpuinfo'):
for line in get_file_lines('/proc/cpuinfo'):
if re.match('^model name.*QEMU Virtual CPU', line):
virtual_facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
virtual_facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
virtual_facts['virtualization_type'] = 'uml'
elif re.match('^machine.*CHRP IBM pSeries .emulated by qemu.', line):
virtual_facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*PowerVM Lx86', line):
virtual_facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
virtual_facts['virtualization_type'] = 'PR/SM'
lscpu = self.module.get_bin_path('lscpu')
if lscpu:
rc, out, err = self.module.run_command(["lscpu"])
if rc == 0:
for line in out.splitlines():
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
virtual_facts['virtualization_type'] = data[1].strip()
else:
virtual_facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if virtual_facts['virtualization_type'] == 'PR/SM':
virtual_facts['virtualization_role'] = 'LPAR'
else:
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in get_file_lines("/proc/modules"):
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
virtual_facts['virtualization_type'] = 'kvm'
virtual_facts['virtualization_role'] = 'host'
if os.path.isdir('/rhev/'):
# Check whether this is a RHEV hypervisor (is vdsm running ?)
for f in glob.glob('/proc/[0-9]*/comm'):
try:
with open(f) as virt_fh:
comm_content = virt_fh.read().rstrip()
if comm_content == 'vdsm':
virtual_facts['virtualization_type'] = 'RHEV'
break
except Exception:
pass
return virtual_facts
if 'vboxdrv' in modules:
virtual_facts['virtualization_type'] = 'virtualbox'
virtual_facts['virtualization_role'] = 'host'
return virtual_facts
if 'virtio' in modules:
virtual_facts['virtualization_type'] = 'kvm'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
# In older Linux Kernel versions, /sys filesystem is not available
# dmidecode is the safest option to parse virtualization related values
dmi_bin = self.module.get_bin_path('dmidecode')
# We still want to continue even if dmidecode is not available
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s system-product-name' % dmi_bin)
if rc == 0:
# Strip out commented lines (specific dmidecode output)
vendor_name = ''.join([line.strip() for line in out.splitlines() if not line.startswith('#')])
if vendor_name.startswith('VMware'):
virtual_facts['virtualization_type'] = 'VMware'
virtual_facts['virtualization_role'] = 'guest'
return virtual_facts
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
virtual_facts['virtualization_type'] = 'NA'
virtual_facts['virtualization_role'] = 'NA'
return virtual_facts
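# Illustrative result (hypothetical host): on a KVM guest whose DMI product
# name is 'KVM', the checks above yield
#
#   {'virtualization_type': 'kvm', 'virtualization_role': 'guest'}
#
# while an unrecognized platform falls through to the 'NA'/'NA' defaults.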
class LinuxVirtualCollector(VirtualCollector):
_fact_class = LinuxVirtual
_platform = 'Linux'
| sestrella/ansible | lib/ansible/module_utils/facts/virtual/linux.py | Python | gpl-3.0 | 10,658 |
import base64
import binascii
import mimetypes
import urllib.parse
import uuid
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import UploadedFile
from django.utils.translation import ugettext as _
from rest_framework.fields import FileField, SkipField
from mainsite.validators import ValidImageValidator
class Base64FileField(FileField):
    # mimetypes.guess_extension() may return different values for the same
    # mimetype, but we need exactly one extension per mime type
_MIME_MAPPING = {
'image/jpeg': '.jpg',
'audio/wav': '.wav',
'image/svg+xml': '.svg'
}
_ERROR_MESSAGE = _('Base64 string is incorrect')
def to_internal_value(self, data):
if isinstance(data, UploadedFile):
return super(Base64FileField, self).to_internal_value(data)
try:
mime, encoded_data = data.replace('data:', '', 1).split(';base64,')
            extension = (self._MIME_MAPPING[mime] if mime in self._MIME_MAPPING
                         else mimetypes.guess_extension(mime))
if extension is None:
raise ValidationError('Invalid MIME type')
ret = ContentFile(base64.b64decode(encoded_data), name='{name}{extension}'.format(name=str(uuid.uuid4()),
extension=extension))
return ret
except (ValueError, binascii.Error):
return super(Base64FileField, self).to_internal_value(data)
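    # Illustrative payload (truncated, hypothetical): a data URI such as
    #   'data:image/jpeg;base64,/9j/4AAQ...'
    # splits into mime='image/jpeg' plus the base64 body, and is stored as
    # '<uuid4>.jpg' via the _MIME_MAPPING table above.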
class ValidImageField(Base64FileField):
default_validators = [ValidImageValidator()]
def __init__(self, skip_http=True, allow_empty_file=False, use_url=True, allow_null=True, **kwargs):
self.skip_http = skip_http
self.use_public = kwargs.pop('use_public', False)
super(ValidImageField, self).__init__(
allow_empty_file=allow_empty_file, use_url=use_url, allow_null=allow_null, **kwargs
)
def to_internal_value(self, data):
# Skip http/https urls to avoid overwriting valid data when, for example, a client GETs and subsequently PUTs an
# entity containing an image URL.
if self.skip_http and not isinstance(data, UploadedFile) and urllib.parse.urlparse(data).scheme in ('http', 'https'):
raise SkipField()
self.source_attrs = ['image'] # Kind of a dirty hack, because this is failing to stick if set on init.
return super(ValidImageField, self).to_internal_value(data)
def to_representation(self, value):
if self.use_public:
try:
if getattr(value, 'instance', None):
return value.instance.image_url(public=True) # sometimes value is a FileField despite source="*"
return value.image_url(public=True)
except AttributeError:
pass
try:
return super(ValidImageField, self).to_representation(value.image)
except AttributeError:
return super(ValidImageField, self).to_representation(value)
| concentricsky/badgr-server | apps/mainsite/drf_fields.py | Python | agpl-3.0 | 3,112 |
from lino.projects.std.settings import *
# configure_plugin('countries', country_code='BE')
class Site(Site):
verbose_name = "20121124"
# demo_fixtures = ["few_countries", "few_cities", "demo"]
def get_installed_apps(self):
yield super(Site, self).get_installed_apps()
yield 'lino_book.projects.20121124'
SITE = Site(globals())
DEBUG = True
# INSTALLED_APPS = ['lino_book.projects.20121124']
#
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': ':memory:'
# }
# }
#SECRET_KEY = "123"
| lino-framework/book | lino_book/projects/20121124/settings.py | Python | bsd-2-clause | 579 |
"""Provides a Hue API to control Home Assistant."""
import asyncio
import logging
from aiohttp import web
from homeassistant import core
from homeassistant.const import (
ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_VOLUME_SET,
SERVICE_OPEN_COVER, SERVICE_CLOSE_COVER, STATE_ON, STATE_OFF,
HTTP_BAD_REQUEST, HTTP_NOT_FOUND, ATTR_SUPPORTED_FEATURES,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS
)
from homeassistant.components.media_player import (
ATTR_MEDIA_VOLUME_LEVEL, SUPPORT_VOLUME_SET,
)
from homeassistant.components.fan import (
ATTR_SPEED, SUPPORT_SET_SPEED, SPEED_OFF, SPEED_LOW,
SPEED_MEDIUM, SPEED_HIGH
)
from homeassistant.components.http import HomeAssistantView
_LOGGER = logging.getLogger(__name__)
ATTR_EMULATED_HUE = 'emulated_hue'
ATTR_EMULATED_HUE_NAME = 'emulated_hue_name'
HUE_API_STATE_ON = 'on'
HUE_API_STATE_BRI = 'bri'
class HueUsernameView(HomeAssistantView):
"""Handle requests to create a username for the emulated hue bridge."""
url = '/api'
name = 'emulated_hue:api:create_username'
extra_urls = ['/api/']
requires_auth = False
@asyncio.coroutine
def post(self, request):
"""Handle a POST request."""
try:
data = yield from request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
if 'devicetype' not in data:
return self.json_message('devicetype not specified',
HTTP_BAD_REQUEST)
return self.json([{'success': {'username': '12345678901234567890'}}])
class HueAllLightsStateView(HomeAssistantView):
"""Handle requests for getting and setting info about entities."""
url = '/api/{username}/lights'
name = 'emulated_hue:lights:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def get(self, request, username):
"""Process a request to get the list of available lights."""
hass = request.app['hass']
json_response = {}
for entity in hass.states.async_all():
if self.config.is_entity_exposed(entity):
state, brightness = get_entity_state(self.config, entity)
number = self.config.entity_id_to_number(entity.entity_id)
json_response[number] = entity_to_json(
entity, state, brightness)
return self.json(json_response)
class HueOneLightStateView(HomeAssistantView):
"""Handle requests for getting and setting info about entities."""
url = '/api/{username}/lights/{entity_id}'
name = 'emulated_hue:light:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def get(self, request, username, entity_id):
"""Process a request to get the state of an individual light."""
hass = request.app['hass']
entity_id = self.config.number_to_entity_id(entity_id)
entity = hass.states.get(entity_id)
if entity is None:
_LOGGER.error('Entity not found: %s', entity_id)
return web.Response(text="Entity not found", status=404)
if not self.config.is_entity_exposed(entity):
_LOGGER.error('Entity not exposed: %s', entity_id)
return web.Response(text="Entity not exposed", status=404)
state, brightness = get_entity_state(self.config, entity)
json_response = entity_to_json(entity, state, brightness)
return self.json(json_response)
class HueOneLightChangeView(HomeAssistantView):
"""Handle requests for getting and setting info about entities."""
url = '/api/{username}/lights/{entity_number}/state'
name = 'emulated_hue:light:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@asyncio.coroutine
def put(self, request, username, entity_number):
"""Process a request to set the state of an individual light."""
config = self.config
hass = request.app['hass']
entity_id = config.number_to_entity_id(entity_number)
if entity_id is None:
_LOGGER.error('Unknown entity number: %s', entity_number)
return self.json_message('Entity not found', HTTP_NOT_FOUND)
entity = hass.states.get(entity_id)
if entity is None:
_LOGGER.error('Entity not found: %s', entity_id)
return self.json_message('Entity not found', HTTP_NOT_FOUND)
if not config.is_entity_exposed(entity):
_LOGGER.error('Entity not exposed: %s', entity_id)
return web.Response(text="Entity not exposed", status=404)
try:
request_json = yield from request.json()
except ValueError:
_LOGGER.error('Received invalid json')
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
# Parse the request into requested "on" status and brightness
parsed = parse_hue_api_put_light_body(request_json, entity)
if parsed is None:
_LOGGER.error('Unable to parse data: %s', request_json)
return web.Response(text="Bad request", status=400)
result, brightness = parsed
# Choose general HA domain
domain = core.DOMAIN
# Entity needs separate call to turn on
turn_on_needed = False
# Convert the resulting "on" status into the service we need to call
service = SERVICE_TURN_ON if result else SERVICE_TURN_OFF
# Construct what we need to send to the service
data = {ATTR_ENTITY_ID: entity_id}
# Make sure the entity actually supports brightness
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if entity.domain == "light":
if entity_features & SUPPORT_BRIGHTNESS:
if brightness is not None:
data[ATTR_BRIGHTNESS] = brightness
# If the requested entity is a script add some variables
elif entity.domain == "script":
data['variables'] = {
'requested_state': STATE_ON if result else STATE_OFF
}
if brightness is not None:
data['variables']['requested_level'] = brightness
# If the requested entity is a media player, convert to volume
elif entity.domain == "media_player":
if entity_features & SUPPORT_VOLUME_SET:
if brightness is not None:
turn_on_needed = True
domain = entity.domain
service = SERVICE_VOLUME_SET
# Convert 0-100 to 0.0-1.0
data[ATTR_MEDIA_VOLUME_LEVEL] = brightness / 100.0
# If the requested entity is a cover, convert to open_cover/close_cover
elif entity.domain == "cover":
domain = entity.domain
if service == SERVICE_TURN_ON:
service = SERVICE_OPEN_COVER
else:
service = SERVICE_CLOSE_COVER
# If the requested entity is a fan, convert to speed
elif entity.domain == "fan":
if entity_features & SUPPORT_SET_SPEED:
if brightness is not None:
domain = entity.domain
# Convert 0-100 to a fan speed
if brightness == 0:
data[ATTR_SPEED] = SPEED_OFF
                    elif 0 < brightness <= 33.3:
                        data[ATTR_SPEED] = SPEED_LOW
                    elif 33.3 < brightness <= 66.6:
                        data[ATTR_SPEED] = SPEED_MEDIUM
                    elif 66.6 < brightness <= 100:
data[ATTR_SPEED] = SPEED_HIGH
if entity.domain in config.off_maps_to_on_domains:
# Map the off command to on
service = SERVICE_TURN_ON
# Caching is required because things like scripts and scenes won't
# report as "off" to Alexa if an "off" command is received, because
# they'll map to "on". Thus, instead of reporting its actual
# status, we report what Alexa will want to see, which is the same
# as the actual requested command.
config.cached_states[entity_id] = (result, brightness)
# Separate call to turn on needed
if turn_on_needed:
hass.async_add_job(hass.services.async_call(
core.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: entity_id},
blocking=True))
hass.async_add_job(hass.services.async_call(
domain, service, data, blocking=True))
json_response = \
[create_hue_success_response(entity_id, HUE_API_STATE_ON, result)]
if brightness is not None:
json_response.append(create_hue_success_response(
entity_id, HUE_API_STATE_BRI, brightness))
return self.json(json_response)
def parse_hue_api_put_light_body(request_json, entity):
"""Parse the body of a request to change the state of a light."""
if HUE_API_STATE_ON in request_json:
if not isinstance(request_json[HUE_API_STATE_ON], bool):
return None
if request_json['on']:
# Echo requested device be turned on
brightness = None
report_brightness = False
result = True
else:
# Echo requested device be turned off
brightness = None
report_brightness = False
result = False
if HUE_API_STATE_BRI in request_json:
try:
# Clamp brightness from 0 to 255
brightness = \
max(0, min(int(request_json[HUE_API_STATE_BRI]), 255))
except ValueError:
return None
# Make sure the entity actually supports brightness
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if entity.domain == "light":
if entity_features & SUPPORT_BRIGHTNESS:
report_brightness = True
result = (brightness > 0)
elif (entity.domain == "script" or
entity.domain == "media_player" or
entity.domain == "fan"):
# Convert 0-255 to 0-100
level = brightness / 255 * 100
brightness = round(level)
report_brightness = True
result = True
return (result, brightness) if report_brightness else (result, None)
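# Illustrative bodies (hypothetical entities): {'on': True, 'bri': 127} for
# a dimmable light parses to (True, 127); the same body for a media_player
# is rescaled to (True, 50), since round(127 / 255 * 100) == 50.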
def get_entity_state(config, entity):
"""Retrieve and convert state and brightness values for an entity."""
cached_state = config.cached_states.get(entity.entity_id, None)
if cached_state is None:
final_state = entity.state != STATE_OFF
final_brightness = entity.attributes.get(
ATTR_BRIGHTNESS, 255 if final_state else 0)
# Make sure the entity actually supports brightness
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if entity.domain == "light":
if entity_features & SUPPORT_BRIGHTNESS:
pass
elif entity.domain == "media_player":
level = entity.attributes.get(
ATTR_MEDIA_VOLUME_LEVEL, 1.0 if final_state else 0.0)
# Convert 0.0-1.0 to 0-255
final_brightness = round(min(1.0, level) * 255)
elif entity.domain == "fan":
speed = entity.attributes.get(ATTR_SPEED, 0)
            # Map the named fan speed onto the 0-255 brightness scale
final_brightness = 0
if speed == SPEED_LOW:
final_brightness = 85
elif speed == SPEED_MEDIUM:
final_brightness = 170
elif speed == SPEED_HIGH:
final_brightness = 255
else:
final_state, final_brightness = cached_state
# Make sure brightness is valid
if final_brightness is None:
final_brightness = 255 if final_state else 0
return (final_state, final_brightness)
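# Illustrative results (hypothetical entities): a fan reporting speed
# 'medium' yields (True, 170); a cached state of (True, None) is normalized
# to (True, 255) by the final brightness check above.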
def entity_to_json(entity, is_on=None, brightness=None):
"""Convert an entity to its Hue bridge JSON representation."""
name = entity.attributes.get(ATTR_EMULATED_HUE_NAME, entity.name)
return {
'state':
{
HUE_API_STATE_ON: is_on,
HUE_API_STATE_BRI: brightness,
'reachable': True
},
'type': 'Dimmable light',
'name': name,
'modelid': 'HASS123',
'uniqueid': entity.entity_id,
'swversion': '123'
}
def create_hue_success_response(entity_id, attr, value):
"""Create a success response for an attribute set on a light."""
success_key = '/lights/{}/state/{}'.format(entity_id, attr)
return {'success': {success_key: value}}
| kyvinh/home-assistant | homeassistant/components/emulated_hue/hue_api.py | Python | apache-2.0 | 13,038 |
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that does nothing successfully."""
| ric2b/Vivaldi-browser | chromium/build/noop.py | Python | bsd-3-clause | 207 |
from pecan import expose
from ceph_installer.controllers import (
tasks, mon, osd, rgw, calamari, errors, setup, agent,
status
)
class ApiController(object):
@expose('json')
def index(self):
# TODO: allow some autodiscovery here so that clients can see what is
# available
return dict()
agent = agent.AgentController()
tasks = tasks.TasksController()
mon = mon.MONController()
osd = osd.OSDController()
rgw = rgw.RGWController()
calamari = calamari.CalamariController()
status = status.StatusController()
class RootController(object):
@expose('json')
def index(self):
return dict()
api = ApiController()
errors = errors.ErrorController()
setup = setup.SetupController()
| ceph/ceph-installer | ceph_installer/controllers/root.py | Python | mit | 774 |
import os
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrum_mona.util import base_units
from electrum_mona.storage import StorageReadWriteError
from ...i18n import _
from .label_dialog import LabelDialog
Builder.load_string('''
<WalletDialog@Popup>:
title: _('Wallets')
id: popup
path: ''
disable_new: True
BoxLayout:
orientation: 'vertical'
padding: '10dp'
FileChooserIconView:
id: wallet_selector
dirselect: False
filter_dirs: True
filter: '*.*'
path: root.path
rootpath: root.path
size_hint_y: 0.6
Widget
size_hint_y: 0.1
GridLayout:
cols: 3
size_hint_y: 0.1
Button:
id: new_button
disabled: root.disable_new
size_hint: 0.1, None
height: '48dp'
text: _('New')
on_release:
popup.dismiss()
root.new_wallet(wallet_selector.path)
Button:
id: open_button
size_hint: 0.1, None
height: '48dp'
text: _('Open')
disabled: not wallet_selector.selection
on_release:
popup.dismiss()
root.callback(wallet_selector.selection[0])
''')
class WalletDialog(Factory.Popup):
def __init__(self, path, callback, disable_new):
Factory.Popup.__init__(self)
self.path = path
self.callback = callback
self.disable_new = disable_new
def new_wallet(self, dirname):
assert self.disable_new is False
def cb(filename):
if not filename:
return
# FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
self.callback(os.path.join(dirname, filename))
d = LabelDialog(_('Enter wallet name'), '', cb)
d.open()
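# Example usage (illustrative; the callback is hypothetical):
#
#   dialog = WalletDialog('/path/to/wallets', callback=open_wallet,
#                         disable_new=False)
#   dialog.open()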
| wakiyamap/electrum-mona | electrum_mona/gui/kivy/uix/dialogs/wallets.py | Python | mit | 2,112 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import os
import mock
import unittest
from contextlib import contextmanager
from shutil import rmtree
from StringIO import StringIO
from tempfile import mkdtemp
from test.unit import FakeLogger
from xml.dom import minidom
from eventlet import spawn, Timeout, listen
import simplejson
from swift.common.swob import Request, HeaderKeyDict
import swift.container
from swift.container import server as container_server
from swift.common.utils import (normalize_timestamp, mkdirs, public,
replication, lock_parent_directory)
from test.unit import fake_http_connect
from swift.common.request_helpers import get_sys_meta_prefix
@contextmanager
def save_globals():
orig_http_connect = getattr(swift.container.server, 'http_connect',
None)
try:
yield True
finally:
swift.container.server.http_connect = orig_http_connect
class TestContainerController(unittest.TestCase):
"""Test swift.container.server.ContainerController"""
def setUp(self):
"""Set up for testing swift.object_server.ObjectController"""
self.testdir = os.path.join(mkdtemp(),
'tmp_test_object_server_ObjectController')
mkdirs(self.testdir)
rmtree(self.testdir)
mkdirs(os.path.join(self.testdir, 'sda1'))
mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false'})
def tearDown(self):
"""Tear down for testing swift.object_server.ObjectController"""
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
def test_acl_container(self):
# Ensure no acl by default
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0'})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('201'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assert_('x-container-read' not in response.headers)
self.assert_('x-container-write' not in response.headers)
# Ensure POSTing acls works
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '1', 'X-Container-Read': '.r:*',
'X-Container-Write': 'account:user'})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('204'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assertEquals(response.headers.get('x-container-read'), '.r:*')
self.assertEquals(response.headers.get('x-container-write'),
'account:user')
# Ensure we can clear acls on POST
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '3', 'X-Container-Read': '',
'X-Container-Write': ''})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('204'))
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assert_('x-container-read' not in response.headers)
self.assert_('x-container-write' not in response.headers)
# Ensure PUTing acls works
req = Request.blank(
'/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '4', 'X-Container-Read': '.r:*',
'X-Container-Write': 'account:user'})
resp = req.get_response(self.controller)
self.assert_(resp.status.startswith('201'))
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'HEAD'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assertEquals(response.headers.get('x-container-read'), '.r:*')
self.assertEquals(response.headers.get('x-container-write'),
'account:user')
def test_HEAD(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '0'})
response = req.get_response(self.controller)
self.assert_(response.status.startswith('204'))
self.assertEquals(int(response.headers['x-container-bytes-used']), 0)
self.assertEquals(int(response.headers['x-container-object-count']), 0)
req2 = Request.blank(
'/sda1/p/a/c/o', environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1', 'HTTP_X_SIZE': 42,
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x'})
req2.get_response(self.controller)
response = req.get_response(self.controller)
self.assertEquals(int(response.headers['x-container-bytes-used']), 42)
self.assertEquals(int(response.headers['x-container-object-count']), 1)
def test_HEAD_not_found(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_HEAD_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_HEAD_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_HEAD_invalid_content_type(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'},
headers={'Accept': 'application/plain'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 406)
def test_HEAD_invalid_format(self):
        # invalid UTF-8: a valid sequence would be %E1%BD%8A9 (the leading
        # E was changed to a D)
        format = '%D1%BD%8A9'
req = Request.blank(
'/sda1/p/a/c?format=' + format,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_PUT(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
def test_PUT_simulated_create_race(self):
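        # Simulate another request creating the container DB between this
        # PUT's existence check and its initialize() call: the second PUT
        # must still be accepted with a 202 rather than erroring out.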
state = ['initial']
from swift.container.backend import ContainerBroker as OrigCoBr
class InterceptedCoBr(OrigCoBr):
def __init__(self, *args, **kwargs):
super(InterceptedCoBr, self).__init__(*args, **kwargs)
if state[0] == 'initial':
# Do nothing initially
pass
elif state[0] == 'race':
# Save the original db_file attribute value
self._saved_db_file = self.db_file
self.db_file += '.doesnotexist'
def initialize(self, *args, **kwargs):
if state[0] == 'initial':
# Do nothing initially
pass
elif state[0] == 'race':
# Restore the original db_file attribute to get the race
# behavior
self.db_file = self._saved_db_file
return super(InterceptedCoBr, self).initialize(*args, **kwargs)
with mock.patch("swift.container.server.ContainerBroker",
InterceptedCoBr):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
state[0] = "race"
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
def test_PUT_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1', 'X-Size': '0',
'X-Content-Type': 'text/plain', 'X-ETag': 'e'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_PUT_GET_metadata(self):
# Set metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Container-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Container-Meta-Test2': 'Value2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
self.assertEquals(resp.headers.get('x-container-meta-test2'), 'Value2')
# Update metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(3),
'X-Container-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(2),
'X-Container-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(4),
'X-Container-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assert_('x-container-meta-test' not in resp.headers)
def test_PUT_GET_sys_metadata(self):
prefix = get_sys_meta_prefix('container')
key = '%sTest' % prefix
key2 = '%sTest2' % prefix
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1),
key: 'Value'})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c')
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
key2: 'Value2'})
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c')
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()), 'Value')
self.assertEquals(resp.headers.get(key2.lower()), 'Value2')
# Update metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(3),
key: 'New Value'})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c')
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()),
'New Value')
# Send old update to metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(2),
key: 'Old Value'})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c')
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(4),
key: ''})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 202)
req = Request.blank('/sda1/p/a/c')
resp = self.controller.GET(req)
self.assertEquals(resp.status_int, 204)
self.assert_(key.lower() not in resp.headers)
def test_PUT_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_PUT_timestamp_not_float(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_PUT_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_POST_HEAD_metadata(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# Set metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Container-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'), 'Value')
# Update metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(3),
'X-Container-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(2),
'X-Container-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('x-container-meta-test'),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(4),
'X-Container-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
self.assert_('x-container-meta-test' not in resp.headers)
def test_POST_HEAD_sys_metadata(self):
prefix = get_sys_meta_prefix('container')
key = '%sTest' % prefix
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = self.controller.PUT(req)
self.assertEquals(resp.status_int, 201)
# Set metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
key: 'Value'})
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()), 'Value')
# Update metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(3),
key: 'New Value'})
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()),
'New Value')
# Send old update to metadata header
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(2),
key: 'Old Value'})
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get(key.lower()),
'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(4),
key: ''})
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'})
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 204)
self.assert_(key.lower() not in resp.headers)
def test_POST_invalid_partition(self):
req = Request.blank('/sda1/./a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_timestamp_not_float(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_POST_invalid_container_sync_to(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'},
headers={'x-container-sync-to': '192.168.0.1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_POST_after_DELETE_not_found(self):
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c/',
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': '3'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_obj_not_found(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_container_not_found(self):
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_PUT_utf8(self):
snowman = u'\u2603'
container_name = snowman.encode('utf-8')
req = Request.blank(
'/sda1/p/a/%s' % container_name, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
def test_account_update_mismatched_host_device(self):
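        # A single X-Account-Host with two X-Account-Device entries is a
        # malformed account update target and must be rejected with a 400.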
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'},
headers={'X-Timestamp': '0000000001.00000',
'X-Account-Host': '127.0.0.1:0',
'X-Account-Partition': '123',
'X-Account-Device': 'sda1,sda2'})
broker = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
resp = self.controller.account_update(req, 'a', 'c', broker)
self.assertEquals(resp.status_int, 400)
def test_account_update_account_override_deleted(self):
bindsock = listen(('127.0.0.1', 0))
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'},
headers={'X-Timestamp': '0000000001.00000',
'X-Account-Host': '%s:%s' %
bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1',
'X-Account-Override-Deleted': 'yes'})
with save_globals():
new_connect = fake_http_connect(200, count=123)
swift.container.server.http_connect = new_connect
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
def test_PUT_account_update(self):
bindsock = listen(('127.0.0.1', 0))
def accept(return_code, expected_timestamp):
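            # Stand in for the account server: accept one connection,
            # answer with the given status code and check that the
            # container server forwarded the expected X-Put-Timestamp.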
try:
with Timeout(3):
sock, addr = bindsock.accept()
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEquals(inc.readline(),
'PUT /sda1/123/a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != '\r\n':
headers[line.split(':')[0].lower()] = \
line.split(':')[1].strip()
line = inc.readline()
self.assertEquals(headers['x-put-timestamp'],
expected_timestamp)
except BaseException as err:
return err
return None
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0000000001.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 201, '0000000001.00000')
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0000000003.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 404, '0000000003.00000')
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0000000005.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 503, '0000000005.00000')
got_exc = False
try:
with Timeout(3):
resp = req.get_response(self.controller)
except BaseException as err:
got_exc = True
finally:
err = event.wait()
if err:
raise Exception(err)
self.assert_(not got_exc)
def test_PUT_reset_container_sync(self):
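        # Changing X-Container-Sync-To must reset both sync points to -1,
        # while re-sending the same value must leave them untouched.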
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
db.set_x_container_sync_points(123, 456)
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to same value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to new value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 202)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
def test_POST_reset_container_sync(self):
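        # Same as the PUT variant above: only a new X-Container-Sync-To
        # value resets the sync points.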
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
db.set_x_container_sync_points(123, 456)
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to same value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], 123)
self.assertEquals(info['x_container_sync_point2'], 456)
# Set to new value
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'POST'},
headers={'x-timestamp': '1',
'x-container-sync-to': 'http://127.0.0.1:12345/v1/a/c2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
info = db.get_info()
self.assertEquals(info['x_container_sync_point1'], -1)
self.assertEquals(info['x_container_sync_point2'], -1)
def test_DELETE(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': '3'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_PUT_recreate(self):
path = '/sda1/p/a/c'
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(path, method='DELETE',
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(path, method='GET')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404) # sanity
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self.assertEqual(True, db.is_deleted())
info = db.get_info()
self.assertEquals(info['put_timestamp'], normalize_timestamp('1'))
self.assertEquals(info['delete_timestamp'], normalize_timestamp('2'))
# recreate
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '4'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self.assertEqual(False, db.is_deleted())
info = db.get_info()
self.assertEquals(info['put_timestamp'], normalize_timestamp('4'))
self.assertEquals(info['delete_timestamp'], normalize_timestamp('2'))
def test_DELETE_PUT_recreate_replication_race(self):
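        # A recreating PUT can race with replication moving an old copy of
        # the (deleted) DB back onto this node; the PUT must cope with the
        # DB file appearing between its existence check and creation.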
path = '/sda1/p/a/c'
# create a deleted db
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
req = Request.blank(path, method='DELETE',
headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(path, method='GET')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404) # sanity
self.assertEqual(True, db.is_deleted())
# now save a copy of this db (and remove it from the "current node")
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
db_path = db.db_file
other_path = os.path.join(self.testdir, 'othernode.db')
os.rename(db_path, other_path)
# that should make it missing on this node
req = Request.blank(path, method='GET')
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404) # sanity
# setup the race in os.path.exists (first time no, then yes)
mock_called = []
_real_exists = os.path.exists
def mock_exists(db_path):
rv = _real_exists(db_path)
if not mock_called:
# be as careful as we might hope backend replication can be...
with lock_parent_directory(db_path, timeout=1):
os.rename(other_path, db_path)
mock_called.append((rv, db_path))
return rv
req = Request.blank(path, method='PUT',
headers={'X-Timestamp': '4'})
with mock.patch.object(container_server.os.path, 'exists',
mock_exists):
resp = req.get_response(self.controller)
# db was successfully created
self.assertEqual(resp.status_int // 100, 2)
db = self.controller._get_container_broker('sda1', 'p', 'a', 'c')
self.assertEqual(False, db.is_deleted())
# mock proves the race
self.assertEqual(mock_called[:2],
[(exists, db.db_file) for exists in (False, True)])
# info was updated
info = db.get_info()
self.assertEquals(info['put_timestamp'], normalize_timestamp('4'))
self.assertEquals(info['delete_timestamp'], normalize_timestamp('2'))
def test_DELETE_not_found(self):
# Even if the container wasn't previously heard of, the container
# server will accept the delete and replicate it to where it belongs
# later.
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_object(self):
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0',
'HTTP_X_SIZE': 1, 'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '3'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 409)
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '4'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'}, headers={'X-Timestamp': '5'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'}, headers={'X-Timestamp': '6'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_DELETE_account_update(self):
bindsock = listen(('127.0.0.1', 0))
def accept(return_code, expected_timestamp):
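            # Stand in for the account server again, this time verifying
            # the X-Delete-Timestamp forwarded with the container DELETE.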
try:
with Timeout(3):
sock, addr = bindsock.accept()
inc = sock.makefile('rb')
out = sock.makefile('wb')
out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
return_code)
out.flush()
self.assertEquals(inc.readline(),
'PUT /sda1/123/a/c HTTP/1.1\r\n')
headers = {}
line = inc.readline()
while line and line != '\r\n':
headers[line.split(':')[0].lower()] = \
line.split(':')[1].strip()
line = inc.readline()
self.assertEquals(headers['x-delete-timestamp'],
expected_timestamp)
except BaseException as err:
return err
return None
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '0000000002.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 204, '0000000002.00000')
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '2'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '0000000003.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 404, '0000000003.00000')
try:
with Timeout(3):
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
finally:
err = event.wait()
if err:
raise Exception(err)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '4'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': '0000000005.00000',
'X-Account-Host': '%s:%s' % bindsock.getsockname(),
'X-Account-Partition': '123',
'X-Account-Device': 'sda1'})
event = spawn(accept, 503, '0000000005.00000')
got_exc = False
try:
with Timeout(3):
resp = req.get_response(self.controller)
except BaseException as err:
got_exc = True
finally:
err = event.wait()
if err:
raise Exception(err)
self.assert_(not got_exc)
def test_DELETE_invalid_partition(self):
req = Request.blank(
'/sda1/./a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_DELETE_timestamp_not_float(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400)
def test_DELETE_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_GET_over_limit(self):
req = Request.blank(
'/sda1/p/a/c?limit=%d' %
(container_server.CONTAINER_LISTING_LIMIT + 1),
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 412)
def test_GET_json(self):
# make a container
req = Request.blank(
'/sda1/p/a/jsonc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# test an empty container
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 200)
self.assertEquals(simplejson.loads(resp.body), [])
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/jsonc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test format
json_body = [{"name": "0",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "1",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "2",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}]
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(simplejson.loads(resp.body), json_body)
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
for accept in ('application/json', 'application/json;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/json;q=1.0', 'application/*'):
req = Request.blank(
'/sda1/p/a/jsonc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
simplejson.loads(resp.body), json_body,
'Invalid body for Accept: %s' % accept)
self.assertEquals(
resp.content_type, 'application/json',
'Invalid content_type for Accept: %s' % accept)
req = Request.blank(
'/sda1/p/a/jsonc',
environ={'REQUEST_METHOD': 'HEAD'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.content_type, 'application/json',
'Invalid content_type for Accept: %s' % accept)
def test_GET_plain(self):
# make a container
req = Request.blank(
'/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# test an empty container
req = Request.blank(
'/sda1/p/a/plainc', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/plainc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
plain_body = '0\n1\n2\n'
req = Request.blank('/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.body, plain_body)
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
for accept in ('', 'text/plain', 'application/xml;q=0.8,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=0.8', '*/*',
'text/plain,application/xml'):
req = Request.blank(
'/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.body, plain_body,
'Invalid body for Accept: %s' % accept)
self.assertEquals(
resp.content_type, 'text/plain',
'Invalid content_type for Accept: %s' % accept)
req = Request.blank(
'/sda1/p/a/plainc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.content_type, 'text/plain',
'Invalid content_type for Accept: %s' % accept)
# test conflicting formats
req = Request.blank(
'/sda1/p/a/plainc?format=plain',
environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/json'
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.body, plain_body)
# test unknown format uses default plain
req = Request.blank(
'/sda1/p/a/plainc?format=somethingelse',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.body, plain_body)
def test_GET_json_last_modified(self):
# make a container
req = Request.blank(
'/sda1/p/a/jsonc', environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i, d in [(0, 1.5), (1, 1.0), ]:
req = Request.blank(
'/sda1/p/a/jsonc/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': d,
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test format
        # last_modified format must be uniform, even when the timestamp
        # has no fractional seconds
json_body = [{"name": "0",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.500000"},
{"name": "1",
"hash": "x",
"bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}, ]
req = Request.blank(
'/sda1/p/a/jsonc?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(simplejson.loads(resp.body), json_body)
self.assertEquals(resp.charset, 'utf-8')
def test_GET_xml(self):
# make a container
req = Request.blank(
'/sda1/p/a/xmlc', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/xmlc/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
xml_body = '<?xml version="1.0" encoding="UTF-8"?>\n' \
'<container name="xmlc">' \
'<object><name>0</name><hash>x</hash><bytes>0</bytes>' \
'<content_type>text/plain</content_type>' \
'<last_modified>1970-01-01T00:00:01.000000' \
'</last_modified></object>' \
'<object><name>1</name><hash>x</hash><bytes>0</bytes>' \
'<content_type>text/plain</content_type>' \
'<last_modified>1970-01-01T00:00:01.000000' \
'</last_modified></object>' \
'<object><name>2</name><hash>x</hash><bytes>0</bytes>' \
'<content_type>text/plain</content_type>' \
'<last_modified>1970-01-01T00:00:01.000000' \
'</last_modified></object>' \
'</container>'
# tests
req = Request.blank(
'/sda1/p/a/xmlc?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
self.assertEquals(resp.body, xml_body)
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/xmlc?format=xml',
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
for xml_accept in (
'application/xml', 'application/xml;q=1.0,*/*;q=0.9',
'*/*;q=0.9,application/xml;q=1.0', 'application/xml,text/xml'):
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = xml_accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.body, xml_body,
'Invalid body for Accept: %s' % xml_accept)
self.assertEquals(
resp.content_type, 'application/xml',
'Invalid content_type for Accept: %s' % xml_accept)
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'HEAD'})
req.accept = xml_accept
resp = req.get_response(self.controller)
self.assertEquals(
resp.content_type, 'application/xml',
'Invalid content_type for Accept: %s' % xml_accept)
req = Request.blank(
'/sda1/p/a/xmlc',
environ={'REQUEST_METHOD': 'GET'})
req.accept = 'text/xml'
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/xml')
self.assertEquals(resp.body, xml_body)
def test_GET_marker(self):
# make a container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/c/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test limit with marker
req = Request.blank('/sda1/p/a/c?limit=2&marker=1',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split()
self.assertEquals(result, ['2', ])
def test_weird_content_types(self):
snowman = u'\u2603'
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i, ctype in enumerate((snowman.encode('utf-8'),
'text/plain; charset="utf-8"')):
req = Request.blank(
'/sda1/p/a/c/%s' % i, environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1', 'HTTP_X_CONTENT_TYPE': ctype,
'HTTP_X_ETAG': 'x', 'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = [x['content_type'] for x in simplejson.loads(resp.body)]
self.assertEquals(result, [u'\u2603', 'text/plain;charset="utf-8"'])
def test_GET_accept_not_valid(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/xml*'
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 406)
def test_GET_limit(self):
# make a container
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
# fill the container
for i in range(3):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
# test limit
req = Request.blank(
'/sda1/p/a/c?limit=2', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
result = resp.body.split()
self.assertEquals(result, ['0', '1'])
def test_GET_prefix(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('a1', 'b1', 'a2', 'b2', 'a3', 'b3'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain',
'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.body.split(), ['a1', 'a2', 'a3'])
def test_GET_delimiter_too_long(self):
req = Request.blank('/sda1/p/a/c?delimiter=xx',
environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 412)
def test_GET_delimiter(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=US-&delimiter=-&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(
simplejson.loads(resp.body),
[{"subdir": "US-OK-"},
{"subdir": "US-TX-"},
{"subdir": "US-UT-"}])
def test_GET_delimiter_xml(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US-TX-A', 'US-TX-B', 'US-OK-A', 'US-OK-B', 'US-UT-A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=US-&delimiter=-&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(
resp.body, '<?xml version="1.0" encoding="UTF-8"?>'
'\n<container name="c"><subdir name="US-OK-">'
'<name>US-OK-</name></subdir>'
'<subdir name="US-TX-"><name>US-TX-</name></subdir>'
'<subdir name="US-UT-"><name>US-UT-</name></subdir></container>')
def test_GET_delimiter_xml_with_quotes(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c/<\'sub\' "dir">/object',
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?delimiter=/&format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
dom = minidom.parseString(resp.body)
self.assert_(len(dom.getElementsByTagName('container')) == 1)
container = dom.getElementsByTagName('container')[0]
self.assert_(len(container.getElementsByTagName('subdir')) == 1)
subdir = container.getElementsByTagName('subdir')[0]
self.assertEquals(unicode(subdir.attributes['name'].value),
u'<\'sub\' "dir">/')
self.assert_(len(subdir.getElementsByTagName('name')) == 1)
name = subdir.getElementsByTagName('name')[0]
self.assertEquals(unicode(name.childNodes[0].data),
u'<\'sub\' "dir">/')
def test_GET_path(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US/TX', 'US/TX/B', 'US/OK', 'US/OK/B', 'US/UT/A'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?path=US&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(
simplejson.loads(resp.body),
[{"name": "US/OK", "hash": "x", "bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"},
{"name": "US/TX", "hash": "x", "bytes": 0,
"content_type": "text/plain",
"last_modified": "1970-01-01T00:00:01.000000"}])
def test_GET_insufficient_storage(self):
self.controller = container_server.ContainerController(
{'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a/c', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 507)
def test_through_call(self):
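        # Drive the controller as a bare WSGI application; a GET for an
        # uncreated container should produce a 404 status line.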
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '404 ')
def test_through_call_invalid_path(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '/bob',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '400 ')
def test_through_call_invalid_path_utf8(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'PATH_INFO': '\x00',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '412 ')
def test_invalid_method_doesnt_exist(self):
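        # An unknown request method must be answered with 405.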
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': 'method_doesnt_exist',
'PATH_INFO': '/sda1/p/a/c'},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '405 ')
def test_invalid_method_is_not_public(self):
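        # An existing but non-public attribute such as __init__ must not
        # be reachable as an HTTP method and must also yield 405.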
errbuf = StringIO()
outbuf = StringIO()
def start_response(*args):
outbuf.writelines(args)
self.controller.__call__({'REQUEST_METHOD': '__init__',
'PATH_INFO': '/sda1/p/a/c'},
start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '405 ')
def test_params_format(self):
req = Request.blank(
'/sda1/p/a/c',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'})
req.get_response(self.controller)
for format in ('xml', 'json'):
req = Request.blank('/sda1/p/a/c?format=%s' % format,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 200)
def test_params_utf8(self):
        # A bad UTF-8 sequence in any of these parameters should cause a
        # 400 error
for param in ('delimiter', 'limit', 'marker', 'path', 'prefix',
'end_marker', 'format'):
req = Request.blank('/sda1/p/a/c?%s=\xce' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 400,
"%d on param %s" % (resp.status_int, param))
        # A good UTF-8 sequence for delimiter, but too long (only 1-byte
        # delimiters are allowed)
req = Request.blank('/sda1/p/a/c?delimiter=\xce\xa9',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 412,
"%d on param delimiter" % (resp.status_int))
req = Request.blank('/sda1/p/a/c',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'})
req.get_response(self.controller)
        # A good UTF-8 sequence is ignored for limit and doesn't affect
        # the other query parameters
for param in ('limit', 'marker', 'path', 'prefix', 'end_marker',
'format'):
req = Request.blank('/sda1/p/a/c?%s=\xce\xa9' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204,
"%d on param %s" % (resp.status_int, param))
def test_put_auto_create(self):
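        # Object updates for a missing container normally 404; only
        # auto-create accounts (names starting with '.') create the
        # container on demand, and a dot-prefixed container or object
        # name does not trigger auto-creation.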
headers = {'x-timestamp': normalize_timestamp(1),
'x-size': '0',
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e'}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/.a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/.c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/a/c/.o',
environ={'REQUEST_METHOD': 'PUT'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_delete_auto_create(self):
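        # Same auto-create rules apply to object DELETE records.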
headers = {'x-timestamp': normalize_timestamp(1)}
req = Request.blank('/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/.a/c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a/.c/o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
req = Request.blank('/sda1/p/a/.c/.o',
environ={'REQUEST_METHOD': 'DELETE'},
headers=dict(headers))
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 404)
def test_content_type_on_HEAD(self):
Request.blank('/sda1/p/a/o',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.controller)
env = {'REQUEST_METHOD': 'HEAD'}
req = Request.blank('/sda1/p/a/o?format=xml', environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/o?format=json', environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank('/sda1/p/a/o', environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'text/plain')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/o', headers={'Accept': 'application/json'}, environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(resp.charset, 'utf-8')
req = Request.blank(
'/sda1/p/a/o', headers={'Accept': 'application/xml'}, environ=env)
resp = req.get_response(self.controller)
self.assertEquals(resp.content_type, 'application/xml')
self.assertEquals(resp.charset, 'utf-8')
def test_updating_multiple_container_servers(self):
http_connect_args = []
def fake_http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
class SuccessfulFakeConn(object):
@property
def status(self):
return 200
def getresponse(self):
return self
def read(self):
return ''
captured_args = {'ipaddr': ipaddr, 'port': port,
'device': device, 'partition': partition,
'method': method, 'path': path, 'ssl': ssl,
'headers': headers, 'query_string': query_string}
http_connect_args.append(
dict((k, v) for k, v in captured_args.iteritems()
if v is not None))
            # Pretend the account server replied successfully so the
            # container server's account_update() sees a 200 response.
            return SuccessfulFakeConn()
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '12345',
'X-Account-Partition': '30',
'X-Account-Host': '1.2.3.4:5, 6.7.8.9:10',
'X-Account-Device': 'sdb1, sdf1'})
orig_http_connect = container_server.http_connect
try:
container_server.http_connect = fake_http_connect
req.get_response(self.controller)
finally:
container_server.http_connect = orig_http_connect
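        # One account-server update is expected per entry in the
        # comma-separated X-Account-Host / X-Account-Device headers above.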
http_connect_args.sort(key=operator.itemgetter('ipaddr'))
self.assertEquals(len(http_connect_args), 2)
self.assertEquals(
http_connect_args[0],
{'ipaddr': '1.2.3.4',
'port': '5',
'path': '/a/c',
'device': 'sdb1',
'partition': '30',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-bytes-used': 0,
'x-delete-timestamp': '0',
'x-object-count': 0,
'x-put-timestamp': '0000012345.00000',
'referer': 'PUT http://localhost/sda1/p/a/c',
'user-agent': 'container-server %d' % os.getpid(),
'x-trans-id': '-'})})
self.assertEquals(
http_connect_args[1],
{'ipaddr': '6.7.8.9',
'port': '10',
'path': '/a/c',
'device': 'sdf1',
'partition': '30',
'method': 'PUT',
'ssl': False,
'headers': HeaderKeyDict({
'x-bytes-used': 0,
'x-delete-timestamp': '0',
'x-object-count': 0,
'x-put-timestamp': '0000012345.00000',
'referer': 'PUT http://localhost/sda1/p/a/c',
'user-agent': 'container-server %d' % os.getpid(),
'x-trans-id': '-'})})
def test_serv_reserv(self):
        # Test that the replication_server flag is set from the
        # configuration file.
container_controller = container_server.ContainerController
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.assertEquals(container_controller(conf).replication_server, None)
for val in [True, '1', 'True', 'true']:
conf['replication_server'] = val
self.assertTrue(container_controller(conf).replication_server)
for val in [False, 0, '0', 'False', 'false', 'test_string']:
conf['replication_server'] = val
self.assertFalse(container_controller(conf).replication_server)
def test_list_allowed_methods(self):
# Test list of allowed_methods
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
repl_methods = ['REPLICATE']
for method_name in obj_methods:
method = getattr(self.controller, method_name)
self.assertFalse(hasattr(method, 'replication'))
for method_name in repl_methods:
method = getattr(self.controller, method_name)
self.assertEquals(method.replication, True)
def test_correct_allowed_method(self):
        # Test that an allowed (public) method is dispatched correctly via
        # swift.container.server.ContainerController.__call__.
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'})
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
method_res = mock.MagicMock()
mock_method = public(lambda x: mock.MagicMock(return_value=method_res))
with mock.patch.object(self.controller, method, new=mock_method):
response = self.controller.__call__(env, start_response)
self.assertEqual(response, method_res)
def test_not_allowed_method(self):
        # Test that a replication-only method is rejected via
        # swift.container.server.ContainerController.__call__ when
        # replication_server is false.
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = container_server.ContainerController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'false'})
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
method = 'PUT'
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
answer = ['<html><h1>Method Not Allowed</h1><p>The method is not '
'allowed for this resource.</p></html>']
mock_method = replication(public(lambda x: mock.MagicMock()))
with mock.patch.object(self.controller, method, new=mock_method):
response = self.controller.__call__(env, start_response)
self.assertEqual(response, answer)
def test_GET_log_requests_true(self):
self.controller.logger = FakeLogger()
self.controller.log_requests = True
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue(self.controller.logger.log_dict['info'])
def test_GET_log_requests_false(self):
self.controller.logger = FakeLogger()
self.controller.log_requests = False
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertFalse(self.controller.logger.log_dict['info'])
if __name__ == '__main__':
unittest.main()
| NeCTAR-RC/swift | test/unit/container/test_server.py | Python | apache-2.0 | 88,183 |
from datetime import date, datetime
from functools import wraps
try:
# PY2
from urllib import quote_plus
except ImportError:
# PY3
from urllib.parse import quote_plus
# parts of URL to be omitted
SKIP_IN_PATH = (None, '', [], ())
def _escape(value):
"""
Escape a single value of a URL string or a query parameter. If it is a list
or tuple, turn it into a comma-separated string first.
"""
    # make sequences into comma-separated strings
if isinstance(value, (list, tuple)):
value = u','.join(value)
# dates and datetimes into isoformat
elif isinstance(value, (date, datetime)):
value = value.isoformat()
# make bools into true/false strings
elif isinstance(value, bool):
value = str(value).lower()
# encode strings to utf-8
if isinstance(value, (type(''), type(u''))):
try:
return value.encode('utf-8')
except UnicodeDecodeError:
# Python 2 and str, no need to re-encode
pass
return str(value)
def _make_path(*parts):
"""
Create a URL string from parts, omit all `None` values and empty strings.
    Convert lists and tuples to comma-separated values.
    """
    # TODO: maybe only allow some parts to be lists/tuples?
return '/' + '/'.join(
# preserve ',' and '*' in url for nicer URLs in logs
quote_plus(_escape(p), ',*') for p in parts if p not in SKIP_IN_PATH)
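# Illustrative behaviour of the helpers above (not part of the module):
#
#   _make_path('twitter', None, 'tweet', ('1', '2'))  ->  '/twitter/tweet/1,2'
#
# The None is dropped via SKIP_IN_PATH and the tuple is joined with ','.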
# parameters that apply to all methods
GLOBAL_PARAMS = ('pretty', )
def query_params(*es_query_params):
"""
    Decorator that pops all accepted parameters from the method's kwargs and
    puts them in the params argument.
"""
def _wrapper(func):
@wraps(func)
def _wrapped(*args, **kwargs):
params = kwargs.pop('params', {})
for p in es_query_params + GLOBAL_PARAMS:
if p in kwargs:
params[p] = _escape(kwargs.pop(p))
# don't treat ignore as other params to avoid escaping
if 'ignore' in kwargs:
params['ignore'] = kwargs.pop('ignore')
return func(*args, params=params, **kwargs)
return _wrapped
return _wrapper
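# Usage sketch (hypothetical method; the concrete API classes define the real
# ones): the decorator pops the whitelisted kwargs into `params` before the
# wrapped method runs.
#
#   @query_params('timeout', 'routing')
#   def index(self, index, doc_type, body, params=None):
#       ...  # params may now carry timeout/routing plus GLOBAL_PARAMS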
class NamespacedClient(object):
def __init__(self, client):
self.client = client
@property
def transport(self):
return self.client.transport
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/elasticsearch/client/utils.py | Python | agpl-3.0 | 2,382 |
"""
Tests for `_ods`.
"""
# Copyright (C) 2009-2021 Thomas Aglassinger
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import unittest
from tests import _ods, dev_test
class OdsTest(unittest.TestCase):
def test_can_convert_ods_to_csv(self):
source_ods_path = dev_test.path_to_test_data("valid_customers.ods")
target_path = dev_test.path_to_test_result("valid_customers_from__ods.csv")
_ods.main([source_ods_path, target_path])
def test_can_convert_ods_to_rst(self):
source_ods_path = dev_test.path_to_test_data("valid_customers.ods")
target_path = dev_test.path_to_test_result("valid_customers_from__ods.rst")
_ods.main(["--format=rst", source_ods_path, target_path])
def test_fails_on_kinky_file_name(self):
source_ods_path = dev_test.path_to_test_data("valid_customers.ods")
target_path = dev_test.path_to_test_result("kinky_file_name//\\:^$\\::/")
self.assertRaises(SystemExit, _ods.main, [source_ods_path, target_path])
def test_fails_without_command_line_arguments(self):
self.assertRaises(SystemExit, _ods.main, [])
def test_fails_on_broken_sheet(self):
source_ods_path = dev_test.path_to_test_data("valid_customers.ods")
target_path = dev_test.path_to_test_result("valid_customers_from__ods.csv")
self.assertRaises(SystemExit, _ods.main, ["--sheet=x", source_ods_path, target_path])
self.assertRaises(SystemExit, _ods.main, ["--sheet=0", source_ods_path, target_path])
self.assertRaises(SystemExit, _ods.main, ["--sheet=17", source_ods_path, target_path])
if __name__ == "__main__": # pragma: no cover
logging.basicConfig(level=logging.INFO)
unittest.main()
| roskakori/cutplace | tests/test_ods.py | Python | lgpl-3.0 | 2,347 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.utils.translation import ugettext_lazy as _
from filer.models import File
from shuup.admin.base import AdminModule, MenuEntry
from shuup.admin.menu import STOREFRONT_MENU_CATEGORY
from shuup.admin.utils.permissions import get_default_model_permissions
from shuup.admin.utils.urls import admin_url
class MediaModule(AdminModule):
"""
A module for handling site media.
Basically a frontend for the Django-Filer app.
"""
name = _("Media")
def get_urls(self):
return [
admin_url(
"^media/$",
"shuup.admin.modules.media.views.MediaBrowserView",
name="media.browse",
permissions=get_default_model_permissions(File),
),
]
def get_required_permissions(self):
return get_default_model_permissions(File)
def get_menu_entries(self, request):
return [
MenuEntry(
text=_("Media browser"),
icon="fa fa-folder-open",
url="shuup_admin:media.browse",
category=STOREFRONT_MENU_CATEGORY,
ordering=2
),
]
| suutari/shoop | shuup/admin/modules/media/__init__.py | Python | agpl-3.0 | 1,421 |
import time
import datetime
import random
import threading
import slackbot_settings as settings
import slacker
from log import logger
from cron import CronScheduleTime, parse_cron_schedule
def _run_thread(slackclient, cron_schedule_times, channel_ids, messages):
while True:
now = datetime.datetime.now()
logger().info('checking announce thread')
for cron_schedule_time in cron_schedule_times:
logger().debug('checking cron-time: %s', cron_schedule_time)
if cron_schedule_time.is_on(now):
message = random.choice(messages)
logger().info('schedule time found, sending message: \'%s\'', message)
for channel_id in channel_ids:
try:
slackclient.send_message(channel_id, message)
except slacker.Error as e:
logger().error('Error sending Slack message to channel \'%s\': %s', channel_id, e)
break
time.sleep(60)
def start_announce_thread(slackclient):
will_announce = getattr(settings, 'ANNOUNCE_WILL_LAUNCH', False)
if will_announce:
channel_ids = []
if type(settings.ANNOUNCE_CHANNEL_NAMES) is str:
channel_id = slackclient.find_channel_by_name(
settings.ANNOUNCE_CHANNEL_NAMES)
if not channel_id:
raise RuntimeError(
'channel id not found for \'{}\''.format(
settings.ANNOUNCE_CHANNEL_NAMES))
channel_ids.append(channel_id)
else:
            for channel_name in settings.ANNOUNCE_CHANNEL_NAMES:
                channel_id = slackclient.find_channel_by_name(channel_name)
                if not channel_id:
                    raise RuntimeError(
                        'channel id not found for \'{}\''.format(channel_name))
channel_ids.append(channel_id)
messages = []
if type(settings.ANNOUNCE_MESSAGES) is str:
messages.append(settings.ANNOUNCE_MESSAGES)
else:
messages.extend(settings.ANNOUNCE_MESSAGES)
cron_schedule_times = []
for cron_schedule_time in settings.ANNOUNCE_CRON_SCHEDULE_TIMES:
if type(cron_schedule_time) is CronScheduleTime:
cron_schedule_times.append(cron_schedule_time)
else:
cron_schedule_times.append(
parse_cron_schedule(cron_schedule_time))
thread = threading.Thread(
target=_run_thread,
args=(
slackclient,
cron_schedule_times,
channel_ids,
messages,
),
daemon=True)
thread.start()
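# Illustrative slackbot_settings values (names taken from the lookups above;
# the cron string format is an assumption based on parse_cron_schedule):
#
#   ANNOUNCE_WILL_LAUNCH = True
#   ANNOUNCE_CHANNEL_NAMES = ['#lunch', '#general']
#   ANNOUNCE_MESSAGES = 'Food order launching soon!'
#   ANNOUNCE_CRON_SCHEDULE_TIMES = ['30 11 * * 1-5']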
| murrple-1/foodorder-slack | foodorder-slackbot/announcethread.py | Python | mit | 2,765 |
# Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains domain objects for storing page session stats as provided by a
HTTP Archive file (also referred to as HAR).
Selenium and Browsermob-proxy are used for capturing session information, such
as load times and page size statistics. Timing statistics are retrieved directly
from the browser console rather than the proxy server, as the latter is sluggish
and gives inaccurate timings.
"""
import utils
class PageSessionMetrics(object):
"""Contains methods to process stats and provide performance metrics.
page_session_stats is a dictionary containing page load statistics from an
HTTP Archive.
(https://dvcs.w3.org/hg/webperf/raw-file/tip/specs/HAR/Overview.html)
page_session_timings is a dictionary containing metrics associated with page
loading and includes the following keys:
timing: maps to a dict containing the keys (all in milliseconds):
        connectEnd: the Unix time the user agent finishes establishing the
            connection to the server to retrieve the current document.
        connectStart: the Unix time before the user agent starts
            establishing the connection to the server to retrieve the
            document.
domainLookupEnd: the Unix time the domain lookup ends.
domainLookupStart: the Unix time the domain lookup starts.
domComplete: the Unix time user agent sets the current document
readiness to "complete".
        domInteractive: the Unix time the parser finished its work on
            the main document.
domLoading: the Unix time the DOM started loading.
fetchStart: the Unix time the page began loading.
loadEventEnd: the Unix time the load event of the current document
is completed.
loadEventStart: the Unix time the load event of the current document
is fired.
navigationStart: the Unix time the prompt for unload terminates
on the previous document in the same browsing context.
redirectEnd: the Unix time the last HTTP redirect is completed, that
is when the last byte of the HTTP response has been received.
redirectStart: the Unix time the first HTTP redirect starts.
requestStart: the Unix time the request started.
responseEnd: the Unix time the request finished.
responseStart: the Unix time the response started.
unloadEventEnd: the Unix time the unload event handler finishes.
unloadEventStart: the Unix time the unload event has been thrown.
"""
TIMING_PROPERTIES = [
'connectEnd', 'connectStart', 'domainLookupEnd', 'domainLookupStart',
'domComplete', 'domInteractive', 'domLoading', 'fetchStart',
'loadEventEnd', 'loadEventStart', 'navigationStart', 'redirectEnd',
'redirectStart', 'requestStart', 'responseEnd', 'responseStart',
'unloadEventEnd', 'unloadEventStart'
]
def __init__(self, page_session_stats=None, page_session_timings=None):
self.page_session_stats = page_session_stats
self.page_session_timings = page_session_timings
self.page_load_timings = {}
if self.page_session_timings:
self.page_load_timings = page_session_timings['timing']
self._validate()
self.print_details()
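    # Construction sketch (assumed shapes, mirroring the docstring above; a
    # real 'timing' dict must contain every key in TIMING_PROPERTIES or
    # _validate() raises):
    #
    #   metrics = PageSessionMetrics(
    #       page_session_timings={'timing': {'fetchStart': 1000, ...}})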
def _validate(self):
"""Validates various properties of a PageSessionMetrics object."""
if not self.page_session_stats and not self.page_session_timings:
raise utils.ValidationError(
'Expected one of page_session_stats or page_session_timings '
'to be provided.')
if self.page_session_stats:
if 'log' not in self.page_session_stats:
raise utils.ValidationError(
'Expected the page load stats to have a \'log\' entry')
if 'entries' not in self.page_session_stats['log']:
raise utils.ValidationError(
'Expected the log entry of the page load stats to include '
'an additional \'entries\' element')
for entry in self.page_session_stats['log']['entries']:
if '_error' in entry['response']:
raise utils.ValidationError(
'Expected a valid server response, found server '
'not reachable.')
if self.get_request_count() == 0:
raise utils.ValidationError(
'Expected the log entry of the page load stats to include '
'a positive number of requests.')
if self.get_total_page_size_bytes() == 0:
raise utils.ValidationError(
'Expected the total size of a page including all its '
'resources to be positive.')
if self.page_session_timings:
for timing_prop in self.TIMING_PROPERTIES:
if timing_prop not in self.page_load_timings:
raise utils.ValidationError(
'Expected the timing entry of the page load timings to '
'include %s property' % timing_prop)
if self.get_page_load_time_millisecs() < 0:
raise utils.ValidationError(
'Expected the page load time to be positive.')
if self.get_dom_ready_time_millisecs() < 0:
raise utils.ValidationError(
'Expected the dom ready time to be positive.')
if self.get_request_time_millisecs() < 0:
raise utils.ValidationError(
'Expected the request time to be positive.')
if self.get_ready_start_time_millisecs() < 0:
raise utils.ValidationError(
'Expected the ready start time to be positive.')
if self.get_redirect_time_millisecs() < 0:
raise utils.ValidationError(
'Expected the redirect time to be positive.')
if self.get_appcache_time_millisecs() < 0:
raise utils.ValidationError(
'Expected the appcache time to be positive.')
if self.get_unload_event_time_millisecs() < 0:
raise utils.ValidationError(
'Expected the unload time to be positive.')
if self.get_lookup_domain_time_millisecs() < 0:
raise utils.ValidationError(
'Expected the domain lookup time to be positive.')
if self.get_connect_time_millisecs() < 0:
raise utils.ValidationError(
'Expected the connect time to be positive.')
if self.get_init_dom_tree_time_millisecs() < 0:
raise utils.ValidationError(
'Expected the init dom tree time to be positive.')
if self.get_load_event_time_millisecs() < 0:
raise utils.ValidationError(
'Expected the load time to be positive.')
def get_request_count(self):
"""Returns the number of requests made prior to the page load
completing.
"""
return len(self.page_session_stats['log']['entries'])
def get_total_page_size_bytes(self):
"""Returns the total size of a page including all of its resources."""
total_size = 0
for entry in self.page_session_stats['log']['entries']:
total_size += int(entry['response']['bodySize'])
return total_size
def _get_duration_millisecs(self, event_end, event_initial):
# Timestamps are in milliseconds.
initial_timestamp = self.page_load_timings[event_initial]
end_timestamp = self.page_load_timings[event_end]
return end_timestamp - initial_timestamp
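    # Worked example (hypothetical timings): with fetchStart=1000 and
    # loadEventEnd=3500 in page_load_timings, the helper above gives
    # get_page_load_time_millisecs() == 3500 - 1000 == 2500.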
def get_page_load_time_millisecs(self):
"""Returns the total page load time."""
return self._get_duration_millisecs('loadEventEnd', 'fetchStart')
def get_dom_ready_time_millisecs(self):
"""Returns the time spent constructing the dom tree."""
return self._get_duration_millisecs('domComplete', 'domInteractive')
def get_request_time_millisecs(self):
"""Returns the time spent during request."""
return self._get_duration_millisecs('responseEnd', 'requestStart')
def get_ready_start_time_millisecs(self):
"""Returns the time consumed preparing the new page."""
return self._get_duration_millisecs('fetchStart', 'navigationStart')
def get_redirect_time_millisecs(self):
"""Returns the time spent during redirection."""
return self._get_duration_millisecs('redirectEnd', 'redirectStart')
def get_appcache_time_millisecs(self):
"""Returns the time spent for appcache."""
return self._get_duration_millisecs('domainLookupStart', 'fetchStart')
def get_unload_event_time_millisecs(self):
"""Returns the time spent unloading documents."""
return self._get_duration_millisecs(
'unloadEventEnd', 'unloadEventStart')
def get_lookup_domain_time_millisecs(self):
"""Returns the time spent for the domain name lookup for the current
document.
"""
return self._get_duration_millisecs(
'domainLookupEnd', 'domainLookupStart')
def get_connect_time_millisecs(self):
"""Returns the time spent for establishing the connection to the server
to retrieve the current document.
"""
return self._get_duration_millisecs('connectEnd', 'connectStart')
def get_init_dom_tree_time_millisecs(self):
"""Returns the time from request to completion of DOM loading."""
return self._get_duration_millisecs('domInteractive', 'responseEnd')
def get_load_event_time_millisecs(self):
"""Returns the time spent for completion of the load event of the
current document. The load event is fired when a resource and its
dependent resources have finished loading.
"""
return self._get_duration_millisecs('loadEventEnd', 'loadEventStart')
def print_details(self):
"""Helper function to print details for all the events."""
if self.page_session_stats:
print 'Total number of requests: %d' % self.get_request_count()
print ('Total page size in bytes: %d'
% self.get_total_page_size_bytes())
else:
print 'Page session stats are not available.'
if self.page_session_timings:
print 'Page load time: %d' % self.get_page_load_time_millisecs()
print 'Dom ready time: %d' % self.get_dom_ready_time_millisecs()
print 'Request time: %d' % self.get_request_time_millisecs()
print 'Ready start time: %d' % self.get_ready_start_time_millisecs()
print 'Redirect time: %d' % self.get_redirect_time_millisecs()
print 'Appcache time: %d' % self.get_appcache_time_millisecs()
print ('Unload event time: %d'
% self.get_unload_event_time_millisecs())
print 'DNS query time: %d' % self.get_lookup_domain_time_millisecs()
print ('TCP connection time: %d'
% self.get_connect_time_millisecs())
print ('Init domtree time: %d'
% self.get_init_dom_tree_time_millisecs())
print 'Load event time: %d' % self.get_load_event_time_millisecs()
else:
print 'Page session timings are not available.'
class MultiplePageSessionMetrics(object):
"""Domain object for multiple PageSessionMetrics to provide average
metrics, so as to reduce the variation between statistics obtained during
different page load sessions. This may happen due to various factors like
background processes.
"""
def __init__(self, page_session_metrics):
self.page_metrics = page_session_metrics
self._validate()
def _validate(self):
if not isinstance(self.page_metrics, list):
raise utils.ValidationError(
'Expected page_session_metrics to be a list, '
'received %s' % self.page_metrics)
def get_average_page_load_time_millisecs(self):
"""Returns the average total page load time (in milliseconds)."""
return (sum(item.get_page_load_time_millisecs()
for item in self.page_metrics)) / len(self.page_metrics)
def get_average_dom_ready_time_millisecs(self):
"""Returns the average dom ready time (in milliseconds)."""
return (sum(item.get_dom_ready_time_millisecs()
for item in self.page_metrics)) / len(self.page_metrics)
def get_average_request_time_millisecs(self):
"""Returns the average request time (in milliseconds)."""
return (sum(item.get_request_time_millisecs()
for item in self.page_metrics)) / len(self.page_metrics)
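    # Note: under Python 2 the averages above use integer division, so the
    # results are truncated to whole milliseconds.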
| himanshu-dixit/oppia | core/tests/performance_framework/perf_domain.py | Python | apache-2.0 | 13,677 |
# -*- coding: utf-8 -*-
"""
Authors: Hung-Hsin Chen <chenhh@par.cse.nsysu.edu.tw>
License: GPL v2
"""
import numpy as np
import pandas as pd
def sharpe(series):
"""
Sharpe ratio
note: the numpy std() function is the population estimator
Parameters:
---------------
series: list or numpy.array, ROI series
"""
s = np.asarray(series)
try:
val = s.mean() / s.std()
except FloatingPointError:
# set 0 when standard deviation is zero
val = 0
return val
def sortino_full(series, mar=0):
"""
Sortino ratio, using all periods of the series
Parameters:
---------------
series: list or numpy.array, ROI series
mar: float, minimum acceptable return, usually set to 0
"""
s = np.asarray(series)
mean = s.mean()
semi_std = np.sqrt(((s * ((s - mar) < 0)) ** 2).mean())
try:
val = mean / semi_std
except FloatingPointError:
# set 0 when semi-standard deviation is zero
val = 0
return val, semi_std
def sortino_partial(series, mar=0):
"""
Sortino ratio, using only negative roi periods of the series
Parameters:
---------------
series: list or numpy.array, ROI series
mar: float, minimum acceptable return, usually set to 0
"""
s = np.asarray(series)
mean = s.mean()
n_neg_period = ((s - mar) < 0).sum()
try:
semi_std = np.sqrt(((s * ((s - mar) < 0)) ** 2).sum() / n_neg_period)
val = mean / semi_std
except FloatingPointError:
# set 0 when semi-standard deviation or negative period is zero
val, semi_std = 0, 0
return val, semi_std
def maximum_drawdown(series):
"""
https://en.wikipedia.org/wiki/Drawdown_(economics)
the peak may be zero
e.g.
s= [0, -0.4, -0.2, 0.2]
peak = [0, 0, 0, 0.2]
therefore we don't provide relative percentage of mdd
Parameters:
---------------
series: list or numpy.array, ROI series
"""
s = np.asarray(series)
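    # NOTE: pd.expanding_max was removed in newer pandas (>= 0.23); there,
    # pd.Series(s).expanding().max().values is the equivalent call.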
peak = pd.expanding_max(s)
# absolute drawdown
ad = np.maximum(peak - s, 0)
mad = np.max(ad)
return mad
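# Usage sketch (illustrative ROI series only):
#
#   rois = [0.01, -0.02, 0.015, -0.005]
#   sharpe(rois)            # mean/std ratio
#   sortino_full(rois)      # -> (ratio, semi_std) over all periods
#   maximum_drawdown(rois)  # absolute peak-to-trough drop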
| chenhh/PySPPortfolio | PySPPortfolio/pysp_portfolio/utils.py | Python | gpl-3.0 | 2,140 |
'''
Copied from pyglet 1.2 lib/x11
Wrapper for X11
Generated with:
tools/genwrappers.py xlib
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library('X11')
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
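# (The loop above picks whichever fixed-width integer matches the platform's
# size_t, so c_ptrdiff_t ends up pointer-sized on 32- and 64-bit builds alike.)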
class c_void(Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', c_int)]
XlibSpecificationRelease = 6 # /usr/include/X11/Xlib.h:39
X_PROTOCOL = 11 # /usr/include/X11/X.h:53
X_PROTOCOL_REVISION = 0 # /usr/include/X11/X.h:54
XID = c_ulong # /usr/include/X11/X.h:66
Mask = c_ulong # /usr/include/X11/X.h:70
Atom = c_ulong # /usr/include/X11/X.h:74
VisualID = c_ulong # /usr/include/X11/X.h:76
Time = c_ulong # /usr/include/X11/X.h:77
Window = XID # /usr/include/X11/X.h:96
Drawable = XID # /usr/include/X11/X.h:97
Font = XID # /usr/include/X11/X.h:100
Pixmap = XID # /usr/include/X11/X.h:102
Cursor = XID # /usr/include/X11/X.h:103
Colormap = XID # /usr/include/X11/X.h:104
GContext = XID # /usr/include/X11/X.h:105
KeySym = XID # /usr/include/X11/X.h:106
KeyCode = c_ubyte # /usr/include/X11/X.h:108
None_ = 0 # /usr/include/X11/X.h:115
ParentRelative = 1 # /usr/include/X11/X.h:118
CopyFromParent = 0 # /usr/include/X11/X.h:121
PointerWindow = 0 # /usr/include/X11/X.h:126
InputFocus = 1 # /usr/include/X11/X.h:127
PointerRoot = 1 # /usr/include/X11/X.h:129
AnyPropertyType = 0 # /usr/include/X11/X.h:131
AnyKey = 0 # /usr/include/X11/X.h:133
AnyButton = 0 # /usr/include/X11/X.h:135
AllTemporary = 0 # /usr/include/X11/X.h:137
CurrentTime = 0 # /usr/include/X11/X.h:139
NoSymbol = 0 # /usr/include/X11/X.h:141
NoEventMask = 0 # /usr/include/X11/X.h:150
KeyPressMask = 1 # /usr/include/X11/X.h:151
KeyReleaseMask = 2 # /usr/include/X11/X.h:152
ButtonPressMask = 4 # /usr/include/X11/X.h:153
ButtonReleaseMask = 8 # /usr/include/X11/X.h:154
EnterWindowMask = 16 # /usr/include/X11/X.h:155
LeaveWindowMask = 32 # /usr/include/X11/X.h:156
PointerMotionMask = 64 # /usr/include/X11/X.h:157
PointerMotionHintMask = 128 # /usr/include/X11/X.h:158
Button1MotionMask = 256 # /usr/include/X11/X.h:159
Button2MotionMask = 512 # /usr/include/X11/X.h:160
Button3MotionMask = 1024 # /usr/include/X11/X.h:161
Button4MotionMask = 2048 # /usr/include/X11/X.h:162
Button5MotionMask = 4096 # /usr/include/X11/X.h:163
ButtonMotionMask = 8192 # /usr/include/X11/X.h:164
KeymapStateMask = 16384 # /usr/include/X11/X.h:165
ExposureMask = 32768 # /usr/include/X11/X.h:166
VisibilityChangeMask = 65536 # /usr/include/X11/X.h:167
StructureNotifyMask = 131072 # /usr/include/X11/X.h:168
ResizeRedirectMask = 262144 # /usr/include/X11/X.h:169
SubstructureNotifyMask = 524288 # /usr/include/X11/X.h:170
SubstructureRedirectMask = 1048576 # /usr/include/X11/X.h:171
FocusChangeMask = 2097152 # /usr/include/X11/X.h:172
PropertyChangeMask = 4194304 # /usr/include/X11/X.h:173
ColormapChangeMask = 8388608 # /usr/include/X11/X.h:174
OwnerGrabButtonMask = 16777216 # /usr/include/X11/X.h:175
KeyPress = 2 # /usr/include/X11/X.h:181
KeyRelease = 3 # /usr/include/X11/X.h:182
ButtonPress = 4 # /usr/include/X11/X.h:183
ButtonRelease = 5 # /usr/include/X11/X.h:184
MotionNotify = 6 # /usr/include/X11/X.h:185
EnterNotify = 7 # /usr/include/X11/X.h:186
LeaveNotify = 8 # /usr/include/X11/X.h:187
FocusIn = 9 # /usr/include/X11/X.h:188
FocusOut = 10 # /usr/include/X11/X.h:189
KeymapNotify = 11 # /usr/include/X11/X.h:190
Expose = 12 # /usr/include/X11/X.h:191
GraphicsExpose = 13 # /usr/include/X11/X.h:192
NoExpose = 14 # /usr/include/X11/X.h:193
VisibilityNotify = 15 # /usr/include/X11/X.h:194
CreateNotify = 16 # /usr/include/X11/X.h:195
DestroyNotify = 17 # /usr/include/X11/X.h:196
UnmapNotify = 18 # /usr/include/X11/X.h:197
MapNotify = 19 # /usr/include/X11/X.h:198
MapRequest = 20 # /usr/include/X11/X.h:199
ReparentNotify = 21 # /usr/include/X11/X.h:200
ConfigureNotify = 22 # /usr/include/X11/X.h:201
ConfigureRequest = 23 # /usr/include/X11/X.h:202
GravityNotify = 24 # /usr/include/X11/X.h:203
ResizeRequest = 25 # /usr/include/X11/X.h:204
CirculateNotify = 26 # /usr/include/X11/X.h:205
CirculateRequest = 27 # /usr/include/X11/X.h:206
PropertyNotify = 28 # /usr/include/X11/X.h:207
SelectionClear = 29 # /usr/include/X11/X.h:208
SelectionRequest = 30 # /usr/include/X11/X.h:209
SelectionNotify = 31 # /usr/include/X11/X.h:210
ColormapNotify = 32 # /usr/include/X11/X.h:211
ClientMessage = 33 # /usr/include/X11/X.h:212
MappingNotify = 34 # /usr/include/X11/X.h:213
GenericEvent = 35 # /usr/include/X11/X.h:214
LASTEvent = 36 # /usr/include/X11/X.h:215
ShiftMask = 1 # /usr/include/X11/X.h:221
LockMask = 2 # /usr/include/X11/X.h:222
ControlMask = 4 # /usr/include/X11/X.h:223
Mod1Mask = 8 # /usr/include/X11/X.h:224
Mod2Mask = 16 # /usr/include/X11/X.h:225
Mod3Mask = 32 # /usr/include/X11/X.h:226
Mod4Mask = 64 # /usr/include/X11/X.h:227
Mod5Mask = 128 # /usr/include/X11/X.h:228
ShiftMapIndex = 0 # /usr/include/X11/X.h:233
LockMapIndex = 1 # /usr/include/X11/X.h:234
ControlMapIndex = 2 # /usr/include/X11/X.h:235
Mod1MapIndex = 3 # /usr/include/X11/X.h:236
Mod2MapIndex = 4 # /usr/include/X11/X.h:237
Mod3MapIndex = 5 # /usr/include/X11/X.h:238
Mod4MapIndex = 6 # /usr/include/X11/X.h:239
Mod5MapIndex = 7 # /usr/include/X11/X.h:240
Button1Mask = 256 # /usr/include/X11/X.h:246
Button2Mask = 512 # /usr/include/X11/X.h:247
Button3Mask = 1024 # /usr/include/X11/X.h:248
Button4Mask = 2048 # /usr/include/X11/X.h:249
Button5Mask = 4096 # /usr/include/X11/X.h:250
AnyModifier = 32768 # /usr/include/X11/X.h:252
Button1 = 1 # /usr/include/X11/X.h:259
Button2 = 2 # /usr/include/X11/X.h:260
Button3 = 3 # /usr/include/X11/X.h:261
Button4 = 4 # /usr/include/X11/X.h:262
Button5 = 5 # /usr/include/X11/X.h:263
NotifyNormal = 0 # /usr/include/X11/X.h:267
NotifyGrab = 1 # /usr/include/X11/X.h:268
NotifyUngrab = 2 # /usr/include/X11/X.h:269
NotifyWhileGrabbed = 3 # /usr/include/X11/X.h:270
NotifyHint = 1 # /usr/include/X11/X.h:272
NotifyAncestor = 0 # /usr/include/X11/X.h:276
NotifyVirtual = 1 # /usr/include/X11/X.h:277
NotifyInferior = 2 # /usr/include/X11/X.h:278
NotifyNonlinear = 3 # /usr/include/X11/X.h:279
NotifyNonlinearVirtual = 4 # /usr/include/X11/X.h:280
NotifyPointer = 5 # /usr/include/X11/X.h:281
NotifyPointerRoot = 6 # /usr/include/X11/X.h:282
NotifyDetailNone = 7 # /usr/include/X11/X.h:283
VisibilityUnobscured = 0 # /usr/include/X11/X.h:287
VisibilityPartiallyObscured = 1 # /usr/include/X11/X.h:288
VisibilityFullyObscured = 2 # /usr/include/X11/X.h:289
PlaceOnTop = 0 # /usr/include/X11/X.h:293
PlaceOnBottom = 1 # /usr/include/X11/X.h:294
FamilyInternet = 0 # /usr/include/X11/X.h:298
FamilyDECnet = 1 # /usr/include/X11/X.h:299
FamilyChaos = 2 # /usr/include/X11/X.h:300
FamilyInternet6 = 6 # /usr/include/X11/X.h:301
FamilyServerInterpreted = 5 # /usr/include/X11/X.h:304
PropertyNewValue = 0 # /usr/include/X11/X.h:308
PropertyDelete = 1 # /usr/include/X11/X.h:309
ColormapUninstalled = 0 # /usr/include/X11/X.h:313
ColormapInstalled = 1 # /usr/include/X11/X.h:314
GrabModeSync = 0 # /usr/include/X11/X.h:318
GrabModeAsync = 1 # /usr/include/X11/X.h:319
GrabSuccess = 0 # /usr/include/X11/X.h:323
AlreadyGrabbed = 1 # /usr/include/X11/X.h:324
GrabInvalidTime = 2 # /usr/include/X11/X.h:325
GrabNotViewable = 3 # /usr/include/X11/X.h:326
GrabFrozen = 4 # /usr/include/X11/X.h:327
AsyncPointer = 0 # /usr/include/X11/X.h:331
SyncPointer = 1 # /usr/include/X11/X.h:332
ReplayPointer = 2 # /usr/include/X11/X.h:333
AsyncKeyboard = 3 # /usr/include/X11/X.h:334
SyncKeyboard = 4 # /usr/include/X11/X.h:335
ReplayKeyboard = 5 # /usr/include/X11/X.h:336
AsyncBoth = 6 # /usr/include/X11/X.h:337
SyncBoth = 7 # /usr/include/X11/X.h:338
RevertToParent = 2 # /usr/include/X11/X.h:344
Success = 0 # /usr/include/X11/X.h:350
BadRequest = 1 # /usr/include/X11/X.h:351
BadValue = 2 # /usr/include/X11/X.h:352
BadWindow = 3 # /usr/include/X11/X.h:353
BadPixmap = 4 # /usr/include/X11/X.h:354
BadAtom = 5 # /usr/include/X11/X.h:355
BadCursor = 6 # /usr/include/X11/X.h:356
BadFont = 7 # /usr/include/X11/X.h:357
BadMatch = 8 # /usr/include/X11/X.h:358
BadDrawable = 9 # /usr/include/X11/X.h:359
BadAccess = 10 # /usr/include/X11/X.h:360
BadAlloc = 11 # /usr/include/X11/X.h:369
BadColor = 12 # /usr/include/X11/X.h:370
BadGC = 13 # /usr/include/X11/X.h:371
BadIDChoice = 14 # /usr/include/X11/X.h:372
BadName = 15 # /usr/include/X11/X.h:373
BadLength = 16 # /usr/include/X11/X.h:374
BadImplementation = 17 # /usr/include/X11/X.h:375
FirstExtensionError = 128 # /usr/include/X11/X.h:377
LastExtensionError = 255 # /usr/include/X11/X.h:378
InputOutput = 1 # /usr/include/X11/X.h:387
InputOnly = 2 # /usr/include/X11/X.h:388
CWBackPixmap = 1 # /usr/include/X11/X.h:392
CWBackPixel = 2 # /usr/include/X11/X.h:393
CWBorderPixmap = 4 # /usr/include/X11/X.h:394
CWBorderPixel = 8 # /usr/include/X11/X.h:395
CWBitGravity = 16 # /usr/include/X11/X.h:396
CWWinGravity = 32 # /usr/include/X11/X.h:397
CWBackingStore = 64 # /usr/include/X11/X.h:398
CWBackingPlanes = 128 # /usr/include/X11/X.h:399
CWBackingPixel = 256 # /usr/include/X11/X.h:400
CWOverrideRedirect = 512 # /usr/include/X11/X.h:401
CWSaveUnder = 1024 # /usr/include/X11/X.h:402
CWEventMask = 2048 # /usr/include/X11/X.h:403
CWDontPropagate = 4096 # /usr/include/X11/X.h:404
CWColormap = 8192 # /usr/include/X11/X.h:405
CWCursor = 16384 # /usr/include/X11/X.h:406
CWX = 1 # /usr/include/X11/X.h:410
CWY = 2 # /usr/include/X11/X.h:411
CWWidth = 4 # /usr/include/X11/X.h:412
CWHeight = 8 # /usr/include/X11/X.h:413
CWBorderWidth = 16 # /usr/include/X11/X.h:414
CWSibling = 32 # /usr/include/X11/X.h:415
CWStackMode = 64 # /usr/include/X11/X.h:416
ForgetGravity = 0 # /usr/include/X11/X.h:421
NorthWestGravity = 1 # /usr/include/X11/X.h:422
NorthGravity = 2 # /usr/include/X11/X.h:423
NorthEastGravity = 3 # /usr/include/X11/X.h:424
WestGravity = 4 # /usr/include/X11/X.h:425
CenterGravity = 5 # /usr/include/X11/X.h:426
EastGravity = 6 # /usr/include/X11/X.h:427
SouthWestGravity = 7 # /usr/include/X11/X.h:428
SouthGravity = 8 # /usr/include/X11/X.h:429
SouthEastGravity = 9 # /usr/include/X11/X.h:430
StaticGravity = 10 # /usr/include/X11/X.h:431
UnmapGravity = 0 # /usr/include/X11/X.h:435
NotUseful = 0 # /usr/include/X11/X.h:439
WhenMapped = 1 # /usr/include/X11/X.h:440
Always = 2 # /usr/include/X11/X.h:441
IsUnmapped = 0 # /usr/include/X11/X.h:445
IsUnviewable = 1 # /usr/include/X11/X.h:446
IsViewable = 2 # /usr/include/X11/X.h:447
SetModeInsert = 0 # /usr/include/X11/X.h:451
SetModeDelete = 1 # /usr/include/X11/X.h:452
DestroyAll = 0 # /usr/include/X11/X.h:456
RetainPermanent = 1 # /usr/include/X11/X.h:457
RetainTemporary = 2 # /usr/include/X11/X.h:458
Above = 0 # /usr/include/X11/X.h:462
Below = 1 # /usr/include/X11/X.h:463
TopIf = 2 # /usr/include/X11/X.h:464
BottomIf = 3 # /usr/include/X11/X.h:465
Opposite = 4 # /usr/include/X11/X.h:466
RaiseLowest = 0 # /usr/include/X11/X.h:470
LowerHighest = 1 # /usr/include/X11/X.h:471
PropModeReplace = 0 # /usr/include/X11/X.h:475
PropModePrepend = 1 # /usr/include/X11/X.h:476
PropModeAppend = 2 # /usr/include/X11/X.h:477
GXclear = 0 # /usr/include/X11/X.h:485
GXand = 1 # /usr/include/X11/X.h:486
GXandReverse = 2 # /usr/include/X11/X.h:487
GXcopy = 3 # /usr/include/X11/X.h:488
GXandInverted = 4 # /usr/include/X11/X.h:489
GXnoop = 5 # /usr/include/X11/X.h:490
GXxor = 6 # /usr/include/X11/X.h:491
GXor = 7 # /usr/include/X11/X.h:492
GXnor = 8 # /usr/include/X11/X.h:493
GXequiv = 9 # /usr/include/X11/X.h:494
GXinvert = 10 # /usr/include/X11/X.h:495
GXorReverse = 11 # /usr/include/X11/X.h:496
GXcopyInverted = 12 # /usr/include/X11/X.h:497
GXorInverted = 13 # /usr/include/X11/X.h:498
GXnand = 14 # /usr/include/X11/X.h:499
GXset = 15 # /usr/include/X11/X.h:500
LineSolid = 0 # /usr/include/X11/X.h:504
LineOnOffDash = 1 # /usr/include/X11/X.h:505
LineDoubleDash = 2 # /usr/include/X11/X.h:506
CapNotLast = 0 # /usr/include/X11/X.h:510
CapButt = 1 # /usr/include/X11/X.h:511
CapRound = 2 # /usr/include/X11/X.h:512
CapProjecting = 3 # /usr/include/X11/X.h:513
JoinMiter = 0 # /usr/include/X11/X.h:517
JoinRound = 1 # /usr/include/X11/X.h:518
JoinBevel = 2 # /usr/include/X11/X.h:519
FillSolid = 0 # /usr/include/X11/X.h:523
FillTiled = 1 # /usr/include/X11/X.h:524
FillStippled = 2 # /usr/include/X11/X.h:525
FillOpaqueStippled = 3 # /usr/include/X11/X.h:526
EvenOddRule = 0 # /usr/include/X11/X.h:530
WindingRule = 1 # /usr/include/X11/X.h:531
ClipByChildren = 0 # /usr/include/X11/X.h:535
IncludeInferiors = 1 # /usr/include/X11/X.h:536
Unsorted = 0 # /usr/include/X11/X.h:540
YSorted = 1 # /usr/include/X11/X.h:541
YXSorted = 2 # /usr/include/X11/X.h:542
YXBanded = 3 # /usr/include/X11/X.h:543
CoordModeOrigin = 0 # /usr/include/X11/X.h:547
CoordModePrevious = 1 # /usr/include/X11/X.h:548
Complex = 0 # /usr/include/X11/X.h:552
Nonconvex = 1 # /usr/include/X11/X.h:553
Convex = 2 # /usr/include/X11/X.h:554
ArcChord = 0 # /usr/include/X11/X.h:558
ArcPieSlice = 1 # /usr/include/X11/X.h:559
GCFunction = 1 # /usr/include/X11/X.h:564
GCPlaneMask = 2 # /usr/include/X11/X.h:565
GCForeground = 4 # /usr/include/X11/X.h:566
GCBackground = 8 # /usr/include/X11/X.h:567
GCLineWidth = 16 # /usr/include/X11/X.h:568
GCLineStyle = 32 # /usr/include/X11/X.h:569
GCCapStyle = 64 # /usr/include/X11/X.h:570
GCJoinStyle = 128 # /usr/include/X11/X.h:571
GCFillStyle = 256 # /usr/include/X11/X.h:572
GCFillRule = 512 # /usr/include/X11/X.h:573
GCTile = 1024 # /usr/include/X11/X.h:574
GCStipple = 2048 # /usr/include/X11/X.h:575
GCTileStipXOrigin = 4096 # /usr/include/X11/X.h:576
GCTileStipYOrigin = 8192 # /usr/include/X11/X.h:577
GCFont = 16384 # /usr/include/X11/X.h:578
GCSubwindowMode = 32768 # /usr/include/X11/X.h:579
GCGraphicsExposures = 65536 # /usr/include/X11/X.h:580
GCClipXOrigin = 131072 # /usr/include/X11/X.h:581
GCClipYOrigin = 262144 # /usr/include/X11/X.h:582
GCClipMask = 524288 # /usr/include/X11/X.h:583
GCDashOffset = 1048576 # /usr/include/X11/X.h:584
GCDashList = 2097152 # /usr/include/X11/X.h:585
GCArcMode = 4194304 # /usr/include/X11/X.h:586
GCLastBit = 22 # /usr/include/X11/X.h:588
FontLeftToRight = 0 # /usr/include/X11/X.h:595
FontRightToLeft = 1 # /usr/include/X11/X.h:596
FontChange = 255 # /usr/include/X11/X.h:598
XYBitmap = 0 # /usr/include/X11/X.h:606
XYPixmap = 1 # /usr/include/X11/X.h:607
ZPixmap = 2 # /usr/include/X11/X.h:608
AllocNone = 0 # /usr/include/X11/X.h:616
AllocAll = 1 # /usr/include/X11/X.h:617
DoRed = 1 # /usr/include/X11/X.h:622
DoGreen = 2 # /usr/include/X11/X.h:623
DoBlue = 4 # /usr/include/X11/X.h:624
CursorShape = 0 # /usr/include/X11/X.h:632
TileShape = 1 # /usr/include/X11/X.h:633
StippleShape = 2 # /usr/include/X11/X.h:634
AutoRepeatModeOff = 0 # /usr/include/X11/X.h:640
AutoRepeatModeOn = 1 # /usr/include/X11/X.h:641
AutoRepeatModeDefault = 2 # /usr/include/X11/X.h:642
LedModeOff = 0 # /usr/include/X11/X.h:644
LedModeOn = 1 # /usr/include/X11/X.h:645
KBKeyClickPercent = 1 # /usr/include/X11/X.h:649
KBBellPercent = 2 # /usr/include/X11/X.h:650
KBBellPitch = 4 # /usr/include/X11/X.h:651
KBBellDuration = 8 # /usr/include/X11/X.h:652
KBLed = 16 # /usr/include/X11/X.h:653
KBLedMode = 32 # /usr/include/X11/X.h:654
KBKey = 64 # /usr/include/X11/X.h:655
KBAutoRepeatMode = 128 # /usr/include/X11/X.h:656
MappingSuccess = 0 # /usr/include/X11/X.h:658
MappingBusy = 1 # /usr/include/X11/X.h:659
MappingFailed = 2 # /usr/include/X11/X.h:660
MappingModifier = 0 # /usr/include/X11/X.h:662
MappingKeyboard = 1 # /usr/include/X11/X.h:663
MappingPointer = 2 # /usr/include/X11/X.h:664
DontPreferBlanking = 0 # /usr/include/X11/X.h:670
PreferBlanking = 1 # /usr/include/X11/X.h:671
DefaultBlanking = 2 # /usr/include/X11/X.h:672
DisableScreenSaver = 0 # /usr/include/X11/X.h:674
DisableScreenInterval = 0 # /usr/include/X11/X.h:675
DontAllowExposures = 0 # /usr/include/X11/X.h:677
AllowExposures = 1 # /usr/include/X11/X.h:678
DefaultExposures = 2 # /usr/include/X11/X.h:679
ScreenSaverReset = 0 # /usr/include/X11/X.h:683
ScreenSaverActive = 1 # /usr/include/X11/X.h:684
HostInsert = 0 # /usr/include/X11/X.h:692
HostDelete = 1 # /usr/include/X11/X.h:693
EnableAccess = 1 # /usr/include/X11/X.h:697
DisableAccess = 0 # /usr/include/X11/X.h:698
StaticGray = 0 # /usr/include/X11/X.h:704
GrayScale = 1 # /usr/include/X11/X.h:705
StaticColor = 2 # /usr/include/X11/X.h:706
PseudoColor = 3 # /usr/include/X11/X.h:707
TrueColor = 4 # /usr/include/X11/X.h:708
DirectColor = 5 # /usr/include/X11/X.h:709
LSBFirst = 0 # /usr/include/X11/X.h:714
MSBFirst = 1 # /usr/include/X11/X.h:715
# /usr/include/X11/Xlib.h:73
_Xmblen = _lib._Xmblen
_Xmblen.restype = c_int
_Xmblen.argtypes = [c_char_p, c_int]
X_HAVE_UTF8_STRING = 1 # /usr/include/X11/Xlib.h:85
XPointer = c_char_p # /usr/include/X11/Xlib.h:87
Bool = c_int # /usr/include/X11/Xlib.h:89
Status = c_int # /usr/include/X11/Xlib.h:90
True_ = 1 # /usr/include/X11/Xlib.h:91
False_ = 0 # /usr/include/X11/Xlib.h:92
QueuedAlready = 0 # /usr/include/X11/Xlib.h:94
QueuedAfterReading = 1 # /usr/include/X11/Xlib.h:95
QueuedAfterFlush = 2 # /usr/include/X11/Xlib.h:96
class struct__XExtData(Structure):
__slots__ = [
'number',
'next',
'free_private',
'private_data',
]
struct__XExtData._fields_ = [
('number', c_int),
('next', POINTER(struct__XExtData)),
('free_private', POINTER(CFUNCTYPE(c_int, POINTER(struct__XExtData)))),
('private_data', XPointer),
]
XExtData = struct__XExtData # /usr/include/X11/Xlib.h:166
class struct_anon_15(Structure):
__slots__ = [
'extension',
'major_opcode',
'first_event',
'first_error',
]
struct_anon_15._fields_ = [
('extension', c_int),
('major_opcode', c_int),
('first_event', c_int),
('first_error', c_int),
]
XExtCodes = struct_anon_15 # /usr/include/X11/Xlib.h:176
class struct_anon_16(Structure):
__slots__ = [
'depth',
'bits_per_pixel',
'scanline_pad',
]
struct_anon_16._fields_ = [
('depth', c_int),
('bits_per_pixel', c_int),
('scanline_pad', c_int),
]
XPixmapFormatValues = struct_anon_16 # /usr/include/X11/Xlib.h:186
class struct_anon_17(Structure):
__slots__ = [
'function',
'plane_mask',
'foreground',
'background',
'line_width',
'line_style',
'cap_style',
'join_style',
'fill_style',
'fill_rule',
'arc_mode',
'tile',
'stipple',
'ts_x_origin',
'ts_y_origin',
'font',
'subwindow_mode',
'graphics_exposures',
'clip_x_origin',
'clip_y_origin',
'clip_mask',
'dash_offset',
'dashes',
]
struct_anon_17._fields_ = [
('function', c_int),
('plane_mask', c_ulong),
('foreground', c_ulong),
('background', c_ulong),
('line_width', c_int),
('line_style', c_int),
('cap_style', c_int),
('join_style', c_int),
('fill_style', c_int),
('fill_rule', c_int),
('arc_mode', c_int),
('tile', Pixmap),
('stipple', Pixmap),
('ts_x_origin', c_int),
('ts_y_origin', c_int),
('font', Font),
('subwindow_mode', c_int),
('graphics_exposures', c_int),
('clip_x_origin', c_int),
('clip_y_origin', c_int),
('clip_mask', Pixmap),
('dash_offset', c_int),
('dashes', c_char),
]
XGCValues = struct_anon_17 # /usr/include/X11/Xlib.h:218
class struct__XGC(Structure):
__slots__ = [
]
struct__XGC._fields_ = [
('_opaque_struct', c_int)
]
class struct__XGC(Structure):
__slots__ = [
]
struct__XGC._fields_ = [
('_opaque_struct', c_int)
]
GC = POINTER(struct__XGC) # /usr/include/X11/Xlib.h:233
class struct_anon_18(Structure):
__slots__ = [
'ext_data',
'visualid',
'class',
'red_mask',
'green_mask',
'blue_mask',
'bits_per_rgb',
'map_entries',
]
struct_anon_18._fields_ = [
('ext_data', POINTER(XExtData)),
('visualid', VisualID),
('class', c_int),
('red_mask', c_ulong),
('green_mask', c_ulong),
('blue_mask', c_ulong),
('bits_per_rgb', c_int),
('map_entries', c_int),
]
Visual = struct_anon_18 # /usr/include/X11/Xlib.h:249
class struct_anon_19(Structure):
__slots__ = [
'depth',
'nvisuals',
'visuals',
]
struct_anon_19._fields_ = [
('depth', c_int),
('nvisuals', c_int),
('visuals', POINTER(Visual)),
]
Depth = struct_anon_19 # /usr/include/X11/Xlib.h:258
class struct_anon_20(Structure):
__slots__ = [
'ext_data',
'display',
'root',
'width',
'height',
'mwidth',
'mheight',
'ndepths',
'depths',
'root_depth',
'root_visual',
'default_gc',
'cmap',
'white_pixel',
'black_pixel',
'max_maps',
'min_maps',
'backing_store',
'save_unders',
'root_input_mask',
]
class struct__XDisplay(Structure):
__slots__ = [
]
struct__XDisplay._fields_ = [
('_opaque_struct', c_int)
]
struct_anon_20._fields_ = [
('ext_data', POINTER(XExtData)),
('display', POINTER(struct__XDisplay)),
('root', Window),
('width', c_int),
('height', c_int),
('mwidth', c_int),
('mheight', c_int),
('ndepths', c_int),
('depths', POINTER(Depth)),
('root_depth', c_int),
('root_visual', POINTER(Visual)),
('default_gc', GC),
('cmap', Colormap),
('white_pixel', c_ulong),
('black_pixel', c_ulong),
('max_maps', c_int),
('min_maps', c_int),
('backing_store', c_int),
('save_unders', c_int),
('root_input_mask', c_long),
]
Screen = struct_anon_20 # /usr/include/X11/Xlib.h:286
class struct_anon_21(Structure):
__slots__ = [
'ext_data',
'depth',
'bits_per_pixel',
'scanline_pad',
]
struct_anon_21._fields_ = [
('ext_data', POINTER(XExtData)),
('depth', c_int),
('bits_per_pixel', c_int),
('scanline_pad', c_int),
]
ScreenFormat = struct_anon_21 # /usr/include/X11/Xlib.h:296
class struct_anon_22(Structure):
__slots__ = [
'background_pixmap',
'background_pixel',
'border_pixmap',
'border_pixel',
'bit_gravity',
'win_gravity',
'backing_store',
'backing_planes',
'backing_pixel',
'save_under',
'event_mask',
'do_not_propagate_mask',
'override_redirect',
'colormap',
'cursor',
]
struct_anon_22._fields_ = [
('background_pixmap', Pixmap),
('background_pixel', c_ulong),
('border_pixmap', Pixmap),
('border_pixel', c_ulong),
('bit_gravity', c_int),
('win_gravity', c_int),
('backing_store', c_int),
('backing_planes', c_ulong),
('backing_pixel', c_ulong),
('save_under', c_int),
('event_mask', c_long),
('do_not_propagate_mask', c_long),
('override_redirect', c_int),
('colormap', Colormap),
('cursor', Cursor),
]
XSetWindowAttributes = struct_anon_22 # /usr/include/X11/Xlib.h:317
class struct_anon_23(Structure):
__slots__ = [
'x',
'y',
'width',
'height',
'border_width',
'depth',
'visual',
'root',
'class',
'bit_gravity',
'win_gravity',
'backing_store',
'backing_planes',
'backing_pixel',
'save_under',
'colormap',
'map_installed',
'map_state',
'all_event_masks',
'your_event_mask',
'do_not_propagate_mask',
'override_redirect',
'screen',
]
struct_anon_23._fields_ = [
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('border_width', c_int),
('depth', c_int),
('visual', POINTER(Visual)),
('root', Window),
('class', c_int),
('bit_gravity', c_int),
('win_gravity', c_int),
('backing_store', c_int),
('backing_planes', c_ulong),
('backing_pixel', c_ulong),
('save_under', c_int),
('colormap', Colormap),
('map_installed', c_int),
('map_state', c_int),
('all_event_masks', c_long),
('your_event_mask', c_long),
('do_not_propagate_mask', c_long),
('override_redirect', c_int),
('screen', POINTER(Screen)),
]
XWindowAttributes = struct_anon_23 # /usr/include/X11/Xlib.h:345
class struct_anon_24(Structure):
__slots__ = [
'family',
'length',
'address',
]
struct_anon_24._fields_ = [
('family', c_int),
('length', c_int),
('address', c_char_p),
]
XHostAddress = struct_anon_24 # /usr/include/X11/Xlib.h:356
class struct_anon_25(Structure):
__slots__ = [
'typelength',
'valuelength',
'type',
'value',
]
struct_anon_25._fields_ = [
('typelength', c_int),
('valuelength', c_int),
('type', c_char_p),
('value', c_char_p),
]
XServerInterpretedAddress = struct_anon_25 # /usr/include/X11/Xlib.h:366
class struct__XImage(Structure):
__slots__ = [
'width',
'height',
'xoffset',
'format',
'data',
'byte_order',
'bitmap_unit',
'bitmap_bit_order',
'bitmap_pad',
'depth',
'bytes_per_line',
'bits_per_pixel',
'red_mask',
'green_mask',
'blue_mask',
'obdata',
'f',
]
class struct_funcs(Structure):
__slots__ = [
'create_image',
'destroy_image',
'get_pixel',
'put_pixel',
'sub_image',
'add_pixel',
]
class struct__XDisplay(Structure):
__slots__ = [
]
struct__XDisplay._fields_ = [
('_opaque_struct', c_int)
]
struct_funcs._fields_ = [
('create_image',
POINTER(
CFUNCTYPE(
POINTER(struct__XImage),
POINTER(struct__XDisplay),
POINTER(Visual),
c_uint,
c_int,
c_int,
c_char_p,
c_uint,
c_uint,
c_int,
c_int))),
('destroy_image',
POINTER(
CFUNCTYPE(
c_int,
POINTER(struct__XImage)))),
('get_pixel',
POINTER(
CFUNCTYPE(
c_ulong,
POINTER(struct__XImage),
c_int,
c_int))),
('put_pixel',
POINTER(
CFUNCTYPE(
c_int,
POINTER(struct__XImage),
c_int,
c_int,
c_ulong))),
('sub_image',
POINTER(
CFUNCTYPE(
POINTER(struct__XImage),
POINTER(struct__XImage),
c_int,
c_int,
c_uint,
c_uint))),
('add_pixel',
POINTER(
CFUNCTYPE(
c_int,
POINTER(struct__XImage),
c_long))),
]
struct__XImage._fields_ = [
('width', c_int),
('height', c_int),
('xoffset', c_int),
('format', c_int),
('data', c_char_p),
('byte_order', c_int),
('bitmap_unit', c_int),
('bitmap_bit_order', c_int),
('bitmap_pad', c_int),
('depth', c_int),
('bytes_per_line', c_int),
('bits_per_pixel', c_int),
('red_mask', c_ulong),
('green_mask', c_ulong),
('blue_mask', c_ulong),
('obdata', XPointer),
('f', struct_funcs),
]
XImage = struct__XImage # /usr/include/X11/Xlib.h:405
class struct_anon_26(Structure):
__slots__ = [
'x',
'y',
'width',
'height',
'border_width',
'sibling',
'stack_mode',
]
struct_anon_26._fields_ = [
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('border_width', c_int),
('sibling', Window),
('stack_mode', c_int),
]
XWindowChanges = struct_anon_26 # /usr/include/X11/Xlib.h:416
class struct_anon_27(Structure):
__slots__ = [
'pixel',
'red',
'green',
'blue',
'flags',
'pad',
]
struct_anon_27._fields_ = [
('pixel', c_ulong),
('red', c_ushort),
('green', c_ushort),
('blue', c_ushort),
('flags', c_char),
('pad', c_char),
]
XColor = struct_anon_27 # /usr/include/X11/Xlib.h:426
class struct_anon_28(Structure):
__slots__ = [
'x1',
'y1',
'x2',
'y2',
]
struct_anon_28._fields_ = [
('x1', c_short),
('y1', c_short),
('x2', c_short),
('y2', c_short),
]
XSegment = struct_anon_28 # /usr/include/X11/Xlib.h:435
class struct_anon_29(Structure):
__slots__ = [
'x',
'y',
]
struct_anon_29._fields_ = [
('x', c_short),
('y', c_short),
]
XPoint = struct_anon_29 # /usr/include/X11/Xlib.h:439
class struct_anon_30(Structure):
__slots__ = [
'x',
'y',
'width',
'height',
]
struct_anon_30._fields_ = [
('x', c_short),
('y', c_short),
('width', c_ushort),
('height', c_ushort),
]
XRectangle = struct_anon_30 # /usr/include/X11/Xlib.h:444
class struct_anon_31(Structure):
__slots__ = [
'x',
'y',
'width',
'height',
'angle1',
'angle2',
]
struct_anon_31._fields_ = [
('x', c_short),
('y', c_short),
('width', c_ushort),
('height', c_ushort),
('angle1', c_short),
('angle2', c_short),
]
XArc = struct_anon_31 # /usr/include/X11/Xlib.h:450
class struct_anon_32(Structure):
__slots__ = [
'key_click_percent',
'bell_percent',
'bell_pitch',
'bell_duration',
'led',
'led_mode',
'key',
'auto_repeat_mode',
]
struct_anon_32._fields_ = [
('key_click_percent', c_int),
('bell_percent', c_int),
('bell_pitch', c_int),
('bell_duration', c_int),
('led', c_int),
('led_mode', c_int),
('key', c_int),
('auto_repeat_mode', c_int),
]
XKeyboardControl = struct_anon_32 # /usr/include/X11/Xlib.h:464
class struct_anon_33(Structure):
__slots__ = [
'key_click_percent',
'bell_percent',
'bell_pitch',
'bell_duration',
'led_mask',
'global_auto_repeat',
'auto_repeats',
]
struct_anon_33._fields_ = [
('key_click_percent', c_int),
('bell_percent', c_int),
('bell_pitch', c_uint),
('bell_duration', c_uint),
('led_mask', c_ulong),
('global_auto_repeat', c_int),
('auto_repeats', c_char * 32),
]
XKeyboardState = struct_anon_33 # /usr/include/X11/Xlib.h:475
class struct_anon_34(Structure):
__slots__ = [
'time',
'x',
'y',
]
struct_anon_34._fields_ = [
('time', Time),
('x', c_short),
('y', c_short),
]
XTimeCoord = struct_anon_34 # /usr/include/X11/Xlib.h:482
class struct_anon_35(Structure):
__slots__ = [
'max_keypermod',
'modifiermap',
]
struct_anon_35._fields_ = [
('max_keypermod', c_int),
('modifiermap', POINTER(KeyCode)),
]
XModifierKeymap = struct_anon_35 # /usr/include/X11/Xlib.h:489
class struct__XDisplay(Structure):
__slots__ = [
]
struct__XDisplay._fields_ = [
('_opaque_struct', c_int)
]
class struct__XDisplay(Structure):
__slots__ = [
]
struct__XDisplay._fields_ = [
('_opaque_struct', c_int)
]
Display = struct__XDisplay # /usr/include/X11/Xlib.h:498
class struct_anon_36(Structure):
__slots__ = [
'ext_data',
'private1',
'fd',
'private2',
'proto_major_version',
'proto_minor_version',
'vendor',
'private3',
'private4',
'private5',
'private6',
'resource_alloc',
'byte_order',
'bitmap_unit',
'bitmap_pad',
'bitmap_bit_order',
'nformats',
'pixmap_format',
'private8',
'release',
'private9',
'private10',
'qlen',
'last_request_read',
'request',
'private11',
'private12',
'private13',
'private14',
'max_request_size',
'db',
'private15',
'display_name',
'default_screen',
'nscreens',
'screens',
'motion_buffer',
'private16',
'min_keycode',
'max_keycode',
'private17',
'private18',
'private19',
'xdefaults',
]
class struct__XPrivate(Structure):
__slots__ = [
]
struct__XPrivate._fields_ = [
('_opaque_struct', c_int)
]
class struct__XDisplay(Structure):
__slots__ = [
]
struct__XDisplay._fields_ = [
('_opaque_struct', c_int)
]
class struct__XPrivate(Structure):
__slots__ = [
]
struct__XPrivate._fields_ = [
('_opaque_struct', c_int)
]
class struct__XPrivate(Structure):
__slots__ = [
]
struct__XPrivate._fields_ = [
('_opaque_struct', c_int)
]
class struct__XrmHashBucketRec(Structure):
__slots__ = [
]
struct__XrmHashBucketRec._fields_ = [
('_opaque_struct', c_int)
]
class struct__XDisplay(Structure):
__slots__ = [
]
struct__XDisplay._fields_ = [
('_opaque_struct', c_int)
]
struct_anon_36._fields_ = [
('ext_data', POINTER(XExtData)),
('private1', POINTER(struct__XPrivate)),
('fd', c_int),
('private2', c_int),
('proto_major_version', c_int),
('proto_minor_version', c_int),
('vendor', c_char_p),
('private3', XID),
('private4', XID),
('private5', XID),
('private6', c_int),
('resource_alloc', POINTER(CFUNCTYPE(XID, POINTER(struct__XDisplay)))),
('byte_order', c_int),
('bitmap_unit', c_int),
('bitmap_pad', c_int),
('bitmap_bit_order', c_int),
('nformats', c_int),
('pixmap_format', POINTER(ScreenFormat)),
('private8', c_int),
('release', c_int),
('private9', POINTER(struct__XPrivate)),
('private10', POINTER(struct__XPrivate)),
('qlen', c_int),
('last_request_read', c_ulong),
('request', c_ulong),
('private11', XPointer),
('private12', XPointer),
('private13', XPointer),
('private14', XPointer),
('max_request_size', c_uint),
('db', POINTER(struct__XrmHashBucketRec)),
('private15', POINTER(CFUNCTYPE(c_int, POINTER(struct__XDisplay)))),
('display_name', c_char_p),
('default_screen', c_int),
('nscreens', c_int),
('screens', POINTER(Screen)),
('motion_buffer', c_ulong),
('private16', c_ulong),
('min_keycode', c_int),
('max_keycode', c_int),
('private17', XPointer),
('private18', XPointer),
('private19', c_int),
('xdefaults', c_char_p),
]
_XPrivDisplay = POINTER(struct_anon_36) # /usr/include/X11/Xlib.h:561
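# _XPrivDisplay mirrors the private _XDisplay layout that Xlib's C macros
# (DefaultScreen, RootWindow, ...) reach into directly; the functional
# equivalents bound below (XDefaultScreen, XRootWindow, ...) are the safer
# interface from Python.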
class struct_anon_37(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'root',
'subwindow',
'time',
'x',
'y',
'x_root',
'y_root',
'state',
'keycode',
'same_screen',
]
struct_anon_37._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('root', Window),
('subwindow', Window),
('time', Time),
('x', c_int),
('y', c_int),
('x_root', c_int),
('y_root', c_int),
('state', c_uint),
('keycode', c_uint),
('same_screen', c_int),
]
XKeyEvent = struct_anon_37 # /usr/include/X11/Xlib.h:582
XKeyPressedEvent = XKeyEvent # /usr/include/X11/Xlib.h:583
XKeyReleasedEvent = XKeyEvent # /usr/include/X11/Xlib.h:584
class struct_anon_38(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'root',
'subwindow',
'time',
'x',
'y',
'x_root',
'y_root',
'state',
'button',
'same_screen',
]
struct_anon_38._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('root', Window),
('subwindow', Window),
('time', Time),
('x', c_int),
('y', c_int),
('x_root', c_int),
('y_root', c_int),
('state', c_uint),
('button', c_uint),
('same_screen', c_int),
]
XButtonEvent = struct_anon_38 # /usr/include/X11/Xlib.h:600
XButtonPressedEvent = XButtonEvent # /usr/include/X11/Xlib.h:601
XButtonReleasedEvent = XButtonEvent # /usr/include/X11/Xlib.h:602
class struct_anon_39(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'root',
'subwindow',
'time',
'x',
'y',
'x_root',
'y_root',
'state',
'is_hint',
'same_screen',
]
struct_anon_39._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('root', Window),
('subwindow', Window),
('time', Time),
('x', c_int),
('y', c_int),
('x_root', c_int),
('y_root', c_int),
('state', c_uint),
('is_hint', c_char),
('same_screen', c_int),
]
XMotionEvent = struct_anon_39 # /usr/include/X11/Xlib.h:618
XPointerMovedEvent = XMotionEvent # /usr/include/X11/Xlib.h:619
class struct_anon_40(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'root',
'subwindow',
'time',
'x',
'y',
'x_root',
'y_root',
'mode',
'detail',
'same_screen',
'focus',
'state',
]
struct_anon_40._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('root', Window),
('subwindow', Window),
('time', Time),
('x', c_int),
('y', c_int),
('x_root', c_int),
('y_root', c_int),
('mode', c_int),
('detail', c_int),
('same_screen', c_int),
('focus', c_int),
('state', c_uint),
]
XCrossingEvent = struct_anon_40 # /usr/include/X11/Xlib.h:641
XEnterWindowEvent = XCrossingEvent # /usr/include/X11/Xlib.h:642
XLeaveWindowEvent = XCrossingEvent # /usr/include/X11/Xlib.h:643
class struct_anon_41(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'mode',
'detail',
]
struct_anon_41._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('mode', c_int),
('detail', c_int),
]
XFocusChangeEvent = struct_anon_41 # /usr/include/X11/Xlib.h:659
XFocusInEvent = XFocusChangeEvent # /usr/include/X11/Xlib.h:660
XFocusOutEvent = XFocusChangeEvent # /usr/include/X11/Xlib.h:661
class struct_anon_42(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'key_vector',
]
struct_anon_42._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('key_vector', c_char * 32),
]
XKeymapEvent = struct_anon_42 # /usr/include/X11/Xlib.h:671
class struct_anon_43(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'x',
'y',
'width',
'height',
'count',
]
struct_anon_43._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('count', c_int),
]
XExposeEvent = struct_anon_43 # /usr/include/X11/Xlib.h:682
class struct_anon_44(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'drawable',
'x',
'y',
'width',
'height',
'count',
'major_code',
'minor_code',
]
struct_anon_44._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('drawable', Drawable),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('count', c_int),
('major_code', c_int),
('minor_code', c_int),
]
XGraphicsExposeEvent = struct_anon_44 # /usr/include/X11/Xlib.h:695
class struct_anon_45(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'drawable',
'major_code',
'minor_code',
]
struct_anon_45._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('drawable', Drawable),
('major_code', c_int),
('minor_code', c_int),
]
XNoExposeEvent = struct_anon_45 # /usr/include/X11/Xlib.h:705
class struct_anon_46(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'state',
]
struct_anon_46._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('state', c_int),
]
XVisibilityEvent = struct_anon_46 # /usr/include/X11/Xlib.h:714
class struct_anon_47(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'parent',
'window',
'x',
'y',
'width',
'height',
'border_width',
'override_redirect',
]
struct_anon_47._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('parent', Window),
('window', Window),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('border_width', c_int),
('override_redirect', c_int),
]
XCreateWindowEvent = struct_anon_47 # /usr/include/X11/Xlib.h:727
class struct_anon_48(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'event',
'window',
]
struct_anon_48._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('event', Window),
('window', Window),
]
XDestroyWindowEvent = struct_anon_48 # /usr/include/X11/Xlib.h:736
class struct_anon_49(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'event',
'window',
'from_configure',
]
struct_anon_49._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('event', Window),
('window', Window),
('from_configure', c_int),
]
XUnmapEvent = struct_anon_49 # /usr/include/X11/Xlib.h:746
class struct_anon_50(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'event',
'window',
'override_redirect',
]
struct_anon_50._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('event', Window),
('window', Window),
('override_redirect', c_int),
]
XMapEvent = struct_anon_50 # /usr/include/X11/Xlib.h:756
class struct_anon_51(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'parent',
'window',
]
struct_anon_51._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('parent', Window),
('window', Window),
]
XMapRequestEvent = struct_anon_51 # /usr/include/X11/Xlib.h:765
class struct_anon_52(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'event',
'window',
'parent',
'x',
'y',
'override_redirect',
]
struct_anon_52._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('event', Window),
('window', Window),
('parent', Window),
('x', c_int),
('y', c_int),
('override_redirect', c_int),
]
XReparentEvent = struct_anon_52 # /usr/include/X11/Xlib.h:777
class struct_anon_53(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'event',
'window',
'x',
'y',
'width',
'height',
'border_width',
'above',
'override_redirect',
]
struct_anon_53._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('event', Window),
('window', Window),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('border_width', c_int),
('above', Window),
('override_redirect', c_int),
]
XConfigureEvent = struct_anon_53 # /usr/include/X11/Xlib.h:791
class struct_anon_54(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'event',
'window',
'x',
'y',
]
struct_anon_54._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('event', Window),
('window', Window),
('x', c_int),
('y', c_int),
]
XGravityEvent = struct_anon_54 # /usr/include/X11/Xlib.h:801
class struct_anon_55(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'width',
'height',
]
struct_anon_55._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('width', c_int),
('height', c_int),
]
XResizeRequestEvent = struct_anon_55 # /usr/include/X11/Xlib.h:810
class struct_anon_56(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'parent',
'window',
'x',
'y',
'width',
'height',
'border_width',
'above',
'detail',
'value_mask',
]
struct_anon_56._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('parent', Window),
('window', Window),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('border_width', c_int),
('above', Window),
('detail', c_int),
('value_mask', c_ulong),
]
XConfigureRequestEvent = struct_anon_56 # /usr/include/X11/Xlib.h:825
class struct_anon_57(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'event',
'window',
'place',
]
struct_anon_57._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('event', Window),
('window', Window),
('place', c_int),
]
XCirculateEvent = struct_anon_57 # /usr/include/X11/Xlib.h:835
class struct_anon_58(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'parent',
'window',
'place',
]
struct_anon_58._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('parent', Window),
('window', Window),
('place', c_int),
]
XCirculateRequestEvent = struct_anon_58 # /usr/include/X11/Xlib.h:845
class struct_anon_59(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'atom',
'time',
'state',
]
struct_anon_59._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('atom', Atom),
('time', Time),
('state', c_int),
]
XPropertyEvent = struct_anon_59 # /usr/include/X11/Xlib.h:856
class struct_anon_60(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'selection',
'time',
]
struct_anon_60._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('selection', Atom),
('time', Time),
]
XSelectionClearEvent = struct_anon_60 # /usr/include/X11/Xlib.h:866
class struct_anon_61(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'owner',
'requestor',
'selection',
'target',
'property',
'time',
]
struct_anon_61._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('owner', Window),
('requestor', Window),
('selection', Atom),
('target', Atom),
('property', Atom),
('time', Time),
]
XSelectionRequestEvent = struct_anon_61 # /usr/include/X11/Xlib.h:879
class struct_anon_62(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'requestor',
'selection',
'target',
'property',
'time',
]
struct_anon_62._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('requestor', Window),
('selection', Atom),
('target', Atom),
('property', Atom),
('time', Time),
]
XSelectionEvent = struct_anon_62 # /usr/include/X11/Xlib.h:891
class struct_anon_63(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'colormap',
'new',
'state',
]
struct_anon_63._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('colormap', Colormap),
('new', c_int),
('state', c_int),
]
XColormapEvent = struct_anon_63 # /usr/include/X11/Xlib.h:906
class struct_anon_64(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'message_type',
'format',
'data',
]
class struct_anon_65(Union):
__slots__ = [
'b',
's',
'l',
]
struct_anon_65._fields_ = [
('b', c_char * 20),
('s', c_short * 10),
('l', c_long * 5),
]
struct_anon_64._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('message_type', Atom),
('format', c_int),
('data', struct_anon_65),
]
XClientMessageEvent = struct_anon_64 # /usr/include/X11/Xlib.h:921
class struct_anon_66(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
'request',
'first_keycode',
'count',
]
struct_anon_66._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
('request', c_int),
('first_keycode', c_int),
('count', c_int),
]
XMappingEvent = struct_anon_66 # /usr/include/X11/Xlib.h:933
class struct_anon_67(Structure):
__slots__ = [
'type',
'display',
'resourceid',
'serial',
'error_code',
'request_code',
'minor_code',
]
struct_anon_67._fields_ = [
('type', c_int),
('display', POINTER(Display)),
('resourceid', XID),
('serial', c_ulong),
('error_code', c_ubyte),
('request_code', c_ubyte),
('minor_code', c_ubyte),
]
XErrorEvent = struct_anon_67 # /usr/include/X11/Xlib.h:943
class struct_anon_68(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'window',
]
struct_anon_68._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('window', Window),
]
XAnyEvent = struct_anon_68 # /usr/include/X11/Xlib.h:951
class struct_anon_69(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'extension',
'evtype',
]
struct_anon_69._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('extension', c_int),
('evtype', c_int),
]
XGenericEvent = struct_anon_69 # /usr/include/X11/Xlib.h:967
class struct_anon_70(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'extension',
'evtype',
'cookie',
'data',
]
struct_anon_70._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('extension', c_int),
('evtype', c_int),
('cookie', c_uint),
('data', POINTER(None)),
]
XGenericEventCookie = struct_anon_70 # /usr/include/X11/Xlib.h:978
class struct__XEvent(Union):
__slots__ = [
'type',
'xany',
'xkey',
'xbutton',
'xmotion',
'xcrossing',
'xfocus',
'xexpose',
'xgraphicsexpose',
'xnoexpose',
'xvisibility',
'xcreatewindow',
'xdestroywindow',
'xunmap',
'xmap',
'xmaprequest',
'xreparent',
'xconfigure',
'xgravity',
'xresizerequest',
'xconfigurerequest',
'xcirculate',
'xcirculaterequest',
'xproperty',
'xselectionclear',
'xselectionrequest',
'xselection',
'xcolormap',
'xclient',
'xmapping',
'xerror',
'xkeymap',
'xgeneric',
'xcookie',
'pad',
]
struct__XEvent._fields_ = [
('type', c_int),
('xany', XAnyEvent),
('xkey', XKeyEvent),
('xbutton', XButtonEvent),
('xmotion', XMotionEvent),
('xcrossing', XCrossingEvent),
('xfocus', XFocusChangeEvent),
('xexpose', XExposeEvent),
('xgraphicsexpose', XGraphicsExposeEvent),
('xnoexpose', XNoExposeEvent),
('xvisibility', XVisibilityEvent),
('xcreatewindow', XCreateWindowEvent),
('xdestroywindow', XDestroyWindowEvent),
('xunmap', XUnmapEvent),
('xmap', XMapEvent),
('xmaprequest', XMapRequestEvent),
('xreparent', XReparentEvent),
('xconfigure', XConfigureEvent),
('xgravity', XGravityEvent),
('xresizerequest', XResizeRequestEvent),
('xconfigurerequest', XConfigureRequestEvent),
('xcirculate', XCirculateEvent),
('xcirculaterequest', XCirculateRequestEvent),
('xproperty', XPropertyEvent),
('xselectionclear', XSelectionClearEvent),
('xselectionrequest', XSelectionRequestEvent),
('xselection', XSelectionEvent),
('xcolormap', XColormapEvent),
('xclient', XClientMessageEvent),
('xmapping', XMappingEvent),
('xerror', XErrorEvent),
('xkeymap', XKeymapEvent),
('xgeneric', XGenericEvent),
('xcookie', XGenericEventCookie),
('pad', c_long * 24),
]
XEvent = struct__XEvent # /usr/include/X11/Xlib.h:1020
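# Hedged usage sketch: XEvent is a C union, so read the shared 'type' field
# first and only then touch the member it selects.  Assumes the X protocol
# event codes (KeyPress = 2, ...) and XNextEvent are bound elsewhere in this
# module:
#
#     ev = XEvent()
#     XNextEvent(display, byref(ev))
#     if ev.type == KeyPress:
#         print(ev.xkey.keycode, ev.xkey.state)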
class struct_anon_71(Structure):
__slots__ = [
'lbearing',
'rbearing',
'width',
'ascent',
'descent',
'attributes',
]
struct_anon_71._fields_ = [
('lbearing', c_short),
('rbearing', c_short),
('width', c_short),
('ascent', c_short),
('descent', c_short),
('attributes', c_ushort),
]
XCharStruct = struct_anon_71 # /usr/include/X11/Xlib.h:1035
class struct_anon_72(Structure):
__slots__ = [
'name',
'card32',
]
struct_anon_72._fields_ = [
('name', Atom),
('card32', c_ulong),
]
XFontProp = struct_anon_72 # /usr/include/X11/Xlib.h:1044
class struct_anon_73(Structure):
__slots__ = [
'ext_data',
'fid',
'direction',
'min_char_or_byte2',
'max_char_or_byte2',
'min_byte1',
'max_byte1',
'all_chars_exist',
'default_char',
'n_properties',
'properties',
'min_bounds',
'max_bounds',
'per_char',
'ascent',
'descent',
]
struct_anon_73._fields_ = [
('ext_data', POINTER(XExtData)),
('fid', Font),
('direction', c_uint),
('min_char_or_byte2', c_uint),
('max_char_or_byte2', c_uint),
('min_byte1', c_uint),
('max_byte1', c_uint),
('all_chars_exist', c_int),
('default_char', c_uint),
('n_properties', c_int),
('properties', POINTER(XFontProp)),
('min_bounds', XCharStruct),
('max_bounds', XCharStruct),
('per_char', POINTER(XCharStruct)),
('ascent', c_int),
('descent', c_int),
]
XFontStruct = struct_anon_73 # /usr/include/X11/Xlib.h:1063
class struct_anon_74(Structure):
__slots__ = [
'chars',
'nchars',
'delta',
'font',
]
struct_anon_74._fields_ = [
('chars', c_char_p),
('nchars', c_int),
('delta', c_int),
('font', Font),
]
XTextItem = struct_anon_74 # /usr/include/X11/Xlib.h:1073
class struct_anon_75(Structure):
__slots__ = [
'byte1',
'byte2',
]
struct_anon_75._fields_ = [
('byte1', c_ubyte),
('byte2', c_ubyte),
]
XChar2b = struct_anon_75 # /usr/include/X11/Xlib.h:1078
class struct_anon_76(Structure):
__slots__ = [
'chars',
'nchars',
'delta',
'font',
]
struct_anon_76._fields_ = [
('chars', POINTER(XChar2b)),
('nchars', c_int),
('delta', c_int),
('font', Font),
]
XTextItem16 = struct_anon_76 # /usr/include/X11/Xlib.h:1085
class struct_anon_77(Union):
__slots__ = [
'display',
'gc',
'visual',
'screen',
'pixmap_format',
'font',
]
struct_anon_77._fields_ = [
('display', POINTER(Display)),
('gc', GC),
('visual', POINTER(Visual)),
('screen', POINTER(Screen)),
('pixmap_format', POINTER(ScreenFormat)),
('font', POINTER(XFontStruct)),
]
XEDataObject = struct_anon_77 # /usr/include/X11/Xlib.h:1093
class struct_anon_78(Structure):
__slots__ = [
'max_ink_extent',
'max_logical_extent',
]
struct_anon_78._fields_ = [
('max_ink_extent', XRectangle),
('max_logical_extent', XRectangle),
]
XFontSetExtents = struct_anon_78 # /usr/include/X11/Xlib.h:1098
class struct__XOM(Structure):
__slots__ = [
]
struct__XOM._fields_ = [
('_opaque_struct', c_int)
]
XOM = POINTER(struct__XOM) # /usr/include/X11/Xlib.h:1104
class struct__XOC(Structure):
__slots__ = [
]
struct__XOC._fields_ = [
('_opaque_struct', c_int)
]
XOC = POINTER(struct__XOC) # /usr/include/X11/Xlib.h:1105
XFontSet = POINTER(struct__XOC) # /usr/include/X11/Xlib.h:1105
class struct_anon_79(Structure):
__slots__ = [
'chars',
'nchars',
'delta',
'font_set',
]
struct_anon_79._fields_ = [
('chars', c_char_p),
('nchars', c_int),
('delta', c_int),
('font_set', XFontSet),
]
XmbTextItem = struct_anon_79 # /usr/include/X11/Xlib.h:1112
class struct_anon_80(Structure):
__slots__ = [
'chars',
'nchars',
'delta',
'font_set',
]
struct_anon_80._fields_ = [
('chars', c_wchar_p),
('nchars', c_int),
('delta', c_int),
('font_set', XFontSet),
]
XwcTextItem = struct_anon_80 # /usr/include/X11/Xlib.h:1119
class struct_anon_81(Structure):
__slots__ = [
'charset_count',
'charset_list',
]
struct_anon_81._fields_ = [
('charset_count', c_int),
('charset_list', POINTER(c_char_p)),
]
XOMCharSetList = struct_anon_81 # /usr/include/X11/Xlib.h:1135
enum_anon_82 = c_int
XOMOrientation_LTR_TTB = 0
XOMOrientation_RTL_TTB = 1
XOMOrientation_TTB_LTR = 2
XOMOrientation_TTB_RTL = 3
XOMOrientation_Context = 4
XOrientation = enum_anon_82 # /usr/include/X11/Xlib.h:1143
class struct_anon_83(Structure):
__slots__ = [
'num_orientation',
'orientation',
]
struct_anon_83._fields_ = [
('num_orientation', c_int),
('orientation', POINTER(XOrientation)),
]
XOMOrientation = struct_anon_83 # /usr/include/X11/Xlib.h:1148
class struct_anon_84(Structure):
__slots__ = [
'num_font',
'font_struct_list',
'font_name_list',
]
struct_anon_84._fields_ = [
('num_font', c_int),
('font_struct_list', POINTER(POINTER(XFontStruct))),
('font_name_list', POINTER(c_char_p)),
]
XOMFontInfo = struct_anon_84 # /usr/include/X11/Xlib.h:1154
class struct__XIM(Structure):
__slots__ = [
]
struct__XIM._fields_ = [
('_opaque_struct', c_int)
]
XIM = POINTER(struct__XIM) # /usr/include/X11/Xlib.h:1156
class struct__XIC(Structure):
__slots__ = [
]
struct__XIC._fields_ = [
('_opaque_struct', c_int)
]
XIC = POINTER(struct__XIC) # /usr/include/X11/Xlib.h:1157
# /usr/include/X11/Xlib.h:1159
XIMProc = CFUNCTYPE(None, XIM, XPointer, XPointer)
# /usr/include/X11/Xlib.h:1165
XICProc = CFUNCTYPE(c_int, XIC, XPointer, XPointer)
# /usr/include/X11/Xlib.h:1171
XIDProc = CFUNCTYPE(None, POINTER(Display), XPointer, XPointer)
XIMStyle = c_ulong # /usr/include/X11/Xlib.h:1177
class struct_anon_85(Structure):
__slots__ = [
'count_styles',
'supported_styles',
]
struct_anon_85._fields_ = [
('count_styles', c_ushort),
('supported_styles', POINTER(XIMStyle)),
]
XIMStyles = struct_anon_85 # /usr/include/X11/Xlib.h:1182
XIMPreeditArea = 1 # /usr/include/X11/Xlib.h:1184
XIMPreeditCallbacks = 2 # /usr/include/X11/Xlib.h:1185
XIMPreeditPosition = 4 # /usr/include/X11/Xlib.h:1186
XIMPreeditNothing = 8 # /usr/include/X11/Xlib.h:1187
XIMPreeditNone = 16 # /usr/include/X11/Xlib.h:1188
XIMStatusArea = 256 # /usr/include/X11/Xlib.h:1189
XIMStatusCallbacks = 512 # /usr/include/X11/Xlib.h:1190
XIMStatusNothing = 1024 # /usr/include/X11/Xlib.h:1191
XIMStatusNone = 2048 # /usr/include/X11/Xlib.h:1192
XBufferOverflow = -1 # /usr/include/X11/Xlib.h:1238
XLookupNone = 1 # /usr/include/X11/Xlib.h:1239
XLookupChars = 2 # /usr/include/X11/Xlib.h:1240
XLookupKeySym = 3 # /usr/include/X11/Xlib.h:1241
XLookupBoth = 4 # /usr/include/X11/Xlib.h:1242
XVaNestedList = POINTER(None) # /usr/include/X11/Xlib.h:1244
class struct_anon_86(Structure):
__slots__ = [
'client_data',
'callback',
]
struct_anon_86._fields_ = [
('client_data', XPointer),
('callback', XIMProc),
]
XIMCallback = struct_anon_86 # /usr/include/X11/Xlib.h:1249
class struct_anon_87(Structure):
__slots__ = [
'client_data',
'callback',
]
struct_anon_87._fields_ = [
('client_data', XPointer),
('callback', XICProc),
]
XICCallback = struct_anon_87 # /usr/include/X11/Xlib.h:1254
XIMFeedback = c_ulong # /usr/include/X11/Xlib.h:1256
XIMReverse = 1 # /usr/include/X11/Xlib.h:1258
XIMUnderline = 2 # /usr/include/X11/Xlib.h:1259
XIMHighlight = 4 # /usr/include/X11/Xlib.h:1260
XIMPrimary = 32 # /usr/include/X11/Xlib.h:1261
XIMSecondary = 64 # /usr/include/X11/Xlib.h:1262
XIMTertiary = 128 # /usr/include/X11/Xlib.h:1263
XIMVisibleToForward = 256 # /usr/include/X11/Xlib.h:1264
XIMVisibleToBackword = 512 # /usr/include/X11/Xlib.h:1265
XIMVisibleToCenter = 1024 # /usr/include/X11/Xlib.h:1266
class struct__XIMText(Structure):
__slots__ = [
'length',
'feedback',
'encoding_is_wchar',
'string',
]
class struct_anon_88(Union):
__slots__ = [
'multi_byte',
'wide_char',
]
struct_anon_88._fields_ = [
('multi_byte', c_char_p),
('wide_char', c_wchar_p),
]
struct__XIMText._fields_ = [
('length', c_ushort),
('feedback', POINTER(XIMFeedback)),
('encoding_is_wchar', c_int),
('string', struct_anon_88),
]
XIMText = struct__XIMText # /usr/include/X11/Xlib.h:1276
XIMPreeditState = c_ulong # /usr/include/X11/Xlib.h:1278
XIMPreeditUnKnown = 0 # /usr/include/X11/Xlib.h:1280
XIMPreeditEnable = 1 # /usr/include/X11/Xlib.h:1281
XIMPreeditDisable = 2 # /usr/include/X11/Xlib.h:1282
class struct__XIMPreeditStateNotifyCallbackStruct(Structure):
__slots__ = [
'state',
]
struct__XIMPreeditStateNotifyCallbackStruct._fields_ = [
('state', XIMPreeditState),
]
# /usr/include/X11/Xlib.h:1286
XIMPreeditStateNotifyCallbackStruct = struct__XIMPreeditStateNotifyCallbackStruct
XIMResetState = c_ulong # /usr/include/X11/Xlib.h:1288
XIMInitialState = 1 # /usr/include/X11/Xlib.h:1290
XIMPreserveState = 2 # /usr/include/X11/Xlib.h:1291
XIMStringConversionFeedback = c_ulong # /usr/include/X11/Xlib.h:1293
XIMStringConversionLeftEdge = 1 # /usr/include/X11/Xlib.h:1295
XIMStringConversionRightEdge = 2 # /usr/include/X11/Xlib.h:1296
XIMStringConversionTopEdge = 4 # /usr/include/X11/Xlib.h:1297
XIMStringConversionBottomEdge = 8 # /usr/include/X11/Xlib.h:1298
XIMStringConversionConcealed = 16 # /usr/include/X11/Xlib.h:1299
XIMStringConversionWrapped = 32 # /usr/include/X11/Xlib.h:1300
class struct__XIMStringConversionText(Structure):
__slots__ = [
'length',
'feedback',
'encoding_is_wchar',
'string',
]
class struct_anon_89(Union):
__slots__ = [
'mbs',
'wcs',
]
struct_anon_89._fields_ = [
('mbs', c_char_p),
('wcs', c_wchar_p),
]
struct__XIMStringConversionText._fields_ = [
('length', c_ushort),
('feedback', POINTER(XIMStringConversionFeedback)),
('encoding_is_wchar', c_int),
('string', struct_anon_89),
]
# /usr/include/X11/Xlib.h:1310
XIMStringConversionText = struct__XIMStringConversionText
XIMStringConversionPosition = c_ushort # /usr/include/X11/Xlib.h:1312
XIMStringConversionType = c_ushort # /usr/include/X11/Xlib.h:1314
XIMStringConversionBuffer = 1 # /usr/include/X11/Xlib.h:1316
XIMStringConversionLine = 2 # /usr/include/X11/Xlib.h:1317
XIMStringConversionWord = 3 # /usr/include/X11/Xlib.h:1318
XIMStringConversionChar = 4 # /usr/include/X11/Xlib.h:1319
XIMStringConversionOperation = c_ushort # /usr/include/X11/Xlib.h:1321
XIMStringConversionSubstitution = 1 # /usr/include/X11/Xlib.h:1323
XIMStringConversionRetrieval = 2 # /usr/include/X11/Xlib.h:1324
enum_anon_90 = c_int
XIMForwardChar = 0
XIMBackwardChar = 1
XIMForwardWord = 2
XIMBackwardWord = 3
XIMCaretUp = 4
XIMCaretDown = 5
XIMNextLine = 6
XIMPreviousLine = 7
XIMLineStart = 8
XIMLineEnd = 9
XIMAbsolutePosition = 10
XIMDontChange = 11
XIMCaretDirection = enum_anon_90 # /usr/include/X11/Xlib.h:1334
class struct__XIMStringConversionCallbackStruct(Structure):
__slots__ = [
'position',
'direction',
'operation',
'factor',
'text',
]
struct__XIMStringConversionCallbackStruct._fields_ = [
('position', XIMStringConversionPosition),
('direction', XIMCaretDirection),
('operation', XIMStringConversionOperation),
('factor', c_ushort),
('text', POINTER(XIMStringConversionText)),
]
# /usr/include/X11/Xlib.h:1342
XIMStringConversionCallbackStruct = struct__XIMStringConversionCallbackStruct
class struct__XIMPreeditDrawCallbackStruct(Structure):
__slots__ = [
'caret',
'chg_first',
'chg_length',
'text',
]
struct__XIMPreeditDrawCallbackStruct._fields_ = [
('caret', c_int),
('chg_first', c_int),
('chg_length', c_int),
('text', POINTER(XIMText)),
]
# /usr/include/X11/Xlib.h:1349
XIMPreeditDrawCallbackStruct = struct__XIMPreeditDrawCallbackStruct
enum_anon_91 = c_int
XIMIsInvisible = 0
XIMIsPrimary = 1
XIMIsSecondary = 2
XIMCaretStyle = enum_anon_91 # /usr/include/X11/Xlib.h:1355
class struct__XIMPreeditCaretCallbackStruct(Structure):
__slots__ = [
'position',
'direction',
'style',
]
struct__XIMPreeditCaretCallbackStruct._fields_ = [
('position', c_int),
('direction', XIMCaretDirection),
('style', XIMCaretStyle),
]
# /usr/include/X11/Xlib.h:1361
XIMPreeditCaretCallbackStruct = struct__XIMPreeditCaretCallbackStruct
enum_anon_92 = c_int
XIMTextType = 0
XIMBitmapType = 1
XIMStatusDataType = enum_anon_92 # /usr/include/X11/Xlib.h:1366
class struct__XIMStatusDrawCallbackStruct(Structure):
__slots__ = [
'type',
'data',
]
class struct_anon_93(Union):
__slots__ = [
'text',
'bitmap',
]
struct_anon_93._fields_ = [
('text', POINTER(XIMText)),
('bitmap', Pixmap),
]
struct__XIMStatusDrawCallbackStruct._fields_ = [
('type', XIMStatusDataType),
('data', struct_anon_93),
]
# /usr/include/X11/Xlib.h:1374
XIMStatusDrawCallbackStruct = struct__XIMStatusDrawCallbackStruct
class struct__XIMHotKeyTrigger(Structure):
__slots__ = [
'keysym',
'modifier',
'modifier_mask',
]
struct__XIMHotKeyTrigger._fields_ = [
('keysym', KeySym),
('modifier', c_int),
('modifier_mask', c_int),
]
XIMHotKeyTrigger = struct__XIMHotKeyTrigger # /usr/include/X11/Xlib.h:1380
class struct__XIMHotKeyTriggers(Structure):
__slots__ = [
'num_hot_key',
'key',
]
struct__XIMHotKeyTriggers._fields_ = [
('num_hot_key', c_int),
('key', POINTER(XIMHotKeyTrigger)),
]
XIMHotKeyTriggers = struct__XIMHotKeyTriggers # /usr/include/X11/Xlib.h:1385
XIMHotKeyState = c_ulong # /usr/include/X11/Xlib.h:1387
XIMHotKeyStateON = 1 # /usr/include/X11/Xlib.h:1389
XIMHotKeyStateOFF = 2 # /usr/include/X11/Xlib.h:1390
class struct_anon_94(Structure):
__slots__ = [
'count_values',
'supported_values',
]
struct_anon_94._fields_ = [
('count_values', c_ushort),
('supported_values', POINTER(c_char_p)),
]
XIMValuesList = struct_anon_94 # /usr/include/X11/Xlib.h:1395
# /usr/include/X11/Xlib.h:1405
XLoadQueryFont = _lib.XLoadQueryFont
XLoadQueryFont.restype = POINTER(XFontStruct)
XLoadQueryFont.argtypes = [POINTER(Display), c_char_p]
# /usr/include/X11/Xlib.h:1410
XQueryFont = _lib.XQueryFont
XQueryFont.restype = POINTER(XFontStruct)
XQueryFont.argtypes = [POINTER(Display), XID]
# /usr/include/X11/Xlib.h:1416
XGetMotionEvents = _lib.XGetMotionEvents
XGetMotionEvents.restype = POINTER(XTimeCoord)
XGetMotionEvents.argtypes = [
POINTER(Display),
Window,
Time,
Time,
POINTER(c_int)]
# /usr/include/X11/Xlib.h:1424
XDeleteModifiermapEntry = _lib.XDeleteModifiermapEntry
XDeleteModifiermapEntry.restype = POINTER(XModifierKeymap)
XDeleteModifiermapEntry.argtypes = [POINTER(XModifierKeymap), KeyCode, c_int]
# /usr/include/X11/Xlib.h:1434
XGetModifierMapping = _lib.XGetModifierMapping
XGetModifierMapping.restype = POINTER(XModifierKeymap)
XGetModifierMapping.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1438
XInsertModifiermapEntry = _lib.XInsertModifiermapEntry
XInsertModifiermapEntry.restype = POINTER(XModifierKeymap)
XInsertModifiermapEntry.argtypes = [POINTER(XModifierKeymap), KeyCode, c_int]
# /usr/include/X11/Xlib.h:1448
XNewModifiermap = _lib.XNewModifiermap
XNewModifiermap.restype = POINTER(XModifierKeymap)
XNewModifiermap.argtypes = [c_int]
# /usr/include/X11/Xlib.h:1452
XCreateImage = _lib.XCreateImage
XCreateImage.restype = POINTER(XImage)
XCreateImage.argtypes = [
POINTER(Display),
POINTER(Visual),
c_uint,
c_int,
c_int,
c_char_p,
c_uint,
c_uint,
c_int,
c_int]
# /usr/include/X11/Xlib.h:1464
XInitImage = _lib.XInitImage
XInitImage.restype = c_int
XInitImage.argtypes = [POINTER(XImage)]
# /usr/include/X11/Xlib.h:1467
XGetImage = _lib.XGetImage
XGetImage.restype = POINTER(XImage)
XGetImage.argtypes = [
POINTER(Display),
Drawable,
c_int,
c_int,
c_uint,
c_uint,
c_ulong,
c_int]
# /usr/include/X11/Xlib.h:1477
XGetSubImage = _lib.XGetSubImage
XGetSubImage.restype = POINTER(XImage)
XGetSubImage.argtypes = [
POINTER(Display),
Drawable,
c_int,
c_int,
c_uint,
c_uint,
c_ulong,
c_int,
POINTER(XImage),
c_int,
c_int]
# /usr/include/X11/Xlib.h:1494
XOpenDisplay = _lib.XOpenDisplay
XOpenDisplay.restype = POINTER(Display)
XOpenDisplay.argtypes = [c_char_p]
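# Minimal sketch, assuming _lib was loaded from libX11 earlier in this module:
#
#     display = XOpenDisplay(None)    # None -> use the $DISPLAY environment variable
#     if not display:                 # a NULL pointer means the connection failed
#         raise RuntimeError('cannot open X display')
#     ...
#     XCloseDisplay(display)          # bound further below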
# /usr/include/X11/Xlib.h:1498
XrmInitialize = _lib.XrmInitialize
XrmInitialize.restype = None
XrmInitialize.argtypes = []
# /usr/include/X11/Xlib.h:1502
XFetchBytes = _lib.XFetchBytes
XFetchBytes.restype = c_char_p
XFetchBytes.argtypes = [POINTER(Display), POINTER(c_int)]
# /usr/include/X11/Xlib.h:1506
XFetchBuffer = _lib.XFetchBuffer
XFetchBuffer.restype = c_char_p
XFetchBuffer.argtypes = [POINTER(Display), POINTER(c_int), c_int]
# /usr/include/X11/Xlib.h:1511
XGetAtomName = _lib.XGetAtomName
XGetAtomName.restype = c_char_p
XGetAtomName.argtypes = [POINTER(Display), Atom]
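# Caveat: with restype = c_char_p, ctypes copies the result into a Python
# bytes object and discards the original pointer, so the server-allocated
# string can never be handed back to XFree.  Convenient, but each call leaks
# a small allocation; the same applies to XFetchBytes/XFetchBuffer above.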
# /usr/include/X11/Xlib.h:1515
XGetAtomNames = _lib.XGetAtomNames
XGetAtomNames.restype = c_int
XGetAtomNames.argtypes = [
POINTER(Display),
POINTER(Atom),
c_int,
POINTER(c_char_p)]
# /usr/include/X11/Xlib.h:1521
XGetDefault = _lib.XGetDefault
XGetDefault.restype = c_char_p
XGetDefault.argtypes = [POINTER(Display), c_char_p, c_char_p]
# /usr/include/X11/Xlib.h:1526
XDisplayName = _lib.XDisplayName
XDisplayName.restype = c_char_p
XDisplayName.argtypes = [c_char_p]
# /usr/include/X11/Xlib.h:1529
XKeysymToString = _lib.XKeysymToString
XKeysymToString.restype = c_char_p
XKeysymToString.argtypes = [KeySym]
# /usr/include/X11/Xlib.h:1533
XSynchronize = _lib.XSynchronize
XSynchronize.restype = POINTER(CFUNCTYPE(c_int, POINTER(Display)))
XSynchronize.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:1539
XSetAfterFunction = _lib.XSetAfterFunction
XSetAfterFunction.restype = POINTER(CFUNCTYPE(c_int, POINTER(Display)))
XSetAfterFunction.argtypes = [
    POINTER(Display),
    CFUNCTYPE(c_int, POINTER(Display))]
# /usr/include/X11/Xlib.h:1547
XInternAtom = _lib.XInternAtom
XInternAtom.restype = Atom
XInternAtom.argtypes = [POINTER(Display), c_char_p, c_int]
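# Hedged example -- the c_char_p argument must be bytes on Python 3; passing 0
# for the final only_if_exists flag asks the server to create the atom if it
# does not exist yet:
#
#     wm_delete = XInternAtom(display, b'WM_DELETE_WINDOW', 0)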
# /usr/include/X11/Xlib.h:1552
XInternAtoms = _lib.XInternAtoms
XInternAtoms.restype = c_int
XInternAtoms.argtypes = [
POINTER(Display),
POINTER(c_char_p),
c_int,
c_int,
POINTER(Atom)]
# /usr/include/X11/Xlib.h:1559
XCopyColormapAndFree = _lib.XCopyColormapAndFree
XCopyColormapAndFree.restype = Colormap
XCopyColormapAndFree.argtypes = [POINTER(Display), Colormap]
# /usr/include/X11/Xlib.h:1563
XCreateColormap = _lib.XCreateColormap
XCreateColormap.restype = Colormap
XCreateColormap.argtypes = [POINTER(Display), Window, POINTER(Visual), c_int]
# /usr/include/X11/Xlib.h:1569
XCreatePixmapCursor = _lib.XCreatePixmapCursor
XCreatePixmapCursor.restype = Cursor
XCreatePixmapCursor.argtypes = [
POINTER(Display),
Pixmap,
Pixmap,
POINTER(XColor),
POINTER(XColor),
c_uint,
c_uint]
# /usr/include/X11/Xlib.h:1578
XCreateGlyphCursor = _lib.XCreateGlyphCursor
XCreateGlyphCursor.restype = Cursor
XCreateGlyphCursor.argtypes = [
POINTER(Display),
Font,
Font,
c_uint,
c_uint,
POINTER(XColor),
POINTER(XColor)]
# /usr/include/X11/Xlib.h:1587
XCreateFontCursor = _lib.XCreateFontCursor
XCreateFontCursor.restype = Cursor
XCreateFontCursor.argtypes = [POINTER(Display), c_uint]
# /usr/include/X11/Xlib.h:1591
XLoadFont = _lib.XLoadFont
XLoadFont.restype = Font
XLoadFont.argtypes = [POINTER(Display), c_char_p]
# /usr/include/X11/Xlib.h:1595
XCreateGC = _lib.XCreateGC
XCreateGC.restype = GC
XCreateGC.argtypes = [POINTER(Display), Drawable, c_ulong, POINTER(XGCValues)]
# /usr/include/X11/Xlib.h:1601
XGContextFromGC = _lib.XGContextFromGC
XGContextFromGC.restype = GContext
XGContextFromGC.argtypes = [GC]
# /usr/include/X11/Xlib.h:1604
XFlushGC = _lib.XFlushGC
XFlushGC.restype = None
XFlushGC.argtypes = [POINTER(Display), GC]
# /usr/include/X11/Xlib.h:1608
XCreatePixmap = _lib.XCreatePixmap
XCreatePixmap.restype = Pixmap
XCreatePixmap.argtypes = [POINTER(Display), Drawable, c_uint, c_uint, c_uint]
# /usr/include/X11/Xlib.h:1615
XCreateBitmapFromData = _lib.XCreateBitmapFromData
XCreateBitmapFromData.restype = Pixmap
XCreateBitmapFromData.argtypes = [
POINTER(Display),
Drawable,
c_char_p,
c_uint,
c_uint]
# /usr/include/X11/Xlib.h:1622
XCreatePixmapFromBitmapData = _lib.XCreatePixmapFromBitmapData
XCreatePixmapFromBitmapData.restype = Pixmap
XCreatePixmapFromBitmapData.argtypes = [
POINTER(Display),
Drawable,
c_char_p,
c_uint,
c_uint,
c_ulong,
c_ulong,
c_uint]
# /usr/include/X11/Xlib.h:1632
XCreateSimpleWindow = _lib.XCreateSimpleWindow
XCreateSimpleWindow.restype = Window
XCreateSimpleWindow.argtypes = [
POINTER(Display),
Window,
c_int,
c_int,
c_uint,
c_uint,
c_uint,
c_ulong,
c_ulong]
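# Hedged sketch of the positional order (display, parent, x, y, width, height,
# border_width, border pixel, background pixel), using helpers bound below:
#
#     screen = XDefaultScreen(display)
#     win = XCreateSimpleWindow(display, XDefaultRootWindow(display),
#                               0, 0, 320, 200, 1,
#                               XBlackPixel(display, screen),
#                               XWhitePixel(display, screen))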
# /usr/include/X11/Xlib.h:1643
XGetSelectionOwner = _lib.XGetSelectionOwner
XGetSelectionOwner.restype = Window
XGetSelectionOwner.argtypes = [POINTER(Display), Atom]
# /usr/include/X11/Xlib.h:1647
XCreateWindow = _lib.XCreateWindow
XCreateWindow.restype = Window
XCreateWindow.argtypes = [
POINTER(Display),
Window,
c_int,
c_int,
c_uint,
c_uint,
c_uint,
c_int,
c_uint,
POINTER(Visual),
c_ulong,
POINTER(XSetWindowAttributes)]
# /usr/include/X11/Xlib.h:1661
XListInstalledColormaps = _lib.XListInstalledColormaps
XListInstalledColormaps.restype = POINTER(Colormap)
XListInstalledColormaps.argtypes = [POINTER(Display), Window, POINTER(c_int)]
# /usr/include/X11/Xlib.h:1666
XListFonts = _lib.XListFonts
XListFonts.restype = POINTER(c_char_p)
XListFonts.argtypes = [POINTER(Display), c_char_p, c_int, POINTER(c_int)]
# /usr/include/X11/Xlib.h:1672
XListFontsWithInfo = _lib.XListFontsWithInfo
XListFontsWithInfo.restype = POINTER(c_char_p)
XListFontsWithInfo.argtypes = [
    POINTER(Display),
    c_char_p,
    c_int,
    POINTER(c_int),
    POINTER(POINTER(XFontStruct))]
# /usr/include/X11/Xlib.h:1679
XGetFontPath = _lib.XGetFontPath
XGetFontPath.restype = POINTER(c_char_p)
XGetFontPath.argtypes = [POINTER(Display), POINTER(c_int)]
# /usr/include/X11/Xlib.h:1683
XListExtensions = _lib.XListExtensions
XListExtensions.restype = POINTER(c_char_p)
XListExtensions.argtypes = [POINTER(Display), POINTER(c_int)]
# /usr/include/X11/Xlib.h:1687
XListProperties = _lib.XListProperties
XListProperties.restype = POINTER(Atom)
XListProperties.argtypes = [POINTER(Display), Window, POINTER(c_int)]
# /usr/include/X11/Xlib.h:1692
XListHosts = _lib.XListHosts
XListHosts.restype = POINTER(XHostAddress)
XListHosts.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/Xlib.h:1697
XKeycodeToKeysym = _lib.XKeycodeToKeysym
XKeycodeToKeysym.restype = KeySym
XKeycodeToKeysym.argtypes = [POINTER(Display), KeyCode, c_int]
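# Note: XKeycodeToKeysym has long been deprecated upstream; XkbKeycodeToKeysym
# (from XKBlib.h, not wrapped here) is the usual replacement.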
# /usr/include/X11/Xlib.h:1706
XLookupKeysym = _lib.XLookupKeysym
XLookupKeysym.restype = KeySym
XLookupKeysym.argtypes = [POINTER(XKeyEvent), c_int]
# /usr/include/X11/Xlib.h:1710
XGetKeyboardMapping = _lib.XGetKeyboardMapping
XGetKeyboardMapping.restype = POINTER(KeySym)
XGetKeyboardMapping.argtypes = [
POINTER(Display),
KeyCode,
c_int,
POINTER(c_int)]
# /usr/include/X11/Xlib.h:1720
XStringToKeysym = _lib.XStringToKeysym
XStringToKeysym.restype = KeySym
XStringToKeysym.argtypes = [c_char_p]
# /usr/include/X11/Xlib.h:1723
XMaxRequestSize = _lib.XMaxRequestSize
XMaxRequestSize.restype = c_long
XMaxRequestSize.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1726
XExtendedMaxRequestSize = _lib.XExtendedMaxRequestSize
XExtendedMaxRequestSize.restype = c_long
XExtendedMaxRequestSize.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1729
XResourceManagerString = _lib.XResourceManagerString
XResourceManagerString.restype = c_char_p
XResourceManagerString.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1732
XScreenResourceString = _lib.XScreenResourceString
XScreenResourceString.restype = c_char_p
XScreenResourceString.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:1735
XDisplayMotionBufferSize = _lib.XDisplayMotionBufferSize
XDisplayMotionBufferSize.restype = c_ulong
XDisplayMotionBufferSize.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1738
XVisualIDFromVisual = _lib.XVisualIDFromVisual
XVisualIDFromVisual.restype = VisualID
XVisualIDFromVisual.argtypes = [POINTER(Visual)]
# /usr/include/X11/Xlib.h:1744
XInitThreads = _lib.XInitThreads
XInitThreads.restype = c_int
XInitThreads.argtypes = []
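# Note: multi-threaded clients must make XInitThreads the very first Xlib call
# in the process, before any XOpenDisplay.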
# /usr/include/X11/Xlib.h:1748
XLockDisplay = _lib.XLockDisplay
XLockDisplay.restype = None
XLockDisplay.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1752
XUnlockDisplay = _lib.XUnlockDisplay
XUnlockDisplay.restype = None
XUnlockDisplay.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1758
XInitExtension = _lib.XInitExtension
XInitExtension.restype = POINTER(XExtCodes)
XInitExtension.argtypes = [POINTER(Display), c_char_p]
# /usr/include/X11/Xlib.h:1763
XAddExtension = _lib.XAddExtension
XAddExtension.restype = POINTER(XExtCodes)
XAddExtension.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1766
XFindOnExtensionList = _lib.XFindOnExtensionList
XFindOnExtensionList.restype = POINTER(XExtData)
XFindOnExtensionList.argtypes = [POINTER(POINTER(XExtData)), c_int]
# /usr/include/X11/Xlib.h:1770
XEHeadOfExtensionList = _lib.XEHeadOfExtensionList
XEHeadOfExtensionList.restype = POINTER(POINTER(XExtData))
XEHeadOfExtensionList.argtypes = [XEDataObject]
# /usr/include/X11/Xlib.h:1775
XRootWindow = _lib.XRootWindow
XRootWindow.restype = Window
XRootWindow.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:1779
XDefaultRootWindow = _lib.XDefaultRootWindow
XDefaultRootWindow.restype = Window
XDefaultRootWindow.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1782
XRootWindowOfScreen = _lib.XRootWindowOfScreen
XRootWindowOfScreen.restype = Window
XRootWindowOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:1785
XDefaultVisual = _lib.XDefaultVisual
XDefaultVisual.restype = POINTER(Visual)
XDefaultVisual.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:1789
XDefaultVisualOfScreen = _lib.XDefaultVisualOfScreen
XDefaultVisualOfScreen.restype = POINTER(Visual)
XDefaultVisualOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:1792
XDefaultGC = _lib.XDefaultGC
XDefaultGC.restype = GC
XDefaultGC.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:1796
XDefaultGCOfScreen = _lib.XDefaultGCOfScreen
XDefaultGCOfScreen.restype = GC
XDefaultGCOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:1799
XBlackPixel = _lib.XBlackPixel
XBlackPixel.restype = c_ulong
XBlackPixel.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:1803
XWhitePixel = _lib.XWhitePixel
XWhitePixel.restype = c_ulong
XWhitePixel.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:1807
XAllPlanes = _lib.XAllPlanes
XAllPlanes.restype = c_ulong
XAllPlanes.argtypes = []
# /usr/include/X11/Xlib.h:1810
XBlackPixelOfScreen = _lib.XBlackPixelOfScreen
XBlackPixelOfScreen.restype = c_ulong
XBlackPixelOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:1813
XWhitePixelOfScreen = _lib.XWhitePixelOfScreen
XWhitePixelOfScreen.restype = c_ulong
XWhitePixelOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:1816
XNextRequest = _lib.XNextRequest
XNextRequest.restype = c_ulong
XNextRequest.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1819
XLastKnownRequestProcessed = _lib.XLastKnownRequestProcessed
XLastKnownRequestProcessed.restype = c_ulong
XLastKnownRequestProcessed.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1822
XServerVendor = _lib.XServerVendor
XServerVendor.restype = c_char_p
XServerVendor.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1825
XDisplayString = _lib.XDisplayString
XDisplayString.restype = c_char_p
XDisplayString.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1828
XDefaultColormap = _lib.XDefaultColormap
XDefaultColormap.restype = Colormap
XDefaultColormap.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:1832
XDefaultColormapOfScreen = _lib.XDefaultColormapOfScreen
XDefaultColormapOfScreen.restype = Colormap
XDefaultColormapOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:1835
XDisplayOfScreen = _lib.XDisplayOfScreen
XDisplayOfScreen.restype = POINTER(Display)
XDisplayOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:1838
XScreenOfDisplay = _lib.XScreenOfDisplay
XScreenOfDisplay.restype = POINTER(Screen)
XScreenOfDisplay.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:1842
XDefaultScreenOfDisplay = _lib.XDefaultScreenOfDisplay
XDefaultScreenOfDisplay.restype = POINTER(Screen)
XDefaultScreenOfDisplay.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1845
XEventMaskOfScreen = _lib.XEventMaskOfScreen
XEventMaskOfScreen.restype = c_long
XEventMaskOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:1849
XScreenNumberOfScreen = _lib.XScreenNumberOfScreen
XScreenNumberOfScreen.restype = c_int
XScreenNumberOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:1853
XErrorHandler = CFUNCTYPE(c_int, POINTER(Display), POINTER(XErrorEvent))
# /usr/include/X11/Xlib.h:1858
XSetErrorHandler = _lib.XSetErrorHandler
XSetErrorHandler.restype = XErrorHandler
XSetErrorHandler.argtypes = [XErrorHandler]
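# Hedged sketch: wrap a Python callable in XErrorHandler and keep a reference
# to the wrapper for the life of the program -- if ctypes garbage-collects the
# thunk, the next X error will crash the process:
#
#     def _on_error(display, event):
#         print('X error, code', event.contents.error_code)
#         return 0
#
#     _error_handler = XErrorHandler(_on_error)   # keep this reference alive
#     XSetErrorHandler(_error_handler)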
# /usr/include/X11/Xlib.h:1863
XIOErrorHandler = CFUNCTYPE(c_int, POINTER(Display))
# /usr/include/X11/Xlib.h:1867
XSetIOErrorHandler = _lib.XSetIOErrorHandler
XSetIOErrorHandler.restype = XIOErrorHandler
XSetIOErrorHandler.argtypes = [XIOErrorHandler]
# /usr/include/X11/Xlib.h:1872
XListPixmapFormats = _lib.XListPixmapFormats
XListPixmapFormats.restype = POINTER(XPixmapFormatValues)
XListPixmapFormats.argtypes = [POINTER(Display), POINTER(c_int)]
# /usr/include/X11/Xlib.h:1876
XListDepths = _lib.XListDepths
XListDepths.restype = POINTER(c_int)
XListDepths.argtypes = [POINTER(Display), c_int, POINTER(c_int)]
# /usr/include/X11/Xlib.h:1884
XReconfigureWMWindow = _lib.XReconfigureWMWindow
XReconfigureWMWindow.restype = c_int
XReconfigureWMWindow.argtypes = [
POINTER(Display),
Window,
c_int,
c_uint,
POINTER(XWindowChanges)]
# /usr/include/X11/Xlib.h:1892
XGetWMProtocols = _lib.XGetWMProtocols
XGetWMProtocols.restype = c_int
XGetWMProtocols.argtypes = [
    POINTER(Display), Window, POINTER(POINTER(Atom)), POINTER(c_int)]
# /usr/include/X11/Xlib.h:1898
XSetWMProtocols = _lib.XSetWMProtocols
XSetWMProtocols.restype = c_int
XSetWMProtocols.argtypes = [POINTER(Display), Window, POINTER(Atom), c_int]
# /usr/include/X11/Xlib.h:1904
XIconifyWindow = _lib.XIconifyWindow
XIconifyWindow.restype = c_int
XIconifyWindow.argtypes = [POINTER(Display), Window, c_int]
# /usr/include/X11/Xlib.h:1909
XWithdrawWindow = _lib.XWithdrawWindow
XWithdrawWindow.restype = c_int
XWithdrawWindow.argtypes = [POINTER(Display), Window, c_int]
# /usr/include/X11/Xlib.h:1914
XGetCommand = _lib.XGetCommand
XGetCommand.restype = c_int
XGetCommand.argtypes = [
    POINTER(Display),
    Window,
    POINTER(POINTER(c_char_p)),
    POINTER(c_int)]
# /usr/include/X11/Xlib.h:1920
XGetWMColormapWindows = _lib.XGetWMColormapWindows
XGetWMColormapWindows.restype = c_int
XGetWMColormapWindows.argtypes = [
    POINTER(Display), Window, POINTER(POINTER(Window)), POINTER(c_int)]
# /usr/include/X11/Xlib.h:1926
XSetWMColormapWindows = _lib.XSetWMColormapWindows
XSetWMColormapWindows.restype = c_int
XSetWMColormapWindows.argtypes = [
POINTER(Display), Window, POINTER(Window), c_int]
# /usr/include/X11/Xlib.h:1932
XFreeStringList = _lib.XFreeStringList
XFreeStringList.restype = None
XFreeStringList.argtypes = [POINTER(c_char_p)]
# /usr/include/X11/Xlib.h:1935
XSetTransientForHint = _lib.XSetTransientForHint
XSetTransientForHint.restype = c_int
XSetTransientForHint.argtypes = [POINTER(Display), Window, Window]
# /usr/include/X11/Xlib.h:1943
XActivateScreenSaver = _lib.XActivateScreenSaver
XActivateScreenSaver.restype = c_int
XActivateScreenSaver.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:1947
XAddHost = _lib.XAddHost
XAddHost.restype = c_int
XAddHost.argtypes = [POINTER(Display), POINTER(XHostAddress)]
# /usr/include/X11/Xlib.h:1952
XAddHosts = _lib.XAddHosts
XAddHosts.restype = c_int
XAddHosts.argtypes = [POINTER(Display), POINTER(XHostAddress), c_int]
# /usr/include/X11/Xlib.h:1958
XAddToExtensionList = _lib.XAddToExtensionList
XAddToExtensionList.restype = c_int
XAddToExtensionList.argtypes = [
    POINTER(POINTER(struct__XExtData)),
    POINTER(XExtData)]
# /usr/include/X11/Xlib.h:1963
XAddToSaveSet = _lib.XAddToSaveSet
XAddToSaveSet.restype = c_int
XAddToSaveSet.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:1968
XAllocColor = _lib.XAllocColor
XAllocColor.restype = c_int
XAllocColor.argtypes = [POINTER(Display), Colormap, POINTER(XColor)]
# /usr/include/X11/Xlib.h:1974
XAllocColorCells = _lib.XAllocColorCells
XAllocColorCells.restype = c_int
XAllocColorCells.argtypes = [
POINTER(Display),
Colormap,
c_int,
POINTER(c_ulong),
c_uint,
POINTER(c_ulong),
c_uint]
# /usr/include/X11/Xlib.h:1984
XAllocColorPlanes = _lib.XAllocColorPlanes
XAllocColorPlanes.restype = c_int
XAllocColorPlanes.argtypes = [
POINTER(Display),
Colormap,
c_int,
POINTER(c_ulong),
c_int,
c_int,
c_int,
c_int,
POINTER(c_ulong),
POINTER(c_ulong),
POINTER(c_ulong)]
# /usr/include/X11/Xlib.h:1998
XAllocNamedColor = _lib.XAllocNamedColor
XAllocNamedColor.restype = c_int
XAllocNamedColor.argtypes = [
POINTER(Display),
Colormap,
c_char_p,
POINTER(XColor),
POINTER(XColor)]
# /usr/include/X11/Xlib.h:2006
XAllowEvents = _lib.XAllowEvents
XAllowEvents.restype = c_int
XAllowEvents.argtypes = [POINTER(Display), c_int, Time]
# /usr/include/X11/Xlib.h:2012
XAutoRepeatOff = _lib.XAutoRepeatOff
XAutoRepeatOff.restype = c_int
XAutoRepeatOff.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2016
XAutoRepeatOn = _lib.XAutoRepeatOn
XAutoRepeatOn.restype = c_int
XAutoRepeatOn.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2020
XBell = _lib.XBell
XBell.restype = c_int
XBell.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:2025
XBitmapBitOrder = _lib.XBitmapBitOrder
XBitmapBitOrder.restype = c_int
XBitmapBitOrder.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2029
XBitmapPad = _lib.XBitmapPad
XBitmapPad.restype = c_int
XBitmapPad.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2033
XBitmapUnit = _lib.XBitmapUnit
XBitmapUnit.restype = c_int
XBitmapUnit.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2037
XCellsOfScreen = _lib.XCellsOfScreen
XCellsOfScreen.restype = c_int
XCellsOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:2041
XChangeActivePointerGrab = _lib.XChangeActivePointerGrab
XChangeActivePointerGrab.restype = c_int
XChangeActivePointerGrab.argtypes = [POINTER(Display), c_uint, Cursor, Time]
# /usr/include/X11/Xlib.h:2048
XChangeGC = _lib.XChangeGC
XChangeGC.restype = c_int
XChangeGC.argtypes = [POINTER(Display), GC, c_ulong, POINTER(XGCValues)]
# /usr/include/X11/Xlib.h:2055
XChangeKeyboardControl = _lib.XChangeKeyboardControl
XChangeKeyboardControl.restype = c_int
XChangeKeyboardControl.argtypes = [
POINTER(Display),
c_ulong,
POINTER(XKeyboardControl)]
# /usr/include/X11/Xlib.h:2061
XChangeKeyboardMapping = _lib.XChangeKeyboardMapping
XChangeKeyboardMapping.restype = c_int
XChangeKeyboardMapping.argtypes = [
POINTER(Display),
c_int,
c_int,
POINTER(KeySym),
c_int]
# /usr/include/X11/Xlib.h:2069
XChangePointerControl = _lib.XChangePointerControl
XChangePointerControl.restype = c_int
XChangePointerControl.argtypes = [
POINTER(Display), c_int, c_int, c_int, c_int, c_int]
# /usr/include/X11/Xlib.h:2078
XChangeProperty = _lib.XChangeProperty
XChangeProperty.restype = c_int
XChangeProperty.argtypes = [
POINTER(Display),
Window,
Atom,
Atom,
c_int,
c_int,
POINTER(c_ubyte),
c_int]
# /usr/include/X11/Xlib.h:2089
XChangeSaveSet = _lib.XChangeSaveSet
XChangeSaveSet.restype = c_int
XChangeSaveSet.argtypes = [POINTER(Display), Window, c_int]
# /usr/include/X11/Xlib.h:2095
XChangeWindowAttributes = _lib.XChangeWindowAttributes
XChangeWindowAttributes.restype = c_int
XChangeWindowAttributes.argtypes = [
POINTER(Display),
Window,
c_ulong,
POINTER(XSetWindowAttributes)]
# /usr/include/X11/Xlib.h:2102
XCheckIfEvent = _lib.XCheckIfEvent
XCheckIfEvent.restype = c_int
XCheckIfEvent.argtypes = [
    POINTER(Display),
    POINTER(XEvent),
    CFUNCTYPE(c_int, POINTER(Display), POINTER(XEvent), XPointer),
    XPointer]
# /usr/include/X11/Xlib.h:2113
XCheckMaskEvent = _lib.XCheckMaskEvent
XCheckMaskEvent.restype = c_int
XCheckMaskEvent.argtypes = [POINTER(Display), c_long, POINTER(XEvent)]
# /usr/include/X11/Xlib.h:2119
XCheckTypedEvent = _lib.XCheckTypedEvent
XCheckTypedEvent.restype = c_int
XCheckTypedEvent.argtypes = [POINTER(Display), c_int, POINTER(XEvent)]
# /usr/include/X11/Xlib.h:2125
XCheckTypedWindowEvent = _lib.XCheckTypedWindowEvent
XCheckTypedWindowEvent.restype = c_int
XCheckTypedWindowEvent.argtypes = [
POINTER(Display), Window, c_int, POINTER(XEvent)]
# /usr/include/X11/Xlib.h:2132
XCheckWindowEvent = _lib.XCheckWindowEvent
XCheckWindowEvent.restype = c_int
XCheckWindowEvent.argtypes = [
POINTER(Display),
Window,
c_long,
POINTER(XEvent)]
# /usr/include/X11/Xlib.h:2139
XCirculateSubwindows = _lib.XCirculateSubwindows
XCirculateSubwindows.restype = c_int
XCirculateSubwindows.argtypes = [POINTER(Display), Window, c_int]
# /usr/include/X11/Xlib.h:2145
XCirculateSubwindowsDown = _lib.XCirculateSubwindowsDown
XCirculateSubwindowsDown.restype = c_int
XCirculateSubwindowsDown.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:2150
XCirculateSubwindowsUp = _lib.XCirculateSubwindowsUp
XCirculateSubwindowsUp.restype = c_int
XCirculateSubwindowsUp.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:2155
XClearArea = _lib.XClearArea
XClearArea.restype = c_int
XClearArea.argtypes = [
POINTER(Display),
Window,
c_int,
c_int,
c_uint,
c_uint,
c_int]
# /usr/include/X11/Xlib.h:2165
XClearWindow = _lib.XClearWindow
XClearWindow.restype = c_int
XClearWindow.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:2170
XCloseDisplay = _lib.XCloseDisplay
XCloseDisplay.restype = c_int
XCloseDisplay.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2174
XConfigureWindow = _lib.XConfigureWindow
XConfigureWindow.restype = c_int
XConfigureWindow.argtypes = [
POINTER(Display),
Window,
c_uint,
POINTER(XWindowChanges)]
# /usr/include/X11/Xlib.h:2181
XConnectionNumber = _lib.XConnectionNumber
XConnectionNumber.restype = c_int
XConnectionNumber.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2185
XConvertSelection = _lib.XConvertSelection
XConvertSelection.restype = c_int
XConvertSelection.argtypes = [POINTER(Display), Atom, Atom, Atom, Window, Time]
# /usr/include/X11/Xlib.h:2194
XCopyArea = _lib.XCopyArea
XCopyArea.restype = c_int
XCopyArea.argtypes = [
POINTER(Display),
Drawable,
Drawable,
GC,
c_int,
c_int,
c_uint,
c_uint,
c_int,
c_int]
# /usr/include/X11/Xlib.h:2207
XCopyGC = _lib.XCopyGC
XCopyGC.restype = c_int
XCopyGC.argtypes = [POINTER(Display), GC, c_ulong, GC]
# /usr/include/X11/Xlib.h:2214
XCopyPlane = _lib.XCopyPlane
XCopyPlane.restype = c_int
XCopyPlane.argtypes = [
POINTER(Display),
Drawable,
Drawable,
GC,
c_int,
c_int,
c_uint,
c_uint,
c_int,
c_int,
c_ulong]
# /usr/include/X11/Xlib.h:2228
XDefaultDepth = _lib.XDefaultDepth
XDefaultDepth.restype = c_int
XDefaultDepth.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:2233
XDefaultDepthOfScreen = _lib.XDefaultDepthOfScreen
XDefaultDepthOfScreen.restype = c_int
XDefaultDepthOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:2237
XDefaultScreen = _lib.XDefaultScreen
XDefaultScreen.restype = c_int
XDefaultScreen.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2241
XDefineCursor = _lib.XDefineCursor
XDefineCursor.restype = c_int
XDefineCursor.argtypes = [POINTER(Display), Window, Cursor]
# /usr/include/X11/Xlib.h:2247
XDeleteProperty = _lib.XDeleteProperty
XDeleteProperty.restype = c_int
XDeleteProperty.argtypes = [POINTER(Display), Window, Atom]
# /usr/include/X11/Xlib.h:2253
XDestroyWindow = _lib.XDestroyWindow
XDestroyWindow.restype = c_int
XDestroyWindow.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:2258
XDestroySubwindows = _lib.XDestroySubwindows
XDestroySubwindows.restype = c_int
XDestroySubwindows.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:2263
XDoesBackingStore = _lib.XDoesBackingStore
XDoesBackingStore.restype = c_int
XDoesBackingStore.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:2267
XDoesSaveUnders = _lib.XDoesSaveUnders
XDoesSaveUnders.restype = c_int
XDoesSaveUnders.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:2271
XDisableAccessControl = _lib.XDisableAccessControl
XDisableAccessControl.restype = c_int
XDisableAccessControl.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2276
XDisplayCells = _lib.XDisplayCells
XDisplayCells.restype = c_int
XDisplayCells.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:2281
XDisplayHeight = _lib.XDisplayHeight
XDisplayHeight.restype = c_int
XDisplayHeight.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:2286
XDisplayHeightMM = _lib.XDisplayHeightMM
XDisplayHeightMM.restype = c_int
XDisplayHeightMM.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:2291
XDisplayKeycodes = _lib.XDisplayKeycodes
XDisplayKeycodes.restype = c_int
XDisplayKeycodes.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/Xlib.h:2297
XDisplayPlanes = _lib.XDisplayPlanes
XDisplayPlanes.restype = c_int
XDisplayPlanes.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:2302
XDisplayWidth = _lib.XDisplayWidth
XDisplayWidth.restype = c_int
XDisplayWidth.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:2307
XDisplayWidthMM = _lib.XDisplayWidthMM
XDisplayWidthMM.restype = c_int
XDisplayWidthMM.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:2312
XDrawArc = _lib.XDrawArc
XDrawArc.restype = c_int
XDrawArc.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
c_uint,
c_uint,
c_int,
c_int]
# /usr/include/X11/Xlib.h:2324
XDrawArcs = _lib.XDrawArcs
XDrawArcs.restype = c_int
XDrawArcs.argtypes = [POINTER(Display), Drawable, GC, POINTER(XArc), c_int]
# /usr/include/X11/Xlib.h:2332
XDrawImageString = _lib.XDrawImageString
XDrawImageString.restype = c_int
XDrawImageString.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
c_char_p,
c_int]
# /usr/include/X11/Xlib.h:2342
XDrawImageString16 = _lib.XDrawImageString16
XDrawImageString16.restype = c_int
XDrawImageString16.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
POINTER(XChar2b),
c_int]
# /usr/include/X11/Xlib.h:2352
XDrawLine = _lib.XDrawLine
XDrawLine.restype = c_int
XDrawLine.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
c_int,
c_int]
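# Illustrative sketch (not part of the generated bindings): how the drawing
# declarations above are typically driven. Hedged example only -- `display`,
# `drawable` and `gc` are assumed to come from XOpenDisplay/XCreateGC calls
# made elsewhere; the function is defined here but never invoked at import.
def _example_draw_cross(display, drawable, gc, width, height):
    """Draw two diagonal lines across a drawable and flush the request."""
    XDrawLine(display, drawable, gc, 0, 0, width, height)
    XDrawLine(display, drawable, gc, 0, height, width, 0)
    XFlush(display)  # push buffered requests to the server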
# /usr/include/X11/Xlib.h:2362
XDrawLines = _lib.XDrawLines
XDrawLines.restype = c_int
XDrawLines.argtypes = [
POINTER(Display),
Drawable,
GC,
POINTER(XPoint),
c_int,
c_int]
# /usr/include/X11/Xlib.h:2371
XDrawPoint = _lib.XDrawPoint
XDrawPoint.restype = c_int
XDrawPoint.argtypes = [POINTER(Display), Drawable, GC, c_int, c_int]
# /usr/include/X11/Xlib.h:2379
XDrawPoints = _lib.XDrawPoints
XDrawPoints.restype = c_int
XDrawPoints.argtypes = [
POINTER(Display),
Drawable,
GC,
POINTER(XPoint),
c_int,
c_int]
# /usr/include/X11/Xlib.h:2388
XDrawRectangle = _lib.XDrawRectangle
XDrawRectangle.restype = c_int
XDrawRectangle.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
c_uint,
c_uint]
# /usr/include/X11/Xlib.h:2398
XDrawRectangles = _lib.XDrawRectangles
XDrawRectangles.restype = c_int
XDrawRectangles.argtypes = [
POINTER(Display),
Drawable,
GC,
POINTER(XRectangle),
c_int]
# /usr/include/X11/Xlib.h:2406
XDrawSegments = _lib.XDrawSegments
XDrawSegments.restype = c_int
XDrawSegments.argtypes = [
POINTER(Display),
Drawable,
GC,
POINTER(XSegment),
c_int]
# /usr/include/X11/Xlib.h:2414
XDrawString = _lib.XDrawString
XDrawString.restype = c_int
XDrawString.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
c_char_p,
c_int]
# /usr/include/X11/Xlib.h:2424
XDrawString16 = _lib.XDrawString16
XDrawString16.restype = c_int
XDrawString16.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
POINTER(XChar2b),
c_int]
# /usr/include/X11/Xlib.h:2434
XDrawText = _lib.XDrawText
XDrawText.restype = c_int
XDrawText.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
POINTER(XTextItem),
c_int]
# /usr/include/X11/Xlib.h:2444
XDrawText16 = _lib.XDrawText16
XDrawText16.restype = c_int
XDrawText16.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
POINTER(XTextItem16),
c_int]
# /usr/include/X11/Xlib.h:2454
XEnableAccessControl = _lib.XEnableAccessControl
XEnableAccessControl.restype = c_int
XEnableAccessControl.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2458
XEventsQueued = _lib.XEventsQueued
XEventsQueued.restype = c_int
XEventsQueued.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:2463
XFetchName = _lib.XFetchName
XFetchName.restype = c_int
XFetchName.argtypes = [POINTER(Display), Window, POINTER(c_char_p)]
# /usr/include/X11/Xlib.h:2469
XFillArc = _lib.XFillArc
XFillArc.restype = c_int
XFillArc.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
c_uint,
c_uint,
c_int,
c_int]
# /usr/include/X11/Xlib.h:2481
XFillArcs = _lib.XFillArcs
XFillArcs.restype = c_int
XFillArcs.argtypes = [POINTER(Display), Drawable, GC, POINTER(XArc), c_int]
# /usr/include/X11/Xlib.h:2489
XFillPolygon = _lib.XFillPolygon
XFillPolygon.restype = c_int
XFillPolygon.argtypes = [
POINTER(Display),
Drawable,
GC,
POINTER(XPoint),
c_int,
c_int,
c_int]
# /usr/include/X11/Xlib.h:2499
XFillRectangle = _lib.XFillRectangle
XFillRectangle.restype = c_int
XFillRectangle.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
c_uint,
c_uint]
# /usr/include/X11/Xlib.h:2509
XFillRectangles = _lib.XFillRectangles
XFillRectangles.restype = c_int
XFillRectangles.argtypes = [
POINTER(Display),
Drawable,
GC,
POINTER(XRectangle),
c_int]
# /usr/include/X11/Xlib.h:2517
XFlush = _lib.XFlush
XFlush.restype = c_int
XFlush.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2521
XForceScreenSaver = _lib.XForceScreenSaver
XForceScreenSaver.restype = c_int
XForceScreenSaver.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:2526
XFree = _lib.XFree
XFree.restype = c_int
XFree.argtypes = [POINTER(None)]
# /usr/include/X11/Xlib.h:2530
XFreeColormap = _lib.XFreeColormap
XFreeColormap.restype = c_int
XFreeColormap.argtypes = [POINTER(Display), Colormap]
# /usr/include/X11/Xlib.h:2535
XFreeColors = _lib.XFreeColors
XFreeColors.restype = c_int
XFreeColors.argtypes = [
POINTER(Display),
Colormap,
POINTER(c_ulong),
c_int,
c_ulong]
# /usr/include/X11/Xlib.h:2543
XFreeCursor = _lib.XFreeCursor
XFreeCursor.restype = c_int
XFreeCursor.argtypes = [POINTER(Display), Cursor]
# /usr/include/X11/Xlib.h:2548
XFreeExtensionList = _lib.XFreeExtensionList
XFreeExtensionList.restype = c_int
XFreeExtensionList.argtypes = [POINTER(c_char_p)]
# /usr/include/X11/Xlib.h:2552
XFreeFont = _lib.XFreeFont
XFreeFont.restype = c_int
XFreeFont.argtypes = [POINTER(Display), POINTER(XFontStruct)]
# /usr/include/X11/Xlib.h:2557
XFreeFontInfo = _lib.XFreeFontInfo
XFreeFontInfo.restype = c_int
XFreeFontInfo.argtypes = [POINTER(c_char_p), POINTER(XFontStruct), c_int]
# /usr/include/X11/Xlib.h:2563
XFreeFontNames = _lib.XFreeFontNames
XFreeFontNames.restype = c_int
XFreeFontNames.argtypes = [POINTER(c_char_p)]
# /usr/include/X11/Xlib.h:2567
XFreeFontPath = _lib.XFreeFontPath
XFreeFontPath.restype = c_int
XFreeFontPath.argtypes = [POINTER(c_char_p)]
# /usr/include/X11/Xlib.h:2571
XFreeGC = _lib.XFreeGC
XFreeGC.restype = c_int
XFreeGC.argtypes = [POINTER(Display), GC]
# /usr/include/X11/Xlib.h:2576
XFreeModifiermap = _lib.XFreeModifiermap
XFreeModifiermap.restype = c_int
XFreeModifiermap.argtypes = [POINTER(XModifierKeymap)]
# /usr/include/X11/Xlib.h:2580
XFreePixmap = _lib.XFreePixmap
XFreePixmap.restype = c_int
XFreePixmap.argtypes = [POINTER(Display), Pixmap]
# /usr/include/X11/Xlib.h:2585
XGeometry = _lib.XGeometry
XGeometry.restype = c_int
XGeometry.argtypes = [
POINTER(Display),
c_int,
c_char_p,
c_char_p,
c_uint,
c_uint,
c_uint,
c_int,
c_int,
POINTER(c_int),
POINTER(c_int),
POINTER(c_int),
POINTER(c_int)]
# /usr/include/X11/Xlib.h:2601
XGetErrorDatabaseText = _lib.XGetErrorDatabaseText
XGetErrorDatabaseText.restype = c_int
XGetErrorDatabaseText.argtypes = [
POINTER(Display),
c_char_p,
c_char_p,
c_char_p,
c_char_p,
c_int]
# /usr/include/X11/Xlib.h:2610
XGetErrorText = _lib.XGetErrorText
XGetErrorText.restype = c_int
XGetErrorText.argtypes = [POINTER(Display), c_int, c_char_p, c_int]
# /usr/include/X11/Xlib.h:2617
XGetFontProperty = _lib.XGetFontProperty
XGetFontProperty.restype = c_int
XGetFontProperty.argtypes = [POINTER(XFontStruct), Atom, POINTER(c_ulong)]
# /usr/include/X11/Xlib.h:2623
XGetGCValues = _lib.XGetGCValues
XGetGCValues.restype = c_int
XGetGCValues.argtypes = [POINTER(Display), GC, c_ulong, POINTER(XGCValues)]
# /usr/include/X11/Xlib.h:2630
XGetGeometry = _lib.XGetGeometry
XGetGeometry.restype = c_int
XGetGeometry.argtypes = [
POINTER(Display),
Drawable,
POINTER(Window),
POINTER(c_int),
POINTER(c_int),
POINTER(c_uint),
POINTER(c_uint),
POINTER(c_uint),
POINTER(c_uint)]
# /usr/include/X11/Xlib.h:2642
XGetIconName = _lib.XGetIconName
XGetIconName.restype = c_int
XGetIconName.argtypes = [POINTER(Display), Window, POINTER(c_char_p)]
# /usr/include/X11/Xlib.h:2648
XGetInputFocus = _lib.XGetInputFocus
XGetInputFocus.restype = c_int
XGetInputFocus.argtypes = [POINTER(Display), POINTER(Window), POINTER(c_int)]
# /usr/include/X11/Xlib.h:2654
XGetKeyboardControl = _lib.XGetKeyboardControl
XGetKeyboardControl.restype = c_int
XGetKeyboardControl.argtypes = [POINTER(Display), POINTER(XKeyboardState)]
# /usr/include/X11/Xlib.h:2659
XGetPointerControl = _lib.XGetPointerControl
XGetPointerControl.restype = c_int
XGetPointerControl.argtypes = [
POINTER(Display),
POINTER(c_int),
POINTER(c_int),
POINTER(c_int)]
# /usr/include/X11/Xlib.h:2666
XGetPointerMapping = _lib.XGetPointerMapping
XGetPointerMapping.restype = c_int
XGetPointerMapping.argtypes = [POINTER(Display), POINTER(c_ubyte), c_int]
# /usr/include/X11/Xlib.h:2672
XGetScreenSaver = _lib.XGetScreenSaver
XGetScreenSaver.restype = c_int
XGetScreenSaver.argtypes = [
POINTER(Display),
POINTER(c_int),
POINTER(c_int),
POINTER(c_int),
POINTER(c_int)]
# /usr/include/X11/Xlib.h:2680
XGetTransientForHint = _lib.XGetTransientForHint
XGetTransientForHint.restype = c_int
XGetTransientForHint.argtypes = [POINTER(Display), Window, POINTER(Window)]
# /usr/include/X11/Xlib.h:2686
XGetWindowProperty = _lib.XGetWindowProperty
XGetWindowProperty.restype = c_int
XGetWindowProperty.argtypes = [
POINTER(Display),
Window,
Atom,
c_long,
c_long,
c_int,
Atom,
POINTER(Atom),
POINTER(c_int),
POINTER(c_ulong),
POINTER(c_ulong),
    POINTER(POINTER(c_ubyte))]
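# Illustrative sketch (not part of the generated bindings): reading a window
# property through XGetWindowProperty. Hedged example -- `display`/`window`
# are assumed to exist, AnyPropertyType is 0 per X.h, and only 8-bit-format
# data is handled; everything else is returned as None.
def _example_read_property(display, window, prop_atom):
    """Return a property's raw bytes, or None if missing or non-8-bit."""
    actual_type = Atom()
    actual_format = c_int()
    nitems = c_ulong()
    bytes_after = c_ulong()
    data = POINTER(c_ubyte)()
    status = XGetWindowProperty(
        display, window, prop_atom,
        0, 1024, False, 0,  # offset, max length, delete=False, AnyPropertyType
        byref(actual_type), byref(actual_format),
        byref(nitems), byref(bytes_after), byref(data))
    if status != 0 or not data:  # 0 == Success in Xlib
        return None
    raw = bytes(data[:nitems.value]) if actual_format.value == 8 else None
    XFree(data)  # Xlib allocated the buffer; the caller must free it
    return raw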
# /usr/include/X11/Xlib.h:2701
XGetWindowAttributes = _lib.XGetWindowAttributes
XGetWindowAttributes.restype = c_int
XGetWindowAttributes.argtypes = [
POINTER(Display),
Window,
POINTER(XWindowAttributes)]
# /usr/include/X11/Xlib.h:2707
XGrabButton = _lib.XGrabButton
XGrabButton.restype = c_int
XGrabButton.argtypes = [
POINTER(Display),
c_uint,
c_uint,
Window,
c_int,
c_uint,
c_int,
c_int,
Window,
Cursor]
# /usr/include/X11/Xlib.h:2720
XGrabKey = _lib.XGrabKey
XGrabKey.restype = c_int
XGrabKey.argtypes = [
POINTER(Display),
c_int,
c_uint,
Window,
c_int,
c_int,
c_int]
# /usr/include/X11/Xlib.h:2730
XGrabKeyboard = _lib.XGrabKeyboard
XGrabKeyboard.restype = c_int
XGrabKeyboard.argtypes = [POINTER(Display), Window, c_int, c_int, c_int, Time]
# /usr/include/X11/Xlib.h:2739
XGrabPointer = _lib.XGrabPointer
XGrabPointer.restype = c_int
XGrabPointer.argtypes = [
POINTER(Display),
Window,
c_int,
c_uint,
c_int,
c_int,
Window,
Cursor,
Time]
# /usr/include/X11/Xlib.h:2751
XGrabServer = _lib.XGrabServer
XGrabServer.restype = c_int
XGrabServer.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2755
XHeightMMOfScreen = _lib.XHeightMMOfScreen
XHeightMMOfScreen.restype = c_int
XHeightMMOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:2759
XHeightOfScreen = _lib.XHeightOfScreen
XHeightOfScreen.restype = c_int
XHeightOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:2763
XIfEvent = _lib.XIfEvent
XIfEvent.restype = c_int
XIfEvent.argtypes = [
POINTER(Display),
POINTER(XEvent),
CFUNCTYPE(
c_int,
POINTER(Display),
POINTER(XEvent),
XPointer),
XPointer]
# /usr/include/X11/Xlib.h:2774
XImageByteOrder = _lib.XImageByteOrder
XImageByteOrder.restype = c_int
XImageByteOrder.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2778
XInstallColormap = _lib.XInstallColormap
XInstallColormap.restype = c_int
XInstallColormap.argtypes = [POINTER(Display), Colormap]
# /usr/include/X11/Xlib.h:2783
XKeysymToKeycode = _lib.XKeysymToKeycode
XKeysymToKeycode.restype = KeyCode
XKeysymToKeycode.argtypes = [POINTER(Display), KeySym]
# /usr/include/X11/Xlib.h:2788
XKillClient = _lib.XKillClient
XKillClient.restype = c_int
XKillClient.argtypes = [POINTER(Display), XID]
# /usr/include/X11/Xlib.h:2793
XLookupColor = _lib.XLookupColor
XLookupColor.restype = c_int
XLookupColor.argtypes = [
POINTER(Display),
Colormap,
c_char_p,
POINTER(XColor),
POINTER(XColor)]
# /usr/include/X11/Xlib.h:2801
XLowerWindow = _lib.XLowerWindow
XLowerWindow.restype = c_int
XLowerWindow.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:2806
XMapRaised = _lib.XMapRaised
XMapRaised.restype = c_int
XMapRaised.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:2811
XMapSubwindows = _lib.XMapSubwindows
XMapSubwindows.restype = c_int
XMapSubwindows.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:2816
XMapWindow = _lib.XMapWindow
XMapWindow.restype = c_int
XMapWindow.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:2821
XMaskEvent = _lib.XMaskEvent
XMaskEvent.restype = c_int
XMaskEvent.argtypes = [POINTER(Display), c_long, POINTER(XEvent)]
# /usr/include/X11/Xlib.h:2827
XMaxCmapsOfScreen = _lib.XMaxCmapsOfScreen
XMaxCmapsOfScreen.restype = c_int
XMaxCmapsOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:2831
XMinCmapsOfScreen = _lib.XMinCmapsOfScreen
XMinCmapsOfScreen.restype = c_int
XMinCmapsOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:2835
XMoveResizeWindow = _lib.XMoveResizeWindow
XMoveResizeWindow.restype = c_int
XMoveResizeWindow.argtypes = [
POINTER(Display),
Window,
c_int,
c_int,
c_uint,
c_uint]
# /usr/include/X11/Xlib.h:2844
XMoveWindow = _lib.XMoveWindow
XMoveWindow.restype = c_int
XMoveWindow.argtypes = [POINTER(Display), Window, c_int, c_int]
# /usr/include/X11/Xlib.h:2851
XNextEvent = _lib.XNextEvent
XNextEvent.restype = c_int
XNextEvent.argtypes = [POINTER(Display), POINTER(XEvent)]
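# Illustrative sketch (not part of the generated bindings): the canonical
# pump around XPending and XNextEvent. XNextEvent blocks on an empty queue,
# so the loop below drains without blocking; dispatch on `ev.type` is left
# open. Defined for illustration only, never called at import time.
def _example_drain_events(display):
    """Process every event currently queued, then return."""
    ev = XEvent()
    while XPending(display) > 0:
        XNextEvent(display, byref(ev))
        # inspect ev.type here and hand off to the matching handler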
# /usr/include/X11/Xlib.h:2856
XNoOp = _lib.XNoOp
XNoOp.restype = c_int
XNoOp.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2860
XParseColor = _lib.XParseColor
XParseColor.restype = c_int
XParseColor.argtypes = [POINTER(Display), Colormap, c_char_p, POINTER(XColor)]
# /usr/include/X11/Xlib.h:2867
XParseGeometry = _lib.XParseGeometry
XParseGeometry.restype = c_int
XParseGeometry.argtypes = [
c_char_p,
POINTER(c_int),
POINTER(c_int),
POINTER(c_uint),
POINTER(c_uint)]
# /usr/include/X11/Xlib.h:2875
XPeekEvent = _lib.XPeekEvent
XPeekEvent.restype = c_int
XPeekEvent.argtypes = [POINTER(Display), POINTER(XEvent)]
# /usr/include/X11/Xlib.h:2880
XPeekIfEvent = _lib.XPeekIfEvent
XPeekIfEvent.restype = c_int
XPeekIfEvent.argtypes = [
POINTER(Display),
POINTER(XEvent),
CFUNCTYPE(
c_int,
POINTER(Display),
POINTER(XEvent),
XPointer),
XPointer]
# /usr/include/X11/Xlib.h:2891
XPending = _lib.XPending
XPending.restype = c_int
XPending.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2895
XPlanesOfScreen = _lib.XPlanesOfScreen
XPlanesOfScreen.restype = c_int
XPlanesOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:2899
XProtocolRevision = _lib.XProtocolRevision
XProtocolRevision.restype = c_int
XProtocolRevision.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2903
XProtocolVersion = _lib.XProtocolVersion
XProtocolVersion.restype = c_int
XProtocolVersion.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2908
XPutBackEvent = _lib.XPutBackEvent
XPutBackEvent.restype = c_int
XPutBackEvent.argtypes = [POINTER(Display), POINTER(XEvent)]
# /usr/include/X11/Xlib.h:2913
XPutImage = _lib.XPutImage
XPutImage.restype = c_int
XPutImage.argtypes = [
POINTER(Display),
Drawable,
GC,
POINTER(XImage),
c_int,
c_int,
c_int,
c_int,
c_uint,
c_uint]
# /usr/include/X11/Xlib.h:2926
XQLength = _lib.XQLength
XQLength.restype = c_int
XQLength.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:2930
XQueryBestCursor = _lib.XQueryBestCursor
XQueryBestCursor.restype = c_int
XQueryBestCursor.argtypes = [
POINTER(Display),
Drawable,
c_uint,
c_uint,
POINTER(c_uint),
POINTER(c_uint)]
# /usr/include/X11/Xlib.h:2939
XQueryBestSize = _lib.XQueryBestSize
XQueryBestSize.restype = c_int
XQueryBestSize.argtypes = [
POINTER(Display),
c_int,
Drawable,
c_uint,
c_uint,
POINTER(c_uint),
POINTER(c_uint)]
# /usr/include/X11/Xlib.h:2949
XQueryBestStipple = _lib.XQueryBestStipple
XQueryBestStipple.restype = c_int
XQueryBestStipple.argtypes = [
POINTER(Display),
Drawable,
c_uint,
c_uint,
POINTER(c_uint),
POINTER(c_uint)]
# /usr/include/X11/Xlib.h:2958
XQueryBestTile = _lib.XQueryBestTile
XQueryBestTile.restype = c_int
XQueryBestTile.argtypes = [
POINTER(Display),
Drawable,
c_uint,
c_uint,
POINTER(c_uint),
POINTER(c_uint)]
# /usr/include/X11/Xlib.h:2967
XQueryColor = _lib.XQueryColor
XQueryColor.restype = c_int
XQueryColor.argtypes = [POINTER(Display), Colormap, POINTER(XColor)]
# /usr/include/X11/Xlib.h:2973
XQueryColors = _lib.XQueryColors
XQueryColors.restype = c_int
XQueryColors.argtypes = [POINTER(Display), Colormap, POINTER(XColor), c_int]
# /usr/include/X11/Xlib.h:2980
XQueryExtension = _lib.XQueryExtension
XQueryExtension.restype = c_int
XQueryExtension.argtypes = [
POINTER(Display),
c_char_p,
POINTER(c_int),
POINTER(c_int),
POINTER(c_int)]
# /usr/include/X11/Xlib.h:2988
XQueryKeymap = _lib.XQueryKeymap
XQueryKeymap.restype = c_int
XQueryKeymap.argtypes = [POINTER(Display), c_char * 32]
# /usr/include/X11/Xlib.h:2993
XQueryPointer = _lib.XQueryPointer
XQueryPointer.restype = c_int
XQueryPointer.argtypes = [
POINTER(Display),
Window,
POINTER(Window),
POINTER(Window),
POINTER(c_int),
POINTER(c_int),
POINTER(c_int),
POINTER(c_int),
POINTER(c_uint)]
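# Illustrative sketch (not part of the generated bindings): querying the
# pointer via the declaration above. Every out-parameter is passed with
# byref(); the return value is nonzero while the pointer is on the same
# screen as `window`.
def _example_pointer_position(display, window):
    """Return (x, y) relative to `window`, or None if on another screen."""
    root, child = Window(), Window()
    root_x, root_y = c_int(), c_int()
    win_x, win_y = c_int(), c_int()
    mask = c_uint()
    same_screen = XQueryPointer(
        display, window, byref(root), byref(child),
        byref(root_x), byref(root_y), byref(win_x), byref(win_y),
        byref(mask))
    return (win_x.value, win_y.value) if same_screen else None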
# /usr/include/X11/Xlib.h:3005
XQueryTextExtents = _lib.XQueryTextExtents
XQueryTextExtents.restype = c_int
XQueryTextExtents.argtypes = [
POINTER(Display),
XID,
c_char_p,
c_int,
POINTER(c_int),
POINTER(c_int),
POINTER(c_int),
POINTER(XCharStruct)]
# /usr/include/X11/Xlib.h:3016
XQueryTextExtents16 = _lib.XQueryTextExtents16
XQueryTextExtents16.restype = c_int
XQueryTextExtents16.argtypes = [
POINTER(Display),
XID,
POINTER(XChar2b),
c_int,
POINTER(c_int),
POINTER(c_int),
POINTER(c_int),
POINTER(XCharStruct)]
# /usr/include/X11/Xlib.h:3027
XQueryTree = _lib.XQueryTree
XQueryTree.restype = c_int
XQueryTree.argtypes = [
POINTER(Display),
Window,
POINTER(Window),
POINTER(Window),
    POINTER(POINTER(Window)),
    POINTER(c_uint)]
# /usr/include/X11/Xlib.h:3036
XRaiseWindow = _lib.XRaiseWindow
XRaiseWindow.restype = c_int
XRaiseWindow.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:3041
XReadBitmapFile = _lib.XReadBitmapFile
XReadBitmapFile.restype = c_int
XReadBitmapFile.argtypes = [
POINTER(Display),
Drawable,
c_char_p,
POINTER(c_uint),
POINTER(c_uint),
POINTER(Pixmap),
POINTER(c_int),
POINTER(c_int)]
# /usr/include/X11/Xlib.h:3052
XReadBitmapFileData = _lib.XReadBitmapFileData
XReadBitmapFileData.restype = c_int
XReadBitmapFileData.argtypes = [
c_char_p,
POINTER(c_uint),
POINTER(c_uint),
    POINTER(POINTER(c_ubyte)),
POINTER(c_int),
POINTER(c_int)]
# /usr/include/X11/Xlib.h:3061
XRebindKeysym = _lib.XRebindKeysym
XRebindKeysym.restype = c_int
XRebindKeysym.argtypes = [
POINTER(Display),
KeySym,
POINTER(KeySym),
c_int,
POINTER(c_ubyte),
c_int]
# /usr/include/X11/Xlib.h:3070
XRecolorCursor = _lib.XRecolorCursor
XRecolorCursor.restype = c_int
XRecolorCursor.argtypes = [
POINTER(Display),
Cursor,
POINTER(XColor),
POINTER(XColor)]
# /usr/include/X11/Xlib.h:3077
XRefreshKeyboardMapping = _lib.XRefreshKeyboardMapping
XRefreshKeyboardMapping.restype = c_int
XRefreshKeyboardMapping.argtypes = [POINTER(XMappingEvent)]
# /usr/include/X11/Xlib.h:3081
XRemoveFromSaveSet = _lib.XRemoveFromSaveSet
XRemoveFromSaveSet.restype = c_int
XRemoveFromSaveSet.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:3086
XRemoveHost = _lib.XRemoveHost
XRemoveHost.restype = c_int
XRemoveHost.argtypes = [POINTER(Display), POINTER(XHostAddress)]
# /usr/include/X11/Xlib.h:3091
XRemoveHosts = _lib.XRemoveHosts
XRemoveHosts.restype = c_int
XRemoveHosts.argtypes = [POINTER(Display), POINTER(XHostAddress), c_int]
# /usr/include/X11/Xlib.h:3097
XReparentWindow = _lib.XReparentWindow
XReparentWindow.restype = c_int
XReparentWindow.argtypes = [POINTER(Display), Window, Window, c_int, c_int]
# /usr/include/X11/Xlib.h:3105
XResetScreenSaver = _lib.XResetScreenSaver
XResetScreenSaver.restype = c_int
XResetScreenSaver.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:3109
XResizeWindow = _lib.XResizeWindow
XResizeWindow.restype = c_int
XResizeWindow.argtypes = [POINTER(Display), Window, c_uint, c_uint]
# /usr/include/X11/Xlib.h:3116
XRestackWindows = _lib.XRestackWindows
XRestackWindows.restype = c_int
XRestackWindows.argtypes = [POINTER(Display), POINTER(Window), c_int]
# /usr/include/X11/Xlib.h:3122
XRotateBuffers = _lib.XRotateBuffers
XRotateBuffers.restype = c_int
XRotateBuffers.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:3127
XRotateWindowProperties = _lib.XRotateWindowProperties
XRotateWindowProperties.restype = c_int
XRotateWindowProperties.argtypes = [
POINTER(Display), Window, POINTER(Atom), c_int, c_int]
# /usr/include/X11/Xlib.h:3135
XScreenCount = _lib.XScreenCount
XScreenCount.restype = c_int
XScreenCount.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:3139
XSelectInput = _lib.XSelectInput
XSelectInput.restype = c_int
XSelectInput.argtypes = [POINTER(Display), Window, c_long]
# /usr/include/X11/Xlib.h:3145
XSendEvent = _lib.XSendEvent
XSendEvent.restype = c_int
XSendEvent.argtypes = [
POINTER(Display),
Window,
c_int,
c_long,
POINTER(XEvent)]
# /usr/include/X11/Xlib.h:3153
XSetAccessControl = _lib.XSetAccessControl
XSetAccessControl.restype = c_int
XSetAccessControl.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:3158
XSetArcMode = _lib.XSetArcMode
XSetArcMode.restype = c_int
XSetArcMode.argtypes = [POINTER(Display), GC, c_int]
# /usr/include/X11/Xlib.h:3164
XSetBackground = _lib.XSetBackground
XSetBackground.restype = c_int
XSetBackground.argtypes = [POINTER(Display), GC, c_ulong]
# /usr/include/X11/Xlib.h:3170
XSetClipMask = _lib.XSetClipMask
XSetClipMask.restype = c_int
XSetClipMask.argtypes = [POINTER(Display), GC, Pixmap]
# /usr/include/X11/Xlib.h:3176
XSetClipOrigin = _lib.XSetClipOrigin
XSetClipOrigin.restype = c_int
XSetClipOrigin.argtypes = [POINTER(Display), GC, c_int, c_int]
# /usr/include/X11/Xlib.h:3183
XSetClipRectangles = _lib.XSetClipRectangles
XSetClipRectangles.restype = c_int
XSetClipRectangles.argtypes = [
POINTER(Display),
GC,
c_int,
c_int,
POINTER(XRectangle),
c_int,
c_int]
# /usr/include/X11/Xlib.h:3193
XSetCloseDownMode = _lib.XSetCloseDownMode
XSetCloseDownMode.restype = c_int
XSetCloseDownMode.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:3198
XSetCommand = _lib.XSetCommand
XSetCommand.restype = c_int
XSetCommand.argtypes = [POINTER(Display), Window, POINTER(c_char_p), c_int]
# /usr/include/X11/Xlib.h:3205
XSetDashes = _lib.XSetDashes
XSetDashes.restype = c_int
XSetDashes.argtypes = [POINTER(Display), GC, c_int, c_char_p, c_int]
# /usr/include/X11/Xlib.h:3213
XSetFillRule = _lib.XSetFillRule
XSetFillRule.restype = c_int
XSetFillRule.argtypes = [POINTER(Display), GC, c_int]
# /usr/include/X11/Xlib.h:3219
XSetFillStyle = _lib.XSetFillStyle
XSetFillStyle.restype = c_int
XSetFillStyle.argtypes = [POINTER(Display), GC, c_int]
# /usr/include/X11/Xlib.h:3225
XSetFont = _lib.XSetFont
XSetFont.restype = c_int
XSetFont.argtypes = [POINTER(Display), GC, Font]
# /usr/include/X11/Xlib.h:3231
XSetFontPath = _lib.XSetFontPath
XSetFontPath.restype = c_int
XSetFontPath.argtypes = [POINTER(Display), POINTER(c_char_p), c_int]
# /usr/include/X11/Xlib.h:3237
XSetForeground = _lib.XSetForeground
XSetForeground.restype = c_int
XSetForeground.argtypes = [POINTER(Display), GC, c_ulong]
# /usr/include/X11/Xlib.h:3243
XSetFunction = _lib.XSetFunction
XSetFunction.restype = c_int
XSetFunction.argtypes = [POINTER(Display), GC, c_int]
# /usr/include/X11/Xlib.h:3249
XSetGraphicsExposures = _lib.XSetGraphicsExposures
XSetGraphicsExposures.restype = c_int
XSetGraphicsExposures.argtypes = [POINTER(Display), GC, c_int]
# /usr/include/X11/Xlib.h:3255
XSetIconName = _lib.XSetIconName
XSetIconName.restype = c_int
XSetIconName.argtypes = [POINTER(Display), Window, c_char_p]
# /usr/include/X11/Xlib.h:3261
XSetInputFocus = _lib.XSetInputFocus
XSetInputFocus.restype = c_int
XSetInputFocus.argtypes = [POINTER(Display), Window, c_int, Time]
# /usr/include/X11/Xlib.h:3268
XSetLineAttributes = _lib.XSetLineAttributes
XSetLineAttributes.restype = c_int
XSetLineAttributes.argtypes = [
POINTER(Display),
GC,
c_uint,
c_int,
c_int,
c_int]
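# Illustrative sketch (not part of the generated bindings): typical GC setup
# before drawing. Hedged example -- the literal style values follow X.h
# (LineSolid == 0, CapButt == 1, JoinMiter == 0), constants this module does
# not itself re-export.
def _example_configure_gc(display, gc, pixel):
    """Select a foreground pixel and solid 2px lines on an existing GC."""
    XSetForeground(display, gc, pixel)
    XSetLineAttributes(display, gc, 2, 0, 1, 0)  # width, LineSolid, CapButt, JoinMiter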
# /usr/include/X11/Xlib.h:3277
XSetModifierMapping = _lib.XSetModifierMapping
XSetModifierMapping.restype = c_int
XSetModifierMapping.argtypes = [POINTER(Display), POINTER(XModifierKeymap)]
# /usr/include/X11/Xlib.h:3282
XSetPlaneMask = _lib.XSetPlaneMask
XSetPlaneMask.restype = c_int
XSetPlaneMask.argtypes = [POINTER(Display), GC, c_ulong]
# /usr/include/X11/Xlib.h:3288
XSetPointerMapping = _lib.XSetPointerMapping
XSetPointerMapping.restype = c_int
XSetPointerMapping.argtypes = [POINTER(Display), POINTER(c_ubyte), c_int]
# /usr/include/X11/Xlib.h:3294
XSetScreenSaver = _lib.XSetScreenSaver
XSetScreenSaver.restype = c_int
XSetScreenSaver.argtypes = [POINTER(Display), c_int, c_int, c_int, c_int]
# /usr/include/X11/Xlib.h:3302
XSetSelectionOwner = _lib.XSetSelectionOwner
XSetSelectionOwner.restype = c_int
XSetSelectionOwner.argtypes = [POINTER(Display), Atom, Window, Time]
# /usr/include/X11/Xlib.h:3309
XSetState = _lib.XSetState
XSetState.restype = c_int
XSetState.argtypes = [POINTER(Display), GC, c_ulong, c_ulong, c_int, c_ulong]
# /usr/include/X11/Xlib.h:3318
XSetStipple = _lib.XSetStipple
XSetStipple.restype = c_int
XSetStipple.argtypes = [POINTER(Display), GC, Pixmap]
# /usr/include/X11/Xlib.h:3324
XSetSubwindowMode = _lib.XSetSubwindowMode
XSetSubwindowMode.restype = c_int
XSetSubwindowMode.argtypes = [POINTER(Display), GC, c_int]
# /usr/include/X11/Xlib.h:3330
XSetTSOrigin = _lib.XSetTSOrigin
XSetTSOrigin.restype = c_int
XSetTSOrigin.argtypes = [POINTER(Display), GC, c_int, c_int]
# /usr/include/X11/Xlib.h:3337
XSetTile = _lib.XSetTile
XSetTile.restype = c_int
XSetTile.argtypes = [POINTER(Display), GC, Pixmap]
# /usr/include/X11/Xlib.h:3343
XSetWindowBackground = _lib.XSetWindowBackground
XSetWindowBackground.restype = c_int
XSetWindowBackground.argtypes = [POINTER(Display), Window, c_ulong]
# /usr/include/X11/Xlib.h:3349
XSetWindowBackgroundPixmap = _lib.XSetWindowBackgroundPixmap
XSetWindowBackgroundPixmap.restype = c_int
XSetWindowBackgroundPixmap.argtypes = [POINTER(Display), Window, Pixmap]
# /usr/include/X11/Xlib.h:3355
XSetWindowBorder = _lib.XSetWindowBorder
XSetWindowBorder.restype = c_int
XSetWindowBorder.argtypes = [POINTER(Display), Window, c_ulong]
# /usr/include/X11/Xlib.h:3361
XSetWindowBorderPixmap = _lib.XSetWindowBorderPixmap
XSetWindowBorderPixmap.restype = c_int
XSetWindowBorderPixmap.argtypes = [POINTER(Display), Window, Pixmap]
# /usr/include/X11/Xlib.h:3367
XSetWindowBorderWidth = _lib.XSetWindowBorderWidth
XSetWindowBorderWidth.restype = c_int
XSetWindowBorderWidth.argtypes = [POINTER(Display), Window, c_uint]
# /usr/include/X11/Xlib.h:3373
XSetWindowColormap = _lib.XSetWindowColormap
XSetWindowColormap.restype = c_int
XSetWindowColormap.argtypes = [POINTER(Display), Window, Colormap]
# /usr/include/X11/Xlib.h:3379
XStoreBuffer = _lib.XStoreBuffer
XStoreBuffer.restype = c_int
XStoreBuffer.argtypes = [POINTER(Display), c_char_p, c_int, c_int]
# /usr/include/X11/Xlib.h:3386
XStoreBytes = _lib.XStoreBytes
XStoreBytes.restype = c_int
XStoreBytes.argtypes = [POINTER(Display), c_char_p, c_int]
# /usr/include/X11/Xlib.h:3392
XStoreColor = _lib.XStoreColor
XStoreColor.restype = c_int
XStoreColor.argtypes = [POINTER(Display), Colormap, POINTER(XColor)]
# /usr/include/X11/Xlib.h:3398
XStoreColors = _lib.XStoreColors
XStoreColors.restype = c_int
XStoreColors.argtypes = [POINTER(Display), Colormap, POINTER(XColor), c_int]
# /usr/include/X11/Xlib.h:3405
XStoreName = _lib.XStoreName
XStoreName.restype = c_int
XStoreName.argtypes = [POINTER(Display), Window, c_char_p]
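# Illustrative sketch (not part of the generated bindings): naming a window.
# c_char_p arguments take bytes in Python 3, so text must be encoded first.
def _example_set_title(display, window, title):
    """Publish the legacy WM_NAME property and flush the request."""
    XStoreName(display, window, title.encode('utf-8'))
    XFlush(display)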
# /usr/include/X11/Xlib.h:3411
XStoreNamedColor = _lib.XStoreNamedColor
XStoreNamedColor.restype = c_int
XStoreNamedColor.argtypes = [
POINTER(Display),
Colormap,
c_char_p,
c_ulong,
c_int]
# /usr/include/X11/Xlib.h:3419
XSync = _lib.XSync
XSync.restype = c_int
XSync.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:3424
XTextExtents = _lib.XTextExtents
XTextExtents.restype = c_int
XTextExtents.argtypes = [
POINTER(XFontStruct),
c_char_p,
c_int,
POINTER(c_int),
POINTER(c_int),
POINTER(c_int),
POINTER(XCharStruct)]
# /usr/include/X11/Xlib.h:3434
XTextExtents16 = _lib.XTextExtents16
XTextExtents16.restype = c_int
XTextExtents16.argtypes = [
POINTER(XFontStruct),
POINTER(XChar2b),
c_int,
POINTER(c_int),
POINTER(c_int),
POINTER(c_int),
POINTER(XCharStruct)]
# /usr/include/X11/Xlib.h:3444
XTextWidth = _lib.XTextWidth
XTextWidth.restype = c_int
XTextWidth.argtypes = [POINTER(XFontStruct), c_char_p, c_int]
# /usr/include/X11/Xlib.h:3450
XTextWidth16 = _lib.XTextWidth16
XTextWidth16.restype = c_int
XTextWidth16.argtypes = [POINTER(XFontStruct), POINTER(XChar2b), c_int]
# /usr/include/X11/Xlib.h:3456
XTranslateCoordinates = _lib.XTranslateCoordinates
XTranslateCoordinates.restype = c_int
XTranslateCoordinates.argtypes = [
POINTER(Display),
Window,
Window,
c_int,
c_int,
POINTER(c_int),
POINTER(c_int),
POINTER(Window)]
# /usr/include/X11/Xlib.h:3467
XUndefineCursor = _lib.XUndefineCursor
XUndefineCursor.restype = c_int
XUndefineCursor.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:3472
XUngrabButton = _lib.XUngrabButton
XUngrabButton.restype = c_int
XUngrabButton.argtypes = [POINTER(Display), c_uint, c_uint, Window]
# /usr/include/X11/Xlib.h:3479
XUngrabKey = _lib.XUngrabKey
XUngrabKey.restype = c_int
XUngrabKey.argtypes = [POINTER(Display), c_int, c_uint, Window]
# /usr/include/X11/Xlib.h:3486
XUngrabKeyboard = _lib.XUngrabKeyboard
XUngrabKeyboard.restype = c_int
XUngrabKeyboard.argtypes = [POINTER(Display), Time]
# /usr/include/X11/Xlib.h:3491
XUngrabPointer = _lib.XUngrabPointer
XUngrabPointer.restype = c_int
XUngrabPointer.argtypes = [POINTER(Display), Time]
# /usr/include/X11/Xlib.h:3496
XUngrabServer = _lib.XUngrabServer
XUngrabServer.restype = c_int
XUngrabServer.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:3500
XUninstallColormap = _lib.XUninstallColormap
XUninstallColormap.restype = c_int
XUninstallColormap.argtypes = [POINTER(Display), Colormap]
# /usr/include/X11/Xlib.h:3505
XUnloadFont = _lib.XUnloadFont
XUnloadFont.restype = c_int
XUnloadFont.argtypes = [POINTER(Display), Font]
# /usr/include/X11/Xlib.h:3510
XUnmapSubwindows = _lib.XUnmapSubwindows
XUnmapSubwindows.restype = c_int
XUnmapSubwindows.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:3515
XUnmapWindow = _lib.XUnmapWindow
XUnmapWindow.restype = c_int
XUnmapWindow.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xlib.h:3520
XVendorRelease = _lib.XVendorRelease
XVendorRelease.restype = c_int
XVendorRelease.argtypes = [POINTER(Display)]
# /usr/include/X11/Xlib.h:3524
XWarpPointer = _lib.XWarpPointer
XWarpPointer.restype = c_int
XWarpPointer.argtypes = [
POINTER(Display),
Window,
Window,
c_int,
c_int,
c_uint,
c_uint,
c_int,
c_int]
# /usr/include/X11/Xlib.h:3536
XWidthMMOfScreen = _lib.XWidthMMOfScreen
XWidthMMOfScreen.restype = c_int
XWidthMMOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:3540
XWidthOfScreen = _lib.XWidthOfScreen
XWidthOfScreen.restype = c_int
XWidthOfScreen.argtypes = [POINTER(Screen)]
# /usr/include/X11/Xlib.h:3544
XWindowEvent = _lib.XWindowEvent
XWindowEvent.restype = c_int
XWindowEvent.argtypes = [POINTER(Display), Window, c_long, POINTER(XEvent)]
# /usr/include/X11/Xlib.h:3551
XWriteBitmapFile = _lib.XWriteBitmapFile
XWriteBitmapFile.restype = c_int
XWriteBitmapFile.argtypes = [
POINTER(Display),
c_char_p,
Pixmap,
c_uint,
c_uint,
c_int,
c_int]
# /usr/include/X11/Xlib.h:3561
XSupportsLocale = _lib.XSupportsLocale
XSupportsLocale.restype = c_int
XSupportsLocale.argtypes = []
# /usr/include/X11/Xlib.h:3563
XSetLocaleModifiers = _lib.XSetLocaleModifiers
XSetLocaleModifiers.restype = c_char_p
XSetLocaleModifiers.argtypes = [c_char_p]
class struct__XrmHashBucketRec(Structure):
__slots__ = [
]
struct__XrmHashBucketRec._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/X11/Xlib.h:3567
XOpenOM = _lib.XOpenOM
XOpenOM.restype = XOM
XOpenOM.argtypes = [
POINTER(Display),
POINTER(struct__XrmHashBucketRec),
c_char_p,
c_char_p]
# /usr/include/X11/Xlib.h:3574
XCloseOM = _lib.XCloseOM
XCloseOM.restype = c_int
XCloseOM.argtypes = [XOM]
# /usr/include/X11/Xlib.h:3578
XSetOMValues = _lib.XSetOMValues
XSetOMValues.restype = c_char_p
XSetOMValues.argtypes = [XOM]
# /usr/include/X11/Xlib.h:3583
XGetOMValues = _lib.XGetOMValues
XGetOMValues.restype = c_char_p
XGetOMValues.argtypes = [XOM]
# /usr/include/X11/Xlib.h:3588
XDisplayOfOM = _lib.XDisplayOfOM
XDisplayOfOM.restype = POINTER(Display)
XDisplayOfOM.argtypes = [XOM]
# /usr/include/X11/Xlib.h:3592
XLocaleOfOM = _lib.XLocaleOfOM
XLocaleOfOM.restype = c_char_p
XLocaleOfOM.argtypes = [XOM]
# /usr/include/X11/Xlib.h:3596
XCreateOC = _lib.XCreateOC
XCreateOC.restype = XOC
XCreateOC.argtypes = [XOM]
# /usr/include/X11/Xlib.h:3601
XDestroyOC = _lib.XDestroyOC
XDestroyOC.restype = None
XDestroyOC.argtypes = [XOC]
# /usr/include/X11/Xlib.h:3605
XOMOfOC = _lib.XOMOfOC
XOMOfOC.restype = XOM
XOMOfOC.argtypes = [XOC]
# /usr/include/X11/Xlib.h:3609
XSetOCValues = _lib.XSetOCValues
XSetOCValues.restype = c_char_p
XSetOCValues.argtypes = [XOC]
# /usr/include/X11/Xlib.h:3614
XGetOCValues = _lib.XGetOCValues
XGetOCValues.restype = c_char_p
XGetOCValues.argtypes = [XOC]
# /usr/include/X11/Xlib.h:3619
XCreateFontSet = _lib.XCreateFontSet
XCreateFontSet.restype = XFontSet
XCreateFontSet.argtypes = [
POINTER(Display),
c_char_p,
    POINTER(POINTER(c_char_p)),
POINTER(c_int),
POINTER(c_char_p)]
# /usr/include/X11/Xlib.h:3627
XFreeFontSet = _lib.XFreeFontSet
XFreeFontSet.restype = None
XFreeFontSet.argtypes = [POINTER(Display), XFontSet]
# /usr/include/X11/Xlib.h:3632
XFontsOfFontSet = _lib.XFontsOfFontSet
XFontsOfFontSet.restype = c_int
XFontsOfFontSet.argtypes = [
    XFontSet,
    POINTER(POINTER(POINTER(XFontStruct))),
    POINTER(POINTER(c_char_p))]
# /usr/include/X11/Xlib.h:3638
XBaseFontNameListOfFontSet = _lib.XBaseFontNameListOfFontSet
XBaseFontNameListOfFontSet.restype = c_char_p
XBaseFontNameListOfFontSet.argtypes = [XFontSet]
# /usr/include/X11/Xlib.h:3642
XLocaleOfFontSet = _lib.XLocaleOfFontSet
XLocaleOfFontSet.restype = c_char_p
XLocaleOfFontSet.argtypes = [XFontSet]
# /usr/include/X11/Xlib.h:3646
XContextDependentDrawing = _lib.XContextDependentDrawing
XContextDependentDrawing.restype = c_int
XContextDependentDrawing.argtypes = [XFontSet]
# /usr/include/X11/Xlib.h:3650
XDirectionalDependentDrawing = _lib.XDirectionalDependentDrawing
XDirectionalDependentDrawing.restype = c_int
XDirectionalDependentDrawing.argtypes = [XFontSet]
# /usr/include/X11/Xlib.h:3654
XContextualDrawing = _lib.XContextualDrawing
XContextualDrawing.restype = c_int
XContextualDrawing.argtypes = [XFontSet]
# /usr/include/X11/Xlib.h:3658
XExtentsOfFontSet = _lib.XExtentsOfFontSet
XExtentsOfFontSet.restype = POINTER(XFontSetExtents)
XExtentsOfFontSet.argtypes = [XFontSet]
# /usr/include/X11/Xlib.h:3662
XmbTextEscapement = _lib.XmbTextEscapement
XmbTextEscapement.restype = c_int
XmbTextEscapement.argtypes = [XFontSet, c_char_p, c_int]
# /usr/include/X11/Xlib.h:3668
XwcTextEscapement = _lib.XwcTextEscapement
XwcTextEscapement.restype = c_int
XwcTextEscapement.argtypes = [XFontSet, c_wchar_p, c_int]
# /usr/include/X11/Xlib.h:3674
Xutf8TextEscapement = _lib.Xutf8TextEscapement
Xutf8TextEscapement.restype = c_int
Xutf8TextEscapement.argtypes = [XFontSet, c_char_p, c_int]
# /usr/include/X11/Xlib.h:3680
XmbTextExtents = _lib.XmbTextExtents
XmbTextExtents.restype = c_int
XmbTextExtents.argtypes = [
XFontSet,
c_char_p,
c_int,
POINTER(XRectangle),
POINTER(XRectangle)]
# /usr/include/X11/Xlib.h:3688
XwcTextExtents = _lib.XwcTextExtents
XwcTextExtents.restype = c_int
XwcTextExtents.argtypes = [
XFontSet,
c_wchar_p,
c_int,
POINTER(XRectangle),
POINTER(XRectangle)]
# /usr/include/X11/Xlib.h:3696
Xutf8TextExtents = _lib.Xutf8TextExtents
Xutf8TextExtents.restype = c_int
Xutf8TextExtents.argtypes = [
XFontSet,
c_char_p,
c_int,
POINTER(XRectangle),
POINTER(XRectangle)]
# /usr/include/X11/Xlib.h:3704
XmbTextPerCharExtents = _lib.XmbTextPerCharExtents
XmbTextPerCharExtents.restype = c_int
XmbTextPerCharExtents.argtypes = [
XFontSet,
c_char_p,
c_int,
POINTER(XRectangle),
POINTER(XRectangle),
c_int,
POINTER(c_int),
POINTER(XRectangle),
POINTER(XRectangle)]
# /usr/include/X11/Xlib.h:3716
XwcTextPerCharExtents = _lib.XwcTextPerCharExtents
XwcTextPerCharExtents.restype = c_int
XwcTextPerCharExtents.argtypes = [
XFontSet,
c_wchar_p,
c_int,
POINTER(XRectangle),
POINTER(XRectangle),
c_int,
POINTER(c_int),
POINTER(XRectangle),
POINTER(XRectangle)]
# /usr/include/X11/Xlib.h:3728
Xutf8TextPerCharExtents = _lib.Xutf8TextPerCharExtents
Xutf8TextPerCharExtents.restype = c_int
Xutf8TextPerCharExtents.argtypes = [
XFontSet,
c_char_p,
c_int,
POINTER(XRectangle),
POINTER(XRectangle),
c_int,
POINTER(c_int),
POINTER(XRectangle),
POINTER(XRectangle)]
# /usr/include/X11/Xlib.h:3740
XmbDrawText = _lib.XmbDrawText
XmbDrawText.restype = None
XmbDrawText.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
POINTER(XmbTextItem),
c_int]
# /usr/include/X11/Xlib.h:3750
XwcDrawText = _lib.XwcDrawText
XwcDrawText.restype = None
XwcDrawText.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
POINTER(XwcTextItem),
c_int]
# /usr/include/X11/Xlib.h:3760
Xutf8DrawText = _lib.Xutf8DrawText
Xutf8DrawText.restype = None
Xutf8DrawText.argtypes = [
POINTER(Display),
Drawable,
GC,
c_int,
c_int,
POINTER(XmbTextItem),
c_int]
# /usr/include/X11/Xlib.h:3770
XmbDrawString = _lib.XmbDrawString
XmbDrawString.restype = None
XmbDrawString.argtypes = [
POINTER(Display),
Drawable,
XFontSet,
GC,
c_int,
c_int,
c_char_p,
c_int]
# /usr/include/X11/Xlib.h:3781
XwcDrawString = _lib.XwcDrawString
XwcDrawString.restype = None
XwcDrawString.argtypes = [
POINTER(Display),
Drawable,
XFontSet,
GC,
c_int,
c_int,
c_wchar_p,
c_int]
# /usr/include/X11/Xlib.h:3792
Xutf8DrawString = _lib.Xutf8DrawString
Xutf8DrawString.restype = None
Xutf8DrawString.argtypes = [
POINTER(Display),
Drawable,
XFontSet,
GC,
c_int,
c_int,
c_char_p,
c_int]
# /usr/include/X11/Xlib.h:3803
XmbDrawImageString = _lib.XmbDrawImageString
XmbDrawImageString.restype = None
XmbDrawImageString.argtypes = [
POINTER(Display),
Drawable,
XFontSet,
GC,
c_int,
c_int,
c_char_p,
c_int]
# /usr/include/X11/Xlib.h:3814
XwcDrawImageString = _lib.XwcDrawImageString
XwcDrawImageString.restype = None
XwcDrawImageString.argtypes = [
POINTER(Display),
Drawable,
XFontSet,
GC,
c_int,
c_int,
c_wchar_p,
c_int]
# /usr/include/X11/Xlib.h:3825
Xutf8DrawImageString = _lib.Xutf8DrawImageString
Xutf8DrawImageString.restype = None
Xutf8DrawImageString.argtypes = [
POINTER(Display),
Drawable,
XFontSet,
GC,
c_int,
c_int,
c_char_p,
c_int]
# /usr/include/X11/Xlib.h:3836
XOpenIM = _lib.XOpenIM
XOpenIM.restype = XIM
XOpenIM.argtypes = [
POINTER(Display),
POINTER(struct__XrmHashBucketRec),
c_char_p,
c_char_p]
# /usr/include/X11/Xlib.h:3843
XCloseIM = _lib.XCloseIM
XCloseIM.restype = c_int
XCloseIM.argtypes = [XIM]
# /usr/include/X11/Xlib.h:3847
XGetIMValues = _lib.XGetIMValues
XGetIMValues.restype = c_char_p
XGetIMValues.argtypes = [XIM]
# /usr/include/X11/Xlib.h:3851
XSetIMValues = _lib.XSetIMValues
XSetIMValues.restype = c_char_p
XSetIMValues.argtypes = [XIM]
# /usr/include/X11/Xlib.h:3855
XDisplayOfIM = _lib.XDisplayOfIM
XDisplayOfIM.restype = POINTER(Display)
XDisplayOfIM.argtypes = [XIM]
# /usr/include/X11/Xlib.h:3859
XLocaleOfIM = _lib.XLocaleOfIM
XLocaleOfIM.restype = c_char_p
XLocaleOfIM.argtypes = [XIM]
# /usr/include/X11/Xlib.h:3863
XCreateIC = _lib.XCreateIC
XCreateIC.restype = XIC
XCreateIC.argtypes = [XIM]
# /usr/include/X11/Xlib.h:3867
XDestroyIC = _lib.XDestroyIC
XDestroyIC.restype = None
XDestroyIC.argtypes = [XIC]
# /usr/include/X11/Xlib.h:3871
XSetICFocus = _lib.XSetICFocus
XSetICFocus.restype = None
XSetICFocus.argtypes = [XIC]
# /usr/include/X11/Xlib.h:3875
XUnsetICFocus = _lib.XUnsetICFocus
XUnsetICFocus.restype = None
XUnsetICFocus.argtypes = [XIC]
# /usr/include/X11/Xlib.h:3879
XwcResetIC = _lib.XwcResetIC
XwcResetIC.restype = c_wchar_p
XwcResetIC.argtypes = [XIC]
# /usr/include/X11/Xlib.h:3883
XmbResetIC = _lib.XmbResetIC
XmbResetIC.restype = c_char_p
XmbResetIC.argtypes = [XIC]
# /usr/include/X11/Xlib.h:3887
Xutf8ResetIC = _lib.Xutf8ResetIC
Xutf8ResetIC.restype = c_char_p
Xutf8ResetIC.argtypes = [XIC]
# /usr/include/X11/Xlib.h:3891
XSetICValues = _lib.XSetICValues
XSetICValues.restype = c_char_p
XSetICValues.argtypes = [XIC]
# /usr/include/X11/Xlib.h:3895
XGetICValues = _lib.XGetICValues
XGetICValues.restype = c_char_p
XGetICValues.argtypes = [XIC]
# /usr/include/X11/Xlib.h:3899
XIMOfIC = _lib.XIMOfIC
XIMOfIC.restype = XIM
XIMOfIC.argtypes = [XIC]
# /usr/include/X11/Xlib.h:3903
XFilterEvent = _lib.XFilterEvent
XFilterEvent.restype = c_int
XFilterEvent.argtypes = [POINTER(XEvent), Window]
# /usr/include/X11/Xlib.h:3908
XmbLookupString = _lib.XmbLookupString
XmbLookupString.restype = c_int
XmbLookupString.argtypes = [
XIC,
POINTER(XKeyPressedEvent),
c_char_p,
c_int,
POINTER(KeySym),
POINTER(c_int)]
# /usr/include/X11/Xlib.h:3917
XwcLookupString = _lib.XwcLookupString
XwcLookupString.restype = c_int
XwcLookupString.argtypes = [
XIC,
POINTER(XKeyPressedEvent),
c_wchar_p,
c_int,
POINTER(KeySym),
POINTER(c_int)]
# /usr/include/X11/Xlib.h:3926
Xutf8LookupString = _lib.Xutf8LookupString
Xutf8LookupString.restype = c_int
Xutf8LookupString.argtypes = [
XIC,
POINTER(XKeyPressedEvent),
c_char_p,
c_int,
POINTER(KeySym),
POINTER(c_int)]
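# Illustrative sketch (not part of the generated bindings): decoding
# committed text from an input context with the declaration above. Hedged
# example -- a real client first offers every event to XFilterEvent and
# skips the ones the input method consumes, and handles XBufferOverflow by
# retrying with a larger buffer.
def _example_utf8_key_text(ic, key_event):
    """Return the UTF-8 text a key press produced through `ic`."""
    buf = create_string_buffer(64)
    keysym = KeySym()
    status = c_int()
    n = Xutf8LookupString(ic, byref(key_event), buf, len(buf),
                          byref(keysym), byref(status))
    return buf.raw[:max(n, 0)].decode('utf-8', 'replace')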
# /usr/include/X11/Xlib.h:3935
XVaCreateNestedList = _lib.XVaCreateNestedList
XVaCreateNestedList.restype = XVaNestedList
XVaCreateNestedList.argtypes = [c_int]
# /usr/include/X11/Xlib.h:3941
XRegisterIMInstantiateCallback = _lib.XRegisterIMInstantiateCallback
XRegisterIMInstantiateCallback.restype = c_int
XRegisterIMInstantiateCallback.argtypes = [
    POINTER(Display),
    POINTER(struct__XrmHashBucketRec),
    c_char_p,
    c_char_p,
    XIDProc,
    XPointer]
# /usr/include/X11/Xlib.h:3950
XUnregisterIMInstantiateCallback = _lib.XUnregisterIMInstantiateCallback
XUnregisterIMInstantiateCallback.restype = c_int
XUnregisterIMInstantiateCallback.argtypes = [
    POINTER(Display),
    POINTER(struct__XrmHashBucketRec),
    c_char_p,
    c_char_p,
    XIDProc,
    XPointer]
XConnectionWatchProc = CFUNCTYPE(
None,
POINTER(Display),
XPointer,
c_int,
c_int,
POINTER(XPointer)) # /usr/include/X11/Xlib.h:3959
# /usr/include/X11/Xlib.h:3968
XInternalConnectionNumbers = _lib.XInternalConnectionNumbers
XInternalConnectionNumbers.restype = c_int
XInternalConnectionNumbers.argtypes = [
    POINTER(Display),
    POINTER(POINTER(c_int)),
    POINTER(c_int)]
# /usr/include/X11/Xlib.h:3974
XProcessInternalConnection = _lib.XProcessInternalConnection
XProcessInternalConnection.restype = None
XProcessInternalConnection.argtypes = [POINTER(Display), c_int]
# /usr/include/X11/Xlib.h:3979
XAddConnectionWatch = _lib.XAddConnectionWatch
XAddConnectionWatch.restype = c_int
XAddConnectionWatch.argtypes = [
POINTER(Display),
XConnectionWatchProc,
XPointer]
# /usr/include/X11/Xlib.h:3985
XRemoveConnectionWatch = _lib.XRemoveConnectionWatch
XRemoveConnectionWatch.restype = None
XRemoveConnectionWatch.argtypes = [
POINTER(Display),
XConnectionWatchProc,
XPointer]
# /usr/include/X11/Xlib.h:3991
XSetAuthorization = _lib.XSetAuthorization
XSetAuthorization.restype = None
XSetAuthorization.argtypes = [c_char_p, c_int, c_char_p, c_int]
# /usr/include/X11/Xlib.h:3998
_Xmbtowc = _lib._Xmbtowc
_Xmbtowc.restype = c_int
_Xmbtowc.argtypes = [c_wchar_p, c_char_p, c_int]
# /usr/include/X11/Xlib.h:4009
_Xwctomb = _lib._Xwctomb
_Xwctomb.restype = c_int
_Xwctomb.argtypes = [c_char_p, c_wchar]
# /usr/include/X11/Xlib.h:4014
XGetEventData = _lib.XGetEventData
XGetEventData.restype = c_int
XGetEventData.argtypes = [POINTER(Display), POINTER(XGenericEventCookie)]
# /usr/include/X11/Xlib.h:4019
XFreeEventData = _lib.XFreeEventData
XFreeEventData.restype = None
XFreeEventData.argtypes = [POINTER(Display), POINTER(XGenericEventCookie)]
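# Illustrative sketch (not part of the generated bindings): the
# XGetEventData/XFreeEventData pair for GenericEvent cookies. Hedged
# example -- it assumes the XEvent union declared earlier in this module
# exposes the `xcookie` member.
def _example_with_event_data(display, ev):
    """Fetch extension data for a generic event, then release it."""
    cookie = ev.xcookie
    if XGetEventData(display, byref(cookie)):
        try:
            pass  # interpret cookie.data for the owning extension here
        finally:
            XFreeEventData(display, byref(cookie))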
NoValue = 0 # /usr/include/X11/Xutil.h:4805
XValue = 1 # /usr/include/X11/Xutil.h:4806
YValue = 2 # /usr/include/X11/Xutil.h:4807
WidthValue = 4 # /usr/include/X11/Xutil.h:4808
HeightValue = 8 # /usr/include/X11/Xutil.h:4809
AllValues = 15 # /usr/include/X11/Xutil.h:4810
XNegative = 16 # /usr/include/X11/Xutil.h:4811
YNegative = 32 # /usr/include/X11/Xutil.h:4812
class struct_anon_95(Structure):
__slots__ = [
'flags',
'x',
'y',
'width',
'height',
'min_width',
'min_height',
'max_width',
'max_height',
'width_inc',
'height_inc',
'min_aspect',
'max_aspect',
'base_width',
'base_height',
'win_gravity',
]
class struct_anon_96(Structure):
__slots__ = [
'x',
'y',
]
struct_anon_96._fields_ = [
('x', c_int),
('y', c_int),
]
class struct_anon_97(Structure):
__slots__ = [
'x',
'y',
]
struct_anon_97._fields_ = [
('x', c_int),
('y', c_int),
]
struct_anon_95._fields_ = [
('flags', c_long),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('min_width', c_int),
('min_height', c_int),
('max_width', c_int),
('max_height', c_int),
('width_inc', c_int),
('height_inc', c_int),
('min_aspect', struct_anon_96),
('max_aspect', struct_anon_97),
('base_width', c_int),
('base_height', c_int),
('win_gravity', c_int),
]
XSizeHints = struct_anon_95 # /usr/include/X11/Xutil.h:4831
USPosition = 1 # /usr/include/X11/Xutil.h:4839
USSize = 2 # /usr/include/X11/Xutil.h:4840
PPosition = 4 # /usr/include/X11/Xutil.h:4842
PSize = 8 # /usr/include/X11/Xutil.h:4843
PMinSize = 16 # /usr/include/X11/Xutil.h:4844
PMaxSize = 32 # /usr/include/X11/Xutil.h:4845
PResizeInc = 64 # /usr/include/X11/Xutil.h:4846
PAspect = 128 # /usr/include/X11/Xutil.h:4847
PBaseSize = 256 # /usr/include/X11/Xutil.h:4848
PWinGravity = 512 # /usr/include/X11/Xutil.h:4849
PAllHints = 252 # /usr/include/X11/Xutil.h:4852
class struct_anon_98(Structure):
__slots__ = [
'flags',
'input',
'initial_state',
'icon_pixmap',
'icon_window',
'icon_x',
'icon_y',
'icon_mask',
'window_group',
]
struct_anon_98._fields_ = [
('flags', c_long),
('input', c_int),
('initial_state', c_int),
('icon_pixmap', Pixmap),
('icon_window', Window),
('icon_x', c_int),
('icon_y', c_int),
('icon_mask', Pixmap),
('window_group', XID),
]
XWMHints = struct_anon_98 # /usr/include/X11/Xutil.h:4867
InputHint = 1 # /usr/include/X11/Xutil.h:4871
StateHint = 2 # /usr/include/X11/Xutil.h:4872
IconPixmapHint = 4 # /usr/include/X11/Xutil.h:4873
IconWindowHint = 8 # /usr/include/X11/Xutil.h:4874
IconPositionHint = 16 # /usr/include/X11/Xutil.h:4875
IconMaskHint = 32 # /usr/include/X11/Xutil.h:4876
WindowGroupHint = 64 # /usr/include/X11/Xutil.h:4877
AllHints = 127 # /usr/include/X11/Xutil.h:4878
XUrgencyHint = 256 # /usr/include/X11/Xutil.h:4880
WithdrawnState = 0 # /usr/include/X11/Xutil.h:4883
NormalState = 1 # /usr/include/X11/Xutil.h:4884
IconicState = 3 # /usr/include/X11/Xutil.h:4885
DontCareState = 0 # /usr/include/X11/Xutil.h:4890
ZoomState = 2 # /usr/include/X11/Xutil.h:4891
InactiveState = 4 # /usr/include/X11/Xutil.h:4892
class struct_anon_99(Structure):
__slots__ = [
'value',
'encoding',
'format',
'nitems',
]
struct_anon_99._fields_ = [
('value', POINTER(c_ubyte)),
('encoding', Atom),
('format', c_int),
('nitems', c_ulong),
]
XTextProperty = struct_anon_99 # /usr/include/X11/Xutil.h:4905
XNoMemory = -1 # /usr/include/X11/Xutil.h:4907
XLocaleNotSupported = -2 # /usr/include/X11/Xutil.h:4908
XConverterNotFound = -3 # /usr/include/X11/Xutil.h:4909
enum_anon_100 = c_int
XStringStyle = 0
XCompoundTextStyle = 1
XTextStyle = 2
XStdICCTextStyle = 3
XUTF8StringStyle = 4
XICCEncodingStyle = enum_anon_100 # /usr/include/X11/Xutil.h:4918
class struct_anon_101(Structure):
__slots__ = [
'min_width',
'min_height',
'max_width',
'max_height',
'width_inc',
'height_inc',
]
struct_anon_101._fields_ = [
('min_width', c_int),
('min_height', c_int),
('max_width', c_int),
('max_height', c_int),
('width_inc', c_int),
('height_inc', c_int),
]
XIconSize = struct_anon_101 # /usr/include/X11/Xutil.h:4924
class struct_anon_102(Structure):
__slots__ = [
'res_name',
'res_class',
]
struct_anon_102._fields_ = [
('res_name', c_char_p),
('res_class', c_char_p),
]
XClassHint = struct_anon_102 # /usr/include/X11/Xutil.h:4929
class struct__XComposeStatus(Structure):
__slots__ = [
'compose_ptr',
'chars_matched',
]
struct__XComposeStatus._fields_ = [
('compose_ptr', XPointer),
('chars_matched', c_int),
]
XComposeStatus = struct__XComposeStatus # /usr/include/X11/Xutil.h:4971
class struct__XRegion(Structure):
__slots__ = [
]
struct__XRegion._fields_ = [
('_opaque_struct', c_int)
]
Region = POINTER(struct__XRegion) # /usr/include/X11/Xutil.h:5010
RectangleOut = 0 # /usr/include/X11/Xutil.h:5014
RectangleIn = 1 # /usr/include/X11/Xutil.h:5015
RectanglePart = 2 # /usr/include/X11/Xutil.h:5016
class struct_anon_103(Structure):
__slots__ = [
'visual',
'visualid',
'screen',
'depth',
'class',
'red_mask',
'green_mask',
'blue_mask',
'colormap_size',
'bits_per_rgb',
]
struct_anon_103._fields_ = [
('visual', POINTER(Visual)),
('visualid', VisualID),
('screen', c_int),
('depth', c_int),
('class', c_int),
('red_mask', c_ulong),
('green_mask', c_ulong),
('blue_mask', c_ulong),
('colormap_size', c_int),
('bits_per_rgb', c_int),
]
XVisualInfo = struct_anon_103 # /usr/include/X11/Xutil.h:5039
VisualNoMask = 0 # /usr/include/X11/Xutil.h:5041
VisualIDMask = 1 # /usr/include/X11/Xutil.h:5042
VisualScreenMask = 2 # /usr/include/X11/Xutil.h:5043
VisualDepthMask = 4 # /usr/include/X11/Xutil.h:5044
VisualClassMask = 8 # /usr/include/X11/Xutil.h:5045
VisualRedMaskMask = 16 # /usr/include/X11/Xutil.h:5046
VisualGreenMaskMask = 32 # /usr/include/X11/Xutil.h:5047
VisualBlueMaskMask = 64 # /usr/include/X11/Xutil.h:5048
VisualColormapSizeMask = 128 # /usr/include/X11/Xutil.h:5049
VisualBitsPerRGBMask = 256 # /usr/include/X11/Xutil.h:5050
VisualAllMask = 511 # /usr/include/X11/Xutil.h:5051
class struct_anon_104(Structure):
__slots__ = [
'colormap',
'red_max',
'red_mult',
'green_max',
'green_mult',
'blue_max',
'blue_mult',
'base_pixel',
'visualid',
'killid',
]
struct_anon_104._fields_ = [
('colormap', Colormap),
('red_max', c_ulong),
('red_mult', c_ulong),
('green_max', c_ulong),
('green_mult', c_ulong),
('blue_max', c_ulong),
('blue_mult', c_ulong),
('base_pixel', c_ulong),
('visualid', VisualID),
('killid', XID),
]
XStandardColormap = struct_anon_104 # /usr/include/X11/Xutil.h:5068
BitmapSuccess = 0 # /usr/include/X11/Xutil.h:5076
BitmapOpenFailed = 1 # /usr/include/X11/Xutil.h:5077
BitmapFileInvalid = 2 # /usr/include/X11/Xutil.h:5078
BitmapNoMemory = 3 # /usr/include/X11/Xutil.h:5079
XCSUCCESS = 0 # /usr/include/X11/Xutil.h:5090
XCNOMEM = 1 # /usr/include/X11/Xutil.h:5091
XCNOENT = 2 # /usr/include/X11/Xutil.h:5092
XContext = c_int # /usr/include/X11/Xutil.h:5094
# /usr/include/X11/Xutil.h:5103
XAllocClassHint = _lib.XAllocClassHint
XAllocClassHint.restype = POINTER(XClassHint)
XAllocClassHint.argtypes = []
# /usr/include/X11/Xutil.h:5107
XAllocIconSize = _lib.XAllocIconSize
XAllocIconSize.restype = POINTER(XIconSize)
XAllocIconSize.argtypes = []
# /usr/include/X11/Xutil.h:5111
XAllocSizeHints = _lib.XAllocSizeHints
XAllocSizeHints.restype = POINTER(XSizeHints)
XAllocSizeHints.argtypes = []
# /usr/include/X11/Xutil.h:5115
XAllocStandardColormap = _lib.XAllocStandardColormap
XAllocStandardColormap.restype = POINTER(XStandardColormap)
XAllocStandardColormap.argtypes = []
# /usr/include/X11/Xutil.h:5119
XAllocWMHints = _lib.XAllocWMHints
XAllocWMHints.restype = POINTER(XWMHints)
XAllocWMHints.argtypes = []
# /usr/include/X11/Xutil.h:5123
XClipBox = _lib.XClipBox
XClipBox.restype = c_int
XClipBox.argtypes = [Region, POINTER(XRectangle)]
# /usr/include/X11/Xutil.h:5128
XCreateRegion = _lib.XCreateRegion
XCreateRegion.restype = Region
XCreateRegion.argtypes = []
# /usr/include/X11/Xutil.h:5132
XDefaultString = _lib.XDefaultString
XDefaultString.restype = c_char_p
XDefaultString.argtypes = []
# /usr/include/X11/Xutil.h:5134
XDeleteContext = _lib.XDeleteContext
XDeleteContext.restype = c_int
XDeleteContext.argtypes = [POINTER(Display), XID, XContext]
# /usr/include/X11/Xutil.h:5140
XDestroyRegion = _lib.XDestroyRegion
XDestroyRegion.restype = c_int
XDestroyRegion.argtypes = [Region]
# /usr/include/X11/Xutil.h:5144
XEmptyRegion = _lib.XEmptyRegion
XEmptyRegion.restype = c_int
XEmptyRegion.argtypes = [Region]
# /usr/include/X11/Xutil.h:5148
XEqualRegion = _lib.XEqualRegion
XEqualRegion.restype = c_int
XEqualRegion.argtypes = [Region, Region]
# /usr/include/X11/Xutil.h:5153
XFindContext = _lib.XFindContext
XFindContext.restype = c_int
XFindContext.argtypes = [POINTER(Display), XID, XContext, POINTER(XPointer)]
# /usr/include/X11/Xutil.h:5160
XGetClassHint = _lib.XGetClassHint
XGetClassHint.restype = c_int
XGetClassHint.argtypes = [POINTER(Display), Window, POINTER(XClassHint)]
# /usr/include/X11/Xutil.h:5166
XGetIconSizes = _lib.XGetIconSizes
XGetIconSizes.restype = c_int
XGetIconSizes.argtypes = [
    POINTER(Display),
    Window,
    POINTER(POINTER(XIconSize)),
    POINTER(c_int)]
# /usr/include/X11/Xutil.h:5173
XGetNormalHints = _lib.XGetNormalHints
XGetNormalHints.restype = c_int
XGetNormalHints.argtypes = [POINTER(Display), Window, POINTER(XSizeHints)]
# /usr/include/X11/Xutil.h:5179
XGetRGBColormaps = _lib.XGetRGBColormaps
XGetRGBColormaps.restype = c_int
XGetRGBColormaps.argtypes = [
POINTER(Display),
Window,
    POINTER(POINTER(XStandardColormap)),
POINTER(c_int),
Atom]
# /usr/include/X11/Xutil.h:5187
XGetSizeHints = _lib.XGetSizeHints
XGetSizeHints.restype = c_int
XGetSizeHints.argtypes = [POINTER(Display), Window, POINTER(XSizeHints), Atom]
# /usr/include/X11/Xutil.h:5194
XGetStandardColormap = _lib.XGetStandardColormap
XGetStandardColormap.restype = c_int
XGetStandardColormap.argtypes = [
POINTER(Display),
Window,
POINTER(XStandardColormap),
Atom]
# /usr/include/X11/Xutil.h:5201
XGetTextProperty = _lib.XGetTextProperty
XGetTextProperty.restype = c_int
XGetTextProperty.argtypes = [
POINTER(Display),
Window,
POINTER(XTextProperty),
Atom]
# /usr/include/X11/Xutil.h:5208
XGetVisualInfo = _lib.XGetVisualInfo
XGetVisualInfo.restype = POINTER(XVisualInfo)
XGetVisualInfo.argtypes = [
POINTER(Display),
c_long,
POINTER(XVisualInfo),
POINTER(c_int)]
# /usr/include/X11/Xutil.h:5215
XGetWMClientMachine = _lib.XGetWMClientMachine
XGetWMClientMachine.restype = c_int
XGetWMClientMachine.argtypes = [
POINTER(Display),
Window,
POINTER(XTextProperty)]
# /usr/include/X11/Xutil.h:5221
XGetWMHints = _lib.XGetWMHints
XGetWMHints.restype = POINTER(XWMHints)
XGetWMHints.argtypes = [POINTER(Display), Window]
# /usr/include/X11/Xutil.h:5226
XGetWMIconName = _lib.XGetWMIconName
XGetWMIconName.restype = c_int
XGetWMIconName.argtypes = [POINTER(Display), Window, POINTER(XTextProperty)]
# /usr/include/X11/Xutil.h:5232
XGetWMName = _lib.XGetWMName
XGetWMName.restype = c_int
XGetWMName.argtypes = [POINTER(Display), Window, POINTER(XTextProperty)]
# /usr/include/X11/Xutil.h:5238
XGetWMNormalHints = _lib.XGetWMNormalHints
XGetWMNormalHints.restype = c_int
XGetWMNormalHints.argtypes = [
POINTER(Display),
Window,
POINTER(XSizeHints),
POINTER(c_long)]
# /usr/include/X11/Xutil.h:5245
XGetWMSizeHints = _lib.XGetWMSizeHints
XGetWMSizeHints.restype = c_int
XGetWMSizeHints.argtypes = [
POINTER(Display),
Window,
POINTER(XSizeHints),
POINTER(c_long),
Atom]
# /usr/include/X11/Xutil.h:5253
XGetZoomHints = _lib.XGetZoomHints
XGetZoomHints.restype = c_int
XGetZoomHints.argtypes = [POINTER(Display), Window, POINTER(XSizeHints)]
# /usr/include/X11/Xutil.h:5259
XIntersectRegion = _lib.XIntersectRegion
XIntersectRegion.restype = c_int
XIntersectRegion.argtypes = [Region, Region, Region]
# /usr/include/X11/Xutil.h:5265
XConvertCase = _lib.XConvertCase
XConvertCase.restype = None
XConvertCase.argtypes = [KeySym, POINTER(KeySym), POINTER(KeySym)]
# /usr/include/X11/Xutil.h:5271
XLookupString = _lib.XLookupString
XLookupString.restype = c_int
XLookupString.argtypes = [
POINTER(XKeyEvent),
c_char_p,
c_int,
POINTER(KeySym),
POINTER(XComposeStatus)]
# /usr/include/X11/Xutil.h:5279
XMatchVisualInfo = _lib.XMatchVisualInfo
XMatchVisualInfo.restype = c_int
XMatchVisualInfo.argtypes = [
POINTER(Display),
c_int,
c_int,
c_int,
POINTER(XVisualInfo)]
# /usr/include/X11/Xutil.h:5287
XOffsetRegion = _lib.XOffsetRegion
XOffsetRegion.restype = c_int
XOffsetRegion.argtypes = [Region, c_int, c_int]
# /usr/include/X11/Xutil.h:5293
XPointInRegion = _lib.XPointInRegion
XPointInRegion.restype = c_int
XPointInRegion.argtypes = [Region, c_int, c_int]
# /usr/include/X11/Xutil.h:5299
XPolygonRegion = _lib.XPolygonRegion
XPolygonRegion.restype = Region
XPolygonRegion.argtypes = [POINTER(XPoint), c_int, c_int]
# /usr/include/X11/Xutil.h:5305
XRectInRegion = _lib.XRectInRegion
XRectInRegion.restype = c_int
XRectInRegion.argtypes = [Region, c_int, c_int, c_uint, c_uint]
# /usr/include/X11/Xutil.h:5313
XSaveContext = _lib.XSaveContext
XSaveContext.restype = c_int
XSaveContext.argtypes = [POINTER(Display), XID, XContext, c_char_p]
# /usr/include/X11/Xutil.h:5320
XSetClassHint = _lib.XSetClassHint
XSetClassHint.restype = c_int
XSetClassHint.argtypes = [POINTER(Display), Window, POINTER(XClassHint)]
# /usr/include/X11/Xutil.h:5326
XSetIconSizes = _lib.XSetIconSizes
XSetIconSizes.restype = c_int
XSetIconSizes.argtypes = [POINTER(Display), Window, POINTER(XIconSize), c_int]
# /usr/include/X11/Xutil.h:5333
XSetNormalHints = _lib.XSetNormalHints
XSetNormalHints.restype = c_int
XSetNormalHints.argtypes = [POINTER(Display), Window, POINTER(XSizeHints)]
# /usr/include/X11/Xutil.h:5339
XSetRGBColormaps = _lib.XSetRGBColormaps
XSetRGBColormaps.restype = None
XSetRGBColormaps.argtypes = [
POINTER(Display),
Window,
POINTER(XStandardColormap),
c_int,
Atom]
# /usr/include/X11/Xutil.h:5347
XSetSizeHints = _lib.XSetSizeHints
XSetSizeHints.restype = c_int
XSetSizeHints.argtypes = [POINTER(Display), Window, POINTER(XSizeHints), Atom]
# /usr/include/X11/Xutil.h:5354
XSetStandardProperties = _lib.XSetStandardProperties
XSetStandardProperties.restype = c_int
XSetStandardProperties.argtypes = [
POINTER(Display),
Window,
c_char_p,
c_char_p,
Pixmap,
POINTER(c_char_p),
c_int,
POINTER(XSizeHints)]
# /usr/include/X11/Xutil.h:5365
XSetTextProperty = _lib.XSetTextProperty
XSetTextProperty.restype = None
XSetTextProperty.argtypes = [
POINTER(Display),
Window,
POINTER(XTextProperty),
Atom]
# /usr/include/X11/Xutil.h:5372
XSetWMClientMachine = _lib.XSetWMClientMachine
XSetWMClientMachine.restype = None
XSetWMClientMachine.argtypes = [
POINTER(Display),
Window,
POINTER(XTextProperty)]
# /usr/include/X11/Xutil.h:5378
XSetWMHints = _lib.XSetWMHints
XSetWMHints.restype = c_int
XSetWMHints.argtypes = [POINTER(Display), Window, POINTER(XWMHints)]
# /usr/include/X11/Xutil.h:5384
XSetWMIconName = _lib.XSetWMIconName
XSetWMIconName.restype = None
XSetWMIconName.argtypes = [POINTER(Display), Window, POINTER(XTextProperty)]
# /usr/include/X11/Xutil.h:5390
XSetWMName = _lib.XSetWMName
XSetWMName.restype = None
XSetWMName.argtypes = [POINTER(Display), Window, POINTER(XTextProperty)]
# /usr/include/X11/Xutil.h:5396
XSetWMNormalHints = _lib.XSetWMNormalHints
XSetWMNormalHints.restype = None
XSetWMNormalHints.argtypes = [POINTER(Display), Window, POINTER(XSizeHints)]
# /usr/include/X11/Xutil.h:5402
XSetWMProperties = _lib.XSetWMProperties
XSetWMProperties.restype = None
XSetWMProperties.argtypes = [
POINTER(Display),
Window,
POINTER(XTextProperty),
POINTER(XTextProperty),
POINTER(c_char_p),
c_int,
POINTER(XSizeHints),
POINTER(XWMHints),
POINTER(XClassHint)]
# /usr/include/X11/Xutil.h:5414
XmbSetWMProperties = _lib.XmbSetWMProperties
XmbSetWMProperties.restype = None
XmbSetWMProperties.argtypes = [
POINTER(Display),
Window,
c_char_p,
c_char_p,
POINTER(c_char_p),
c_int,
POINTER(XSizeHints),
POINTER(XWMHints),
POINTER(XClassHint)]
# /usr/include/X11/Xutil.h:5426
Xutf8SetWMProperties = _lib.Xutf8SetWMProperties
Xutf8SetWMProperties.restype = None
Xutf8SetWMProperties.argtypes = [
POINTER(Display),
Window,
c_char_p,
c_char_p,
POINTER(c_char_p),
c_int,
POINTER(XSizeHints),
POINTER(XWMHints),
POINTER(XClassHint)]
# /usr/include/X11/Xutil.h:5438
XSetWMSizeHints = _lib.XSetWMSizeHints
XSetWMSizeHints.restype = None
XSetWMSizeHints.argtypes = [
POINTER(Display),
Window,
POINTER(XSizeHints),
Atom]
# /usr/include/X11/Xutil.h:5445
XSetRegion = _lib.XSetRegion
XSetRegion.restype = c_int
XSetRegion.argtypes = [POINTER(Display), GC, Region]
# /usr/include/X11/Xutil.h:5451
XSetStandardColormap = _lib.XSetStandardColormap
XSetStandardColormap.restype = None
XSetStandardColormap.argtypes = [
POINTER(Display),
Window,
POINTER(XStandardColormap),
Atom]
# /usr/include/X11/Xutil.h:5458
XSetZoomHints = _lib.XSetZoomHints
XSetZoomHints.restype = c_int
XSetZoomHints.argtypes = [POINTER(Display), Window, POINTER(XSizeHints)]
# /usr/include/X11/Xutil.h:5464
XShrinkRegion = _lib.XShrinkRegion
XShrinkRegion.restype = c_int
XShrinkRegion.argtypes = [Region, c_int, c_int]
# /usr/include/X11/Xutil.h:5470
XStringListToTextProperty = _lib.XStringListToTextProperty
XStringListToTextProperty.restype = c_int
XStringListToTextProperty.argtypes = [
POINTER(c_char_p), c_int, POINTER(XTextProperty)]
# /usr/include/X11/Xutil.h:5476
XSubtractRegion = _lib.XSubtractRegion
XSubtractRegion.restype = c_int
XSubtractRegion.argtypes = [Region, Region, Region]
# /usr/include/X11/Xutil.h:5482
XmbTextListToTextProperty = _lib.XmbTextListToTextProperty
XmbTextListToTextProperty.restype = c_int
XmbTextListToTextProperty.argtypes = [POINTER(Display), POINTER(
c_char_p), c_int, XICCEncodingStyle, POINTER(XTextProperty)]
# /usr/include/X11/Xutil.h:5490
XwcTextListToTextProperty = _lib.XwcTextListToTextProperty
XwcTextListToTextProperty.restype = c_int
XwcTextListToTextProperty.argtypes = [
POINTER(Display),
POINTER(c_wchar_p),
c_int,
XICCEncodingStyle,
POINTER(XTextProperty)]
# /usr/include/X11/Xutil.h:5498
Xutf8TextListToTextProperty = _lib.Xutf8TextListToTextProperty
Xutf8TextListToTextProperty.restype = c_int
Xutf8TextListToTextProperty.argtypes = [
POINTER(Display),
POINTER(c_char_p),
c_int,
XICCEncodingStyle,
POINTER(XTextProperty)]
# /usr/include/X11/Xutil.h:5506
XwcFreeStringList = _lib.XwcFreeStringList
XwcFreeStringList.restype = None
XwcFreeStringList.argtypes = [POINTER(c_wchar_p)]
# /usr/include/X11/Xutil.h:5510
XTextPropertyToStringList = _lib.XTextPropertyToStringList
XTextPropertyToStringList.restype = c_int
XTextPropertyToStringList.argtypes = [
POINTER(XTextProperty), POINTER(
POINTER(c_char_p)), POINTER(c_int)]
# /usr/include/X11/Xutil.h:5516
XmbTextPropertyToTextList = _lib.XmbTextPropertyToTextList
XmbTextPropertyToTextList.restype = c_int
XmbTextPropertyToTextList.argtypes = [
POINTER(Display), POINTER(XTextProperty), POINTER(
POINTER(c_char_p)), POINTER(c_int)]
# /usr/include/X11/Xutil.h:5523
XwcTextPropertyToTextList = _lib.XwcTextPropertyToTextList
XwcTextPropertyToTextList.restype = c_int
XwcTextPropertyToTextList.argtypes = [
POINTER(Display), POINTER(XTextProperty), POINTER(
POINTER(c_wchar_p)), POINTER(c_int)]
# /usr/include/X11/Xutil.h:5530
Xutf8TextPropertyToTextList = _lib.Xutf8TextPropertyToTextList
Xutf8TextPropertyToTextList.restype = c_int
Xutf8TextPropertyToTextList.argtypes = [
POINTER(Display), POINTER(XTextProperty), POINTER(
POINTER(c_char_p)), POINTER(c_int)]
# /usr/include/X11/Xutil.h:5537
XUnionRectWithRegion = _lib.XUnionRectWithRegion
XUnionRectWithRegion.restype = c_int
XUnionRectWithRegion.argtypes = [POINTER(XRectangle), Region, Region]
# /usr/include/X11/Xutil.h:5543
XUnionRegion = _lib.XUnionRegion
XUnionRegion.restype = c_int
XUnionRegion.argtypes = [Region, Region, Region]
# /usr/include/X11/Xutil.h:5549
XWMGeometry = _lib.XWMGeometry
XWMGeometry.restype = c_int
XWMGeometry.argtypes = [
POINTER(Display),
c_int,
c_char_p,
c_char_p,
c_uint,
POINTER(XSizeHints),
POINTER(c_int),
POINTER(c_int),
POINTER(c_int),
POINTER(c_int),
POINTER(c_int)]
# /usr/include/X11/Xutil.h:5563
XXorRegion = _lib.XXorRegion
XXorRegion.restype = c_int
XXorRegion.argtypes = [Region, Region, Region]
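# A hypothetical round trip through the region helpers bound above (sketch
# only; assumes `byref` from ctypes is in scope alongside these bindings):
#
#     r = XCreateRegion()
#     rect = XRectangle(x=0, y=0, width=10, height=10)
#     XUnionRectWithRegion(byref(rect), r, r)
#     assert XPointInRegion(r, 5, 5)    # nonzero when the point lies inside
#     XDestroyRegion(r)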
__all__ = ['XlibSpecificationRelease', 'X_PROTOCOL', 'X_PROTOCOL_REVISION',
'XID', 'Mask', 'Atom', 'VisualID', 'Time', 'Window', 'Drawable', 'Font',
'Pixmap', 'Cursor', 'Colormap', 'GContext', 'KeySym', 'KeyCode', 'None_',
'ParentRelative', 'CopyFromParent', 'PointerWindow', 'InputFocus',
'PointerRoot', 'AnyPropertyType', 'AnyKey', 'AnyButton', 'AllTemporary',
'CurrentTime', 'NoSymbol', 'NoEventMask', 'KeyPressMask', 'KeyReleaseMask',
'ButtonPressMask', 'ButtonReleaseMask', 'EnterWindowMask', 'LeaveWindowMask',
'PointerMotionMask', 'PointerMotionHintMask', 'Button1MotionMask',
'Button2MotionMask', 'Button3MotionMask', 'Button4MotionMask',
'Button5MotionMask', 'ButtonMotionMask', 'KeymapStateMask', 'ExposureMask',
'VisibilityChangeMask', 'StructureNotifyMask', 'ResizeRedirectMask',
'SubstructureNotifyMask', 'SubstructureRedirectMask', 'FocusChangeMask',
'PropertyChangeMask', 'ColormapChangeMask', 'OwnerGrabButtonMask', 'KeyPress',
'KeyRelease', 'ButtonPress', 'ButtonRelease', 'MotionNotify', 'EnterNotify',
'LeaveNotify', 'FocusIn', 'FocusOut', 'KeymapNotify', 'Expose',
'GraphicsExpose', 'NoExpose', 'VisibilityNotify', 'CreateNotify',
'DestroyNotify', 'UnmapNotify', 'MapNotify', 'MapRequest', 'ReparentNotify',
'ConfigureNotify', 'ConfigureRequest', 'GravityNotify', 'ResizeRequest',
'CirculateNotify', 'CirculateRequest', 'PropertyNotify', 'SelectionClear',
'SelectionRequest', 'SelectionNotify', 'ColormapNotify', 'ClientMessage',
'MappingNotify', 'GenericEvent', 'LASTEvent', 'ShiftMask', 'LockMask',
'ControlMask', 'Mod1Mask', 'Mod2Mask', 'Mod3Mask', 'Mod4Mask', 'Mod5Mask',
'ShiftMapIndex', 'LockMapIndex', 'ControlMapIndex', 'Mod1MapIndex',
'Mod2MapIndex', 'Mod3MapIndex', 'Mod4MapIndex', 'Mod5MapIndex', 'Button1Mask',
'Button2Mask', 'Button3Mask', 'Button4Mask', 'Button5Mask', 'AnyModifier',
'Button1', 'Button2', 'Button3', 'Button4', 'Button5', 'NotifyNormal',
'NotifyGrab', 'NotifyUngrab', 'NotifyWhileGrabbed', 'NotifyHint',
'NotifyAncestor', 'NotifyVirtual', 'NotifyInferior', 'NotifyNonlinear',
'NotifyNonlinearVirtual', 'NotifyPointer', 'NotifyPointerRoot',
'NotifyDetailNone', 'VisibilityUnobscured', 'VisibilityPartiallyObscured',
'VisibilityFullyObscured', 'PlaceOnTop', 'PlaceOnBottom', 'FamilyInternet',
'FamilyDECnet', 'FamilyChaos', 'FamilyInternet6', 'FamilyServerInterpreted',
'PropertyNewValue', 'PropertyDelete', 'ColormapUninstalled',
'ColormapInstalled', 'GrabModeSync', 'GrabModeAsync', 'GrabSuccess',
'AlreadyGrabbed', 'GrabInvalidTime', 'GrabNotViewable', 'GrabFrozen',
'AsyncPointer', 'SyncPointer', 'ReplayPointer', 'AsyncKeyboard',
'SyncKeyboard', 'ReplayKeyboard', 'AsyncBoth', 'SyncBoth', 'RevertToParent',
'Success', 'BadRequest', 'BadValue', 'BadWindow', 'BadPixmap', 'BadAtom',
'BadCursor', 'BadFont', 'BadMatch', 'BadDrawable', 'BadAccess', 'BadAlloc',
'BadColor', 'BadGC', 'BadIDChoice', 'BadName', 'BadLength',
'BadImplementation', 'FirstExtensionError', 'LastExtensionError',
'InputOutput', 'InputOnly', 'CWBackPixmap', 'CWBackPixel', 'CWBorderPixmap',
'CWBorderPixel', 'CWBitGravity', 'CWWinGravity', 'CWBackingStore',
'CWBackingPlanes', 'CWBackingPixel', 'CWOverrideRedirect', 'CWSaveUnder',
'CWEventMask', 'CWDontPropagate', 'CWColormap', 'CWCursor', 'CWX', 'CWY',
'CWWidth', 'CWHeight', 'CWBorderWidth', 'CWSibling', 'CWStackMode',
'ForgetGravity', 'NorthWestGravity', 'NorthGravity', 'NorthEastGravity',
'WestGravity', 'CenterGravity', 'EastGravity', 'SouthWestGravity',
'SouthGravity', 'SouthEastGravity', 'StaticGravity', 'UnmapGravity',
'NotUseful', 'WhenMapped', 'Always', 'IsUnmapped', 'IsUnviewable',
'IsViewable', 'SetModeInsert', 'SetModeDelete', 'DestroyAll',
'RetainPermanent', 'RetainTemporary', 'Above', 'Below', 'TopIf', 'BottomIf',
'Opposite', 'RaiseLowest', 'LowerHighest', 'PropModeReplace',
'PropModePrepend', 'PropModeAppend', 'GXclear', 'GXand', 'GXandReverse',
'GXcopy', 'GXandInverted', 'GXnoop', 'GXxor', 'GXor', 'GXnor', 'GXequiv',
'GXinvert', 'GXorReverse', 'GXcopyInverted', 'GXorInverted', 'GXnand',
'GXset', 'LineSolid', 'LineOnOffDash', 'LineDoubleDash', 'CapNotLast',
'CapButt', 'CapRound', 'CapProjecting', 'JoinMiter', 'JoinRound', 'JoinBevel',
'FillSolid', 'FillTiled', 'FillStippled', 'FillOpaqueStippled', 'EvenOddRule',
'WindingRule', 'ClipByChildren', 'IncludeInferiors', 'Unsorted', 'YSorted',
'YXSorted', 'YXBanded', 'CoordModeOrigin', 'CoordModePrevious', 'Complex',
'Nonconvex', 'Convex', 'ArcChord', 'ArcPieSlice', 'GCFunction', 'GCPlaneMask',
'GCForeground', 'GCBackground', 'GCLineWidth', 'GCLineStyle', 'GCCapStyle',
'GCJoinStyle', 'GCFillStyle', 'GCFillRule', 'GCTile', 'GCStipple',
'GCTileStipXOrigin', 'GCTileStipYOrigin', 'GCFont', 'GCSubwindowMode',
'GCGraphicsExposures', 'GCClipXOrigin', 'GCClipYOrigin', 'GCClipMask',
'GCDashOffset', 'GCDashList', 'GCArcMode', 'GCLastBit', 'FontLeftToRight',
'FontRightToLeft', 'FontChange', 'XYBitmap', 'XYPixmap', 'ZPixmap',
'AllocNone', 'AllocAll', 'DoRed', 'DoGreen', 'DoBlue', 'CursorShape',
'TileShape', 'StippleShape', 'AutoRepeatModeOff', 'AutoRepeatModeOn',
'AutoRepeatModeDefault', 'LedModeOff', 'LedModeOn', 'KBKeyClickPercent',
'KBBellPercent', 'KBBellPitch', 'KBBellDuration', 'KBLed', 'KBLedMode',
'KBKey', 'KBAutoRepeatMode', 'MappingSuccess', 'MappingBusy', 'MappingFailed',
'MappingModifier', 'MappingKeyboard', 'MappingPointer', 'DontPreferBlanking',
'PreferBlanking', 'DefaultBlanking', 'DisableScreenSaver',
'DisableScreenInterval', 'DontAllowExposures', 'AllowExposures',
'DefaultExposures', 'ScreenSaverReset', 'ScreenSaverActive', 'HostInsert',
'HostDelete', 'EnableAccess', 'DisableAccess', 'StaticGray', 'GrayScale',
'StaticColor', 'PseudoColor', 'TrueColor', 'DirectColor', 'LSBFirst',
'MSBFirst', '_Xmblen', 'X_HAVE_UTF8_STRING', 'XPointer', 'Bool', 'Status',
'True_', 'False_', 'QueuedAlready', 'QueuedAfterReading', 'QueuedAfterFlush',
'XExtData', 'XExtCodes', 'XPixmapFormatValues', 'XGCValues', 'GC', 'Visual',
'Depth', 'Screen', 'ScreenFormat', 'XSetWindowAttributes',
'XWindowAttributes', 'XHostAddress', 'XServerInterpretedAddress', 'XImage',
'XWindowChanges', 'XColor', 'XSegment', 'XPoint', 'XRectangle', 'XArc',
'XKeyboardControl', 'XKeyboardState', 'XTimeCoord', 'XModifierKeymap',
'Display', '_XPrivDisplay', 'XKeyEvent', 'XKeyPressedEvent',
'XKeyReleasedEvent', 'XButtonEvent', 'XButtonPressedEvent',
'XButtonReleasedEvent', 'XMotionEvent', 'XPointerMovedEvent',
'XCrossingEvent', 'XEnterWindowEvent', 'XLeaveWindowEvent',
'XFocusChangeEvent', 'XFocusInEvent', 'XFocusOutEvent', 'XKeymapEvent',
'XExposeEvent', 'XGraphicsExposeEvent', 'XNoExposeEvent', 'XVisibilityEvent',
'XCreateWindowEvent', 'XDestroyWindowEvent', 'XUnmapEvent', 'XMapEvent',
'XMapRequestEvent', 'XReparentEvent', 'XConfigureEvent', 'XGravityEvent',
'XResizeRequestEvent', 'XConfigureRequestEvent', 'XCirculateEvent',
'XCirculateRequestEvent', 'XPropertyEvent', 'XSelectionClearEvent',
'XSelectionRequestEvent', 'XSelectionEvent', 'XColormapEvent',
'XClientMessageEvent', 'XMappingEvent', 'XErrorEvent', 'XAnyEvent',
'XGenericEvent', 'XGenericEventCookie', 'XEvent', 'XCharStruct', 'XFontProp',
'XFontStruct', 'XTextItem', 'XChar2b', 'XTextItem16', 'XEDataObject',
'XFontSetExtents', 'XOM', 'XOC', 'XFontSet', 'XmbTextItem', 'XwcTextItem',
'XOMCharSetList', 'XOrientation', 'XOMOrientation_LTR_TTB',
'XOMOrientation_RTL_TTB', 'XOMOrientation_TTB_LTR', 'XOMOrientation_TTB_RTL',
'XOMOrientation_Context', 'XOMOrientation', 'XOMFontInfo', 'XIM', 'XIC',
'XIMProc', 'XICProc', 'XIDProc', 'XIMStyle', 'XIMStyles', 'XIMPreeditArea',
'XIMPreeditCallbacks', 'XIMPreeditPosition', 'XIMPreeditNothing',
'XIMPreeditNone', 'XIMStatusArea', 'XIMStatusCallbacks', 'XIMStatusNothing',
'XIMStatusNone', 'XBufferOverflow', 'XLookupNone', 'XLookupChars',
'XLookupKeySym', 'XLookupBoth', 'XVaNestedList', 'XIMCallback', 'XICCallback',
'XIMFeedback', 'XIMReverse', 'XIMUnderline', 'XIMHighlight', 'XIMPrimary',
'XIMSecondary', 'XIMTertiary', 'XIMVisibleToForward', 'XIMVisibleToBackword',
'XIMVisibleToCenter', 'XIMText', 'XIMPreeditState', 'XIMPreeditUnKnown',
'XIMPreeditEnable', 'XIMPreeditDisable',
'XIMPreeditStateNotifyCallbackStruct', 'XIMResetState', 'XIMInitialState',
'XIMPreserveState', 'XIMStringConversionFeedback',
'XIMStringConversionLeftEdge', 'XIMStringConversionRightEdge',
'XIMStringConversionTopEdge', 'XIMStringConversionBottomEdge',
'XIMStringConversionConcealed', 'XIMStringConversionWrapped',
'XIMStringConversionText', 'XIMStringConversionPosition',
'XIMStringConversionType', 'XIMStringConversionBuffer',
'XIMStringConversionLine', 'XIMStringConversionWord',
'XIMStringConversionChar', 'XIMStringConversionOperation',
'XIMStringConversionSubstitution', 'XIMStringConversionRetrieval',
'XIMCaretDirection', 'XIMForwardChar', 'XIMBackwardChar', 'XIMForwardWord',
'XIMBackwardWord', 'XIMCaretUp', 'XIMCaretDown', 'XIMNextLine',
'XIMPreviousLine', 'XIMLineStart', 'XIMLineEnd', 'XIMAbsolutePosition',
'XIMDontChange', 'XIMStringConversionCallbackStruct',
'XIMPreeditDrawCallbackStruct', 'XIMCaretStyle', 'XIMIsInvisible',
'XIMIsPrimary', 'XIMIsSecondary', 'XIMPreeditCaretCallbackStruct',
'XIMStatusDataType', 'XIMTextType', 'XIMBitmapType',
'XIMStatusDrawCallbackStruct', 'XIMHotKeyTrigger', 'XIMHotKeyTriggers',
'XIMHotKeyState', 'XIMHotKeyStateON', 'XIMHotKeyStateOFF', 'XIMValuesList',
'XLoadQueryFont', 'XQueryFont', 'XGetMotionEvents', 'XDeleteModifiermapEntry',
'XGetModifierMapping', 'XInsertModifiermapEntry', 'XNewModifiermap',
'XCreateImage', 'XInitImage', 'XGetImage', 'XGetSubImage', 'XOpenDisplay',
'XrmInitialize', 'XFetchBytes', 'XFetchBuffer', 'XGetAtomName',
'XGetAtomNames', 'XGetDefault', 'XDisplayName', 'XKeysymToString',
'XSynchronize', 'XSetAfterFunction', 'XInternAtom', 'XInternAtoms',
'XCopyColormapAndFree', 'XCreateColormap', 'XCreatePixmapCursor',
'XCreateGlyphCursor', 'XCreateFontCursor', 'XLoadFont', 'XCreateGC',
'XGContextFromGC', 'XFlushGC', 'XCreatePixmap', 'XCreateBitmapFromData',
'XCreatePixmapFromBitmapData', 'XCreateSimpleWindow', 'XGetSelectionOwner',
'XCreateWindow', 'XListInstalledColormaps', 'XListFonts',
'XListFontsWithInfo', 'XGetFontPath', 'XListExtensions', 'XListProperties',
'XListHosts', 'XKeycodeToKeysym', 'XLookupKeysym', 'XGetKeyboardMapping',
'XStringToKeysym', 'XMaxRequestSize', 'XExtendedMaxRequestSize',
'XResourceManagerString', 'XScreenResourceString', 'XDisplayMotionBufferSize',
'XVisualIDFromVisual', 'XInitThreads', 'XLockDisplay', 'XUnlockDisplay',
'XInitExtension', 'XAddExtension', 'XFindOnExtensionList',
'XEHeadOfExtensionList', 'XRootWindow', 'XDefaultRootWindow',
'XRootWindowOfScreen', 'XDefaultVisual', 'XDefaultVisualOfScreen',
'XDefaultGC', 'XDefaultGCOfScreen', 'XBlackPixel', 'XWhitePixel',
'XAllPlanes', 'XBlackPixelOfScreen', 'XWhitePixelOfScreen', 'XNextRequest',
'XLastKnownRequestProcessed', 'XServerVendor', 'XDisplayString',
'XDefaultColormap', 'XDefaultColormapOfScreen', 'XDisplayOfScreen',
'XScreenOfDisplay', 'XDefaultScreenOfDisplay', 'XEventMaskOfScreen',
'XScreenNumberOfScreen', 'XErrorHandler', 'XSetErrorHandler',
'XIOErrorHandler', 'XSetIOErrorHandler', 'XListPixmapFormats', 'XListDepths',
'XReconfigureWMWindow', 'XGetWMProtocols', 'XSetWMProtocols',
'XIconifyWindow', 'XWithdrawWindow', 'XGetCommand', 'XGetWMColormapWindows',
'XSetWMColormapWindows', 'XFreeStringList', 'XSetTransientForHint',
'XActivateScreenSaver', 'XAddHost', 'XAddHosts', 'XAddToExtensionList',
'XAddToSaveSet', 'XAllocColor', 'XAllocColorCells', 'XAllocColorPlanes',
'XAllocNamedColor', 'XAllowEvents', 'XAutoRepeatOff', 'XAutoRepeatOn',
'XBell', 'XBitmapBitOrder', 'XBitmapPad', 'XBitmapUnit', 'XCellsOfScreen',
'XChangeActivePointerGrab', 'XChangeGC', 'XChangeKeyboardControl',
'XChangeKeyboardMapping', 'XChangePointerControl', 'XChangeProperty',
'XChangeSaveSet', 'XChangeWindowAttributes', 'XCheckIfEvent',
'XCheckMaskEvent', 'XCheckTypedEvent', 'XCheckTypedWindowEvent',
'XCheckWindowEvent', 'XCirculateSubwindows', 'XCirculateSubwindowsDown',
'XCirculateSubwindowsUp', 'XClearArea', 'XClearWindow', 'XCloseDisplay',
'XConfigureWindow', 'XConnectionNumber', 'XConvertSelection', 'XCopyArea',
'XCopyGC', 'XCopyPlane', 'XDefaultDepth', 'XDefaultDepthOfScreen',
'XDefaultScreen', 'XDefineCursor', 'XDeleteProperty', 'XDestroyWindow',
'XDestroySubwindows', 'XDoesBackingStore', 'XDoesSaveUnders',
'XDisableAccessControl', 'XDisplayCells', 'XDisplayHeight',
'XDisplayHeightMM', 'XDisplayKeycodes', 'XDisplayPlanes', 'XDisplayWidth',
'XDisplayWidthMM', 'XDrawArc', 'XDrawArcs', 'XDrawImageString',
'XDrawImageString16', 'XDrawLine', 'XDrawLines', 'XDrawPoint', 'XDrawPoints',
'XDrawRectangle', 'XDrawRectangles', 'XDrawSegments', 'XDrawString',
'XDrawString16', 'XDrawText', 'XDrawText16', 'XEnableAccessControl',
'XEventsQueued', 'XFetchName', 'XFillArc', 'XFillArcs', 'XFillPolygon',
'XFillRectangle', 'XFillRectangles', 'XFlush', 'XForceScreenSaver', 'XFree',
'XFreeColormap', 'XFreeColors', 'XFreeCursor', 'XFreeExtensionList',
'XFreeFont', 'XFreeFontInfo', 'XFreeFontNames', 'XFreeFontPath', 'XFreeGC',
'XFreeModifiermap', 'XFreePixmap', 'XGeometry', 'XGetErrorDatabaseText',
'XGetErrorText', 'XGetFontProperty', 'XGetGCValues', 'XGetGeometry',
'XGetIconName', 'XGetInputFocus', 'XGetKeyboardControl', 'XGetPointerControl',
'XGetPointerMapping', 'XGetScreenSaver', 'XGetTransientForHint',
'XGetWindowProperty', 'XGetWindowAttributes', 'XGrabButton', 'XGrabKey',
'XGrabKeyboard', 'XGrabPointer', 'XGrabServer', 'XHeightMMOfScreen',
'XHeightOfScreen', 'XIfEvent', 'XImageByteOrder', 'XInstallColormap',
'XKeysymToKeycode', 'XKillClient', 'XLookupColor', 'XLowerWindow',
'XMapRaised', 'XMapSubwindows', 'XMapWindow', 'XMaskEvent',
'XMaxCmapsOfScreen', 'XMinCmapsOfScreen', 'XMoveResizeWindow', 'XMoveWindow',
'XNextEvent', 'XNoOp', 'XParseColor', 'XParseGeometry', 'XPeekEvent',
'XPeekIfEvent', 'XPending', 'XPlanesOfScreen', 'XProtocolRevision',
'XProtocolVersion', 'XPutBackEvent', 'XPutImage', 'XQLength',
'XQueryBestCursor', 'XQueryBestSize', 'XQueryBestStipple', 'XQueryBestTile',
'XQueryColor', 'XQueryColors', 'XQueryExtension', 'XQueryKeymap',
'XQueryPointer', 'XQueryTextExtents', 'XQueryTextExtents16', 'XQueryTree',
'XRaiseWindow', 'XReadBitmapFile', 'XReadBitmapFileData', 'XRebindKeysym',
'XRecolorCursor', 'XRefreshKeyboardMapping', 'XRemoveFromSaveSet',
'XRemoveHost', 'XRemoveHosts', 'XReparentWindow', 'XResetScreenSaver',
'XResizeWindow', 'XRestackWindows', 'XRotateBuffers',
'XRotateWindowProperties', 'XScreenCount', 'XSelectInput', 'XSendEvent',
'XSetAccessControl', 'XSetArcMode', 'XSetBackground', 'XSetClipMask',
'XSetClipOrigin', 'XSetClipRectangles', 'XSetCloseDownMode', 'XSetCommand',
'XSetDashes', 'XSetFillRule', 'XSetFillStyle', 'XSetFont', 'XSetFontPath',
'XSetForeground', 'XSetFunction', 'XSetGraphicsExposures', 'XSetIconName',
'XSetInputFocus', 'XSetLineAttributes', 'XSetModifierMapping',
'XSetPlaneMask', 'XSetPointerMapping', 'XSetScreenSaver',
'XSetSelectionOwner', 'XSetState', 'XSetStipple', 'XSetSubwindowMode',
'XSetTSOrigin', 'XSetTile', 'XSetWindowBackground',
'XSetWindowBackgroundPixmap', 'XSetWindowBorder', 'XSetWindowBorderPixmap',
'XSetWindowBorderWidth', 'XSetWindowColormap', 'XStoreBuffer', 'XStoreBytes',
'XStoreColor', 'XStoreColors', 'XStoreName', 'XStoreNamedColor', 'XSync',
'XTextExtents', 'XTextExtents16', 'XTextWidth', 'XTextWidth16',
'XTranslateCoordinates', 'XUndefineCursor', 'XUngrabButton', 'XUngrabKey',
'XUngrabKeyboard', 'XUngrabPointer', 'XUngrabServer', 'XUninstallColormap',
'XUnloadFont', 'XUnmapSubwindows', 'XUnmapWindow', 'XVendorRelease',
'XWarpPointer', 'XWidthMMOfScreen', 'XWidthOfScreen', 'XWindowEvent',
'XWriteBitmapFile', 'XSupportsLocale', 'XSetLocaleModifiers', 'XOpenOM',
'XCloseOM', 'XSetOMValues', 'XGetOMValues', 'XDisplayOfOM', 'XLocaleOfOM',
'XCreateOC', 'XDestroyOC', 'XOMOfOC', 'XSetOCValues', 'XGetOCValues',
'XCreateFontSet', 'XFreeFontSet', 'XFontsOfFontSet',
'XBaseFontNameListOfFontSet', 'XLocaleOfFontSet', 'XContextDependentDrawing',
'XDirectionalDependentDrawing', 'XContextualDrawing', 'XExtentsOfFontSet',
'XmbTextEscapement', 'XwcTextEscapement', 'Xutf8TextEscapement',
'XmbTextExtents', 'XwcTextExtents', 'Xutf8TextExtents',
'XmbTextPerCharExtents', 'XwcTextPerCharExtents', 'Xutf8TextPerCharExtents',
'XmbDrawText', 'XwcDrawText', 'Xutf8DrawText', 'XmbDrawString',
'XwcDrawString', 'Xutf8DrawString', 'XmbDrawImageString',
'XwcDrawImageString', 'Xutf8DrawImageString', 'XOpenIM', 'XCloseIM',
'XGetIMValues', 'XSetIMValues', 'XDisplayOfIM', 'XLocaleOfIM', 'XCreateIC',
'XDestroyIC', 'XSetICFocus', 'XUnsetICFocus', 'XwcResetIC', 'XmbResetIC',
'Xutf8ResetIC', 'XSetICValues', 'XGetICValues', 'XIMOfIC', 'XFilterEvent',
'XmbLookupString', 'XwcLookupString', 'Xutf8LookupString',
'XVaCreateNestedList', 'XRegisterIMInstantiateCallback',
'XUnregisterIMInstantiateCallback', 'XConnectionWatchProc',
'XInternalConnectionNumbers', 'XProcessInternalConnection',
'XAddConnectionWatch', 'XRemoveConnectionWatch', 'XSetAuthorization',
'_Xmbtowc', '_Xwctomb', 'XGetEventData', 'XFreeEventData', 'NoValue',
'XValue', 'YValue', 'WidthValue', 'HeightValue', 'AllValues', 'XNegative',
'YNegative', 'XSizeHints', 'USPosition', 'USSize', 'PPosition', 'PSize',
'PMinSize', 'PMaxSize', 'PResizeInc', 'PAspect', 'PBaseSize', 'PWinGravity',
'PAllHints', 'XWMHints', 'InputHint', 'StateHint', 'IconPixmapHint',
'IconWindowHint', 'IconPositionHint', 'IconMaskHint', 'WindowGroupHint',
'AllHints', 'XUrgencyHint', 'WithdrawnState', 'NormalState', 'IconicState',
'DontCareState', 'ZoomState', 'InactiveState', 'XTextProperty', 'XNoMemory',
'XLocaleNotSupported', 'XConverterNotFound', 'XICCEncodingStyle',
'XStringStyle', 'XCompoundTextStyle', 'XTextStyle', 'XStdICCTextStyle',
'XUTF8StringStyle', 'XIconSize', 'XClassHint', 'XComposeStatus', 'Region',
'RectangleOut', 'RectangleIn', 'RectanglePart', 'XVisualInfo', 'VisualNoMask',
'VisualIDMask', 'VisualScreenMask', 'VisualDepthMask', 'VisualClassMask',
'VisualRedMaskMask', 'VisualGreenMaskMask', 'VisualBlueMaskMask',
'VisualColormapSizeMask', 'VisualBitsPerRGBMask', 'VisualAllMask',
'XStandardColormap', 'BitmapSuccess', 'BitmapOpenFailed', 'BitmapFileInvalid',
'BitmapNoMemory', 'XCSUCCESS', 'XCNOMEM', 'XCNOENT', 'XContext',
'XAllocClassHint', 'XAllocIconSize', 'XAllocSizeHints',
'XAllocStandardColormap', 'XAllocWMHints', 'XClipBox', 'XCreateRegion',
'XDefaultString', 'XDeleteContext', 'XDestroyRegion', 'XEmptyRegion',
'XEqualRegion', 'XFindContext', 'XGetClassHint', 'XGetIconSizes',
'XGetNormalHints', 'XGetRGBColormaps', 'XGetSizeHints',
'XGetStandardColormap', 'XGetTextProperty', 'XGetVisualInfo',
'XGetWMClientMachine', 'XGetWMHints', 'XGetWMIconName', 'XGetWMName',
'XGetWMNormalHints', 'XGetWMSizeHints', 'XGetZoomHints', 'XIntersectRegion',
'XConvertCase', 'XLookupString', 'XMatchVisualInfo', 'XOffsetRegion',
'XPointInRegion', 'XPolygonRegion', 'XRectInRegion', 'XSaveContext',
'XSetClassHint', 'XSetIconSizes', 'XSetNormalHints', 'XSetRGBColormaps',
'XSetSizeHints', 'XSetStandardProperties', 'XSetTextProperty',
'XSetWMClientMachine', 'XSetWMHints', 'XSetWMIconName', 'XSetWMName',
'XSetWMNormalHints', 'XSetWMProperties', 'XmbSetWMProperties',
'Xutf8SetWMProperties', 'XSetWMSizeHints', 'XSetRegion',
'XSetStandardColormap', 'XSetZoomHints', 'XShrinkRegion',
'XStringListToTextProperty', 'XSubtractRegion', 'XmbTextListToTextProperty',
'XwcTextListToTextProperty', 'Xutf8TextListToTextProperty',
'XwcFreeStringList', 'XTextPropertyToStringList', 'XmbTextPropertyToTextList',
'XwcTextPropertyToTextList', 'Xutf8TextPropertyToTextList',
'XUnionRectWithRegion', 'XUnionRegion', 'XWMGeometry', 'XXorRegion']
| psychopy/versions | psychopy/iohub/devices/xlib.py | Python | gpl-3.0 | 190,917 |
# -*- coding: utf-8 -*-
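# Strategy pattern demo: Duck delegates flying and quacking to interchangeable
# behavior objects, so a behavior can be swapped at runtime (see ModelDuck in
# the __main__ block below).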
#############################################
class Duck:
    _fly_behavior = None
    _quack_behavior = None
def __init__(self):
pass
def display(self):
pass
def perform_fly(self):
self._fly_behavior.fly()
def perform_quack(self):
self._quack_behavior.quack()
    def swim(self):
        print("All ducks float")
@property
def fly_behavior(self):
return self._fly_behavior
@fly_behavior.setter
def fly_behavior(self, value):
self._fly_behavior = value
#############################################
class FlyBehavior:
def fly(self):
pass
class FlyWithWings(FlyBehavior):
def fly(self):
print("I'm flying!")
class FlyNoWay(FlyBehavior):
def fly(self):
print("I can't fly")
class FlyRocketPowered(FlyBehavior):
def fly(self):
print("I'm flying with rocket")
#############################################
class QuackBehavior:
def quack(self):
pass
class Quack(QuackBehavior):
def quack(self):
print("Quack")
class MuteQuack(QuackBehavior):
def quack(self):
print("Silence")
class Squeak(QuackBehavior):
def quack(self):
print("Squeak")
#############################################
class MallardDuck(Duck):
def __init__(self):
self._quack_behavior = Quack()
self._fly_behavior = FlyWithWings()
class ModelDuck(Duck):
def __init__(self):
self._fly_behavior = FlyNoWay()
self._quack_behavior = Quack()
def display(self):
print("I'm a model duck")
#############################################
if __name__ == "__main__":
mallard = MallardDuck()
mallard.perform_quack()
mallard.perform_fly()
model = ModelDuck()
model.perform_fly()
model.fly_behavior = FlyRocketPowered()
model.perform_fly()
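    # Expected output:
    #   Quack
    #   I'm flying!
    #   I can't fly
    #   I'm flying with rocket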
| tisOO/Patterns | strategy/python/example_1/strategy.py | Python | mit | 1,711 |
import unittest
from string_searching import search_for_substring
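# For reference, a minimal sketch of what string_searching.search_for_substring
# might look like (hypothetical -- the real module is not shown here). The
# tests below treat '*' in the needle as a wildcard for any run of characters:
#
#     import re
#
#     def search_for_substring(line):
#         haystack, needle = line.split(',', 1)
#         pattern = '.*'.join(re.escape(part) for part in needle.split('*'))
#         return 'true' if re.search(pattern, haystack) else 'false'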
class TestStringSearching(unittest.TestCase):
def test_edge(self):
self.assertEqual('true', search_for_substring('Hello,ello'))
self.assertEqual('true', search_for_substring('Hello,Hello'))
def test_given(self):
self.assertEqual('true', search_for_substring('Hello,ell'))
self.assertEqual('true', search_for_substring('This is good, is'))
self.assertEqual('true', search_for_substring('CodeEval,C*Eval'))
self.assertEqual('false', search_for_substring('Old,Young'))
def test_asterisk(self):
self.assertEqual('false', search_for_substring('CodeEval,C*Dval'))
self.assertEqual('true', search_for_substring('CodeECodeEval,C*Eval'))
self.assertEqual('true', search_for_substring(r'C*de,C*de'))
self.assertEqual('true', search_for_substring(r'C*de,*de'))
if __name__ == '__main__':
    unittest.main()
| Goyatuzo/Challenges | CodeEval/Hard/String Searching/test_string_searching.py | Python | mit | 960 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
"""
import base64
import binascii
import contextlib
import functools
import inspect
import sys
import time
import traceback
from cinderclient import exceptions as cinder_exception
import eventlet.event
from eventlet import greenthread
import eventlet.semaphore
import eventlet.timeout
from keystoneauth1 import exceptions as keystone_exception
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import build_results
from nova.compute import claims
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute.utils import wrap_instance_event
from nova.compute import vm_states
from nova import conductor
import nova.conf
import nova.context
from nova import exception
from nova import exception_wrapper
from nova import hooks
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.image import glance
from nova import manager
from nova import network
from nova.network import base_api as base_net_api
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.objects import migrate_data as migrate_data_obj
from nova.pci import whitelist
from nova import rpc
from nova import safe_utils
from nova.scheduler import client as scheduler_client
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import storage_users
from nova.virt import virtapi
from nova.volume import cinder
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception_wrapper.wrap_exception,
get_notifier=get_notifier,
binary='nova-compute')
@utils.expects_func_args('migration')
def errors_out_migration(function):
"""Decorator to error out migration on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except Exception as ex:
with excutils.save_and_reraise_exception():
wrapped_func = safe_utils.get_wrapped_function(function)
keyed_args = inspect.getcallargs(wrapped_func, self, context,
*args, **kwargs)
migration = keyed_args['migration']
# NOTE(rajesht): If InstanceNotFound error is thrown from
# decorated function, migration status should be set to
# 'error', without checking current migration status.
if not isinstance(ex, exception.InstanceNotFound):
status = migration.status
if status not in ['migrating', 'post-migrating']:
return
migration.status = 'error'
try:
with migration.obj_as_admin():
migration.save()
except Exception:
LOG.debug('Error setting migration status '
'for instance %s.',
migration.instance_uuid, exc_info=True)
return decorated_function
@utils.expects_func_args('instance')
def reverts_task_state(function):
"""Decorator to revert task_state on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.UnexpectedTaskStateError as e:
# Note(maoy): unexpected task state means the current
# task is preempted. Do not clear task state in this
# case.
with excutils.save_and_reraise_exception():
LOG.info(_LI("Task possibly preempted: %s"),
e.format_message())
except Exception:
with excutils.save_and_reraise_exception():
wrapped_func = safe_utils.get_wrapped_function(function)
keyed_args = inspect.getcallargs(wrapped_func, self, context,
*args, **kwargs)
# NOTE(mriedem): 'instance' must be in keyed_args because we
# have utils.expects_func_args('instance') decorating this
# method.
instance = keyed_args['instance']
original_task_state = instance.task_state
try:
self._instance_update(context, instance, task_state=None)
LOG.info(_LI("Successfully reverted task state from %s on "
"failure for instance."), original_task_state,
instance=instance)
except exception.InstanceNotFound:
# We might delete an instance that failed to build shortly
# after it errored out this is an expected case and we
# should not trace on it.
pass
except Exception as e:
msg = _LW("Failed to revert task state for instance. "
"Error: %s")
LOG.warning(msg, e, instance=instance)
return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_fault(function):
"""Wraps a method to catch exceptions related to instances.
This decorator wraps a method to catch any exceptions having to do with
an instance that may get thrown. It then logs an instance fault in the db.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.InstanceNotFound:
raise
except Exception as e:
# NOTE(gtt): If argument 'instance' is in args rather than kwargs,
# we will get a KeyError exception which will cover up the real
            # exception. So, we update kwargs with the values from args first.
            # Then, we can get 'instance' from kwargs easily.
kwargs.update(dict(zip(function.__code__.co_varnames[2:], args)))
with excutils.save_and_reraise_exception():
compute_utils.add_instance_fault_from_exc(context,
kwargs['instance'], e, sys.exc_info())
return decorated_function
@utils.expects_func_args('image_id', 'instance')
def delete_image_on_error(function):
"""Used for snapshot related method to ensure the image created in
compute.api is deleted when an error occurs.
"""
@functools.wraps(function)
def decorated_function(self, context, image_id, instance,
*args, **kwargs):
try:
return function(self, context, image_id, instance,
*args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug("Cleaning up image %s", image_id,
exc_info=True, instance=instance)
try:
self.image_api.delete(context, image_id)
except exception.ImageNotFound:
                # Since we're trying to clean up an image, we don't care
                # if it's already gone.
pass
except Exception:
LOG.exception(_LE("Error while trying to clean up "
"image %s"), image_id,
instance=instance)
return decorated_function
# TODO(danms): Remove me after Icehouse
# TODO(alaski): Actually remove this after Newton, assuming a major RPC bump
# NOTE(mikal): if the method being decorated has more than one decorator, then
# put this one first. Otherwise the various exception handling decorators do
# not function correctly.
def object_compat(function):
"""Wraps a method that expects a new-world instance
This provides compatibility for callers passing old-style dict
instances.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
def _load_instance(instance_or_dict):
if isinstance(instance_or_dict, dict):
# try to get metadata and system_metadata for most cases but
# only attempt to load those if the db instance already has
# those fields joined
metas = [meta for meta in ('metadata', 'system_metadata')
if meta in instance_or_dict]
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance_or_dict,
expected_attrs=metas)
instance._context = context
return instance
return instance_or_dict
try:
kwargs['instance'] = _load_instance(kwargs['instance'])
except KeyError:
args = (_load_instance(args[0]),) + args[1:]
migration = kwargs.get('migration')
if isinstance(migration, dict):
migration = objects.Migration._from_db_object(
context.elevated(), objects.Migration(),
migration)
kwargs['migration'] = migration
return function(self, context, *args, **kwargs)
return decorated_function
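# Illustration only: on ComputeManager RPC methods the decorators above are
# stacked roughly as follows (hypothetical method name); per the NOTE above,
# object_compat goes first whenever it is used:
#
#     @object_compat
#     @wrap_exception()
#     @reverts_task_state
#     @wrap_instance_fault
#     def do_something_with_instance(self, context, instance):
#         ...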
class InstanceEvents(object):
def __init__(self):
self._events = {}
@staticmethod
def _lock_name(instance):
return '%s-%s' % (instance.uuid, 'events')
def prepare_for_instance_event(self, instance, event_name):
"""Prepare to receive an event for an instance.
This will register an event for the given instance that we will
wait on later. This should be called before initiating whatever
action will trigger the event. The resulting eventlet.event.Event
object should be wait()'d on to ensure completion.
:param instance: the instance for which the event will be generated
:param event_name: the name of the event we're expecting
:returns: an event object that should be wait()'d on
"""
if self._events is None:
# NOTE(danms): We really should have a more specific error
# here, but this is what we use for our default error case
raise exception.NovaException('In shutdown, no new events '
'can be scheduled')
@utils.synchronized(self._lock_name(instance))
def _create_or_get_event():
instance_events = self._events.setdefault(instance.uuid, {})
return instance_events.setdefault(event_name,
eventlet.event.Event())
LOG.debug('Preparing to wait for external event %(event)s',
{'event': event_name}, instance=instance)
return _create_or_get_event()
def pop_instance_event(self, instance, event):
"""Remove a pending event from the wait list.
This will remove a pending event from the wait list so that it
can be used to signal the waiters to wake up.
:param instance: the instance for which the event was generated
:param event: the nova.objects.external_event.InstanceExternalEvent
that describes the event
:returns: the eventlet.event.Event object on which the waiters
are blocked
"""
no_events_sentinel = object()
no_matching_event_sentinel = object()
@utils.synchronized(self._lock_name(instance))
def _pop_event():
if not self._events:
LOG.debug('Unexpected attempt to pop events during shutdown',
instance=instance)
return no_events_sentinel
events = self._events.get(instance.uuid)
if not events:
return no_events_sentinel
_event = events.pop(event.key, None)
if not events:
del self._events[instance.uuid]
if _event is None:
return no_matching_event_sentinel
return _event
result = _pop_event()
if result is no_events_sentinel:
            LOG.debug('No waiting events found while dispatching %(event)s',
{'event': event.key},
instance=instance)
return None
elif result is no_matching_event_sentinel:
LOG.debug('No event matching %(event)s in %(events)s',
{'event': event.key,
'events': self._events.get(instance.uuid, {}).keys()},
instance=instance)
return None
else:
return result
def clear_events_for_instance(self, instance):
"""Remove all pending events for an instance.
This will remove all events currently pending for an instance
and return them (indexed by event name).
:param instance: the instance for which events should be purged
:returns: a dictionary of {event_name: eventlet.event.Event}
"""
@utils.synchronized(self._lock_name(instance))
def _clear_events():
if self._events is None:
LOG.debug('Unexpected attempt to clear events during shutdown',
instance=instance)
return dict()
return self._events.pop(instance.uuid, {})
return _clear_events()
def cancel_all_events(self):
if self._events is None:
LOG.debug('Unexpected attempt to cancel events during shutdown.')
return
our_events = self._events
# NOTE(danms): Block new events
self._events = None
for instance_uuid, events in our_events.items():
for event_name, eventlet_event in events.items():
LOG.debug('Canceling in-flight event %(event)s for '
'instance %(instance_uuid)s',
{'event': event_name,
'instance_uuid': instance_uuid})
name, tag = event_name.rsplit('-', 1)
event = objects.InstanceExternalEvent(
instance_uuid=instance_uuid,
name=name, status='failed',
tag=tag, data={})
eventlet_event.send(event)
class ComputeVirtAPI(virtapi.VirtAPI):
def __init__(self, compute):
super(ComputeVirtAPI, self).__init__()
self._compute = compute
def _default_error_callback(self, event_name, instance):
raise exception.NovaException(_('Instance event failed'))
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
"""Plan to wait for some events, run some code, then wait.
This context manager will first create plans to wait for the
provided event_names, yield, and then wait for all the scheduled
events to complete.
Note that this uses an eventlet.timeout.Timeout to bound the
operation, so callers should be prepared to catch that
failure and handle that situation appropriately.
If the event is not received by the specified timeout deadline,
eventlet.timeout.Timeout is raised.
If the event is received but did not have a 'completed'
status, a NovaException is raised. If an error_callback is
provided, instead of raising an exception as detailed above
for the failure case, the callback will be called with the
event_name and instance, and can return True to continue
waiting for the rest of the events, False to stop processing,
or raise an exception which will bubble up to the waiter.
:param instance: The instance for which an event is expected
:param event_names: A list of event names. Each element can be a
string event name or tuple of strings to
indicate (name, tag).
:param deadline: Maximum number of seconds we should wait for all
of the specified events to arrive.
        :param error_callback: A function to be called if an event arrives
                               without a 'completed' status (see above).
"""
if error_callback is None:
error_callback = self._default_error_callback
events = {}
for event_name in event_names:
if isinstance(event_name, tuple):
name, tag = event_name
event_name = objects.InstanceExternalEvent.make_key(
name, tag)
try:
events[event_name] = (
self._compute.instance_events.prepare_for_instance_event(
instance, event_name))
except exception.NovaException:
error_callback(event_name, instance)
# NOTE(danms): Don't wait for any of the events. They
# should all be canceled and fired immediately below,
# but don't stick around if not.
deadline = 0
yield
with eventlet.timeout.Timeout(deadline):
for event_name, event in events.items():
actual_event = event.wait()
if actual_event.status == 'completed':
continue
decision = error_callback(event_name, instance)
if decision is False:
break
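# A sketch (hypothetical caller) of the wait_for_instance_event contract
# documented above: register interest in the events, start the action that
# triggers them, then block until they arrive or the deadline expires:
#
#     events = [('network-vif-plugged', vif['id'])]
#     with self.virtapi.wait_for_instance_event(instance, events,
#                                               deadline=300):
#         plug_vifs()    # whatever triggers the external event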
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='4.13')
# How long to wait in seconds before re-issuing a shutdown
# signal to an instance during power off. The overall
# time to wait is set by CONF.shutdown_timeout.
SHUTDOWN_RETRY_INTERVAL = 10
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
self.virtapi = ComputeVirtAPI(self)
self.network_api = network.API()
self.volume_api = cinder.API()
self.image_api = image.API()
self._last_host_check = 0
self._last_bw_usage_poll = 0
self._bw_usage_supported = True
self._last_bw_usage_cell_update = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.conductor_api = conductor.API()
self.compute_task_api = conductor.ComputeTaskAPI()
self.is_neutron_security_groups = (
openstack_driver.is_neutron_security_groups())
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.scheduler_client = scheduler_client.SchedulerClient()
self._resource_tracker = None
self.instance_events = InstanceEvents()
self._sync_power_pool = eventlet.GreenPool(
size=CONF.sync_power_state_pool_size)
self._syncs_in_progress = {}
self.send_instance_updates = (
CONF.filter_scheduler.track_instance_changes)
if CONF.max_concurrent_builds != 0:
self._build_semaphore = eventlet.semaphore.Semaphore(
CONF.max_concurrent_builds)
else:
self._build_semaphore = compute_utils.UnlimitedSemaphore()
if max(CONF.max_concurrent_live_migrations, 0) != 0:
self._live_migration_semaphore = eventlet.semaphore.Semaphore(
CONF.max_concurrent_live_migrations)
else:
self._live_migration_semaphore = compute_utils.UnlimitedSemaphore()
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
# NOTE(russellb) Load the driver last. It may call back into the
# compute manager via the virtapi, so we want it to be fully
# initialized before that happens.
self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
self.use_legacy_block_device_info = \
self.driver.need_legacy_block_device_info
def reset(self):
LOG.info(_LI('Reloading compute RPC API'))
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def _get_resource_tracker(self):
if not self._resource_tracker:
rt = resource_tracker.ResourceTracker(self.host, self.driver)
self._resource_tracker = rt
return self._resource_tracker
def _update_resource_tracker(self, context, instance):
"""Let the resource tracker know that an instance has changed state."""
if instance.host == self.host:
rt = self._get_resource_tracker()
rt.update_usage(context, instance, instance.node)
def _instance_update(self, context, instance, **kwargs):
"""Update an instance in the database using kwargs as value."""
for k, v in kwargs.items():
setattr(instance, k, v)
instance.save()
self._update_resource_tracker(context, instance)
def _nil_out_instance_obj_host_and_node(self, instance):
# NOTE(jwcroppe): We don't do instance.save() here for performance
# reasons; a call to this is expected to be immediately followed by
# another call that does instance.save(), thus avoiding two writes
# to the database layer.
instance.host = None
instance.node = None
def _set_instance_obj_error_state(self, context, instance,
clean_task_state=False):
try:
instance.vm_state = vm_states.ERROR
if clean_task_state:
instance.task_state = None
instance.save()
except exception.InstanceNotFound:
LOG.debug('Instance has been destroyed from under us while '
'trying to set it to ERROR', instance=instance)
def _get_instances_on_driver(self, context, filters=None):
"""Return a list of instance records for the instances found
on the hypervisor which satisfy the specified filters. If filters=None
return a list of instance records for all the instances found on the
hypervisor.
"""
if not filters:
filters = {}
try:
driver_uuids = self.driver.list_instance_uuids()
if len(driver_uuids) == 0:
# Short circuit, don't waste a DB call
return objects.InstanceList()
filters['uuid'] = driver_uuids
local_instances = objects.InstanceList.get_by_filters(
context, filters, use_slave=True)
return local_instances
except NotImplementedError:
pass
# The driver doesn't support uuids listing, so we'll have
# to brute force.
driver_instances = self.driver.list_instances()
instances = objects.InstanceList.get_by_filters(context, filters,
use_slave=True)
name_map = {instance.name: instance for instance in instances}
local_instances = []
for driver_instance in driver_instances:
instance = name_map.get(driver_instance)
if not instance:
continue
local_instances.append(instance)
return local_instances
def _destroy_evacuated_instances(self, context):
"""Destroys evacuated instances.
While nova-compute was down, the instances running on it could be
evacuated to another host. Check that the instances reported
by the driver are still associated with this host. If they are
not, destroy them, with the exception of instances which are in
the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH
task state or RESIZED vm state.
"""
filters = {
'source_compute': self.host,
'status': ['accepted', 'done'],
'migration_type': 'evacuation',
}
evacuations = objects.MigrationList.get_by_filters(context, filters)
if not evacuations:
return
evacuations = {mig.instance_uuid: mig for mig in evacuations}
filters = {'deleted': False}
local_instances = self._get_instances_on_driver(context, filters)
evacuated = [inst for inst in local_instances
if inst.uuid in evacuations]
for instance in evacuated:
migration = evacuations[instance.uuid]
LOG.info(_LI('Deleting instance as it has been evacuated from '
'this host'), instance=instance)
try:
network_info = self.network_api.get_instance_nw_info(
context, instance)
bdi = self._get_instance_block_device_info(context,
instance)
destroy_disks = not (self._is_instance_storage_shared(
context, instance))
except exception.InstanceNotFound:
network_info = network_model.NetworkInfo()
bdi = {}
LOG.info(_LI('Instance has been marked deleted already, '
'removing it from the hypervisor.'),
instance=instance)
# always destroy disks if the instance was deleted
destroy_disks = True
self.driver.destroy(context, instance,
network_info,
bdi, destroy_disks)
migration.status = 'completed'
migration.save()
def _is_instance_storage_shared(self, context, instance, host=None):
shared_storage = True
data = None
try:
data = self.driver.check_instance_shared_storage_local(context,
instance)
if data:
shared_storage = (self.compute_rpcapi.
check_instance_shared_storage(context,
instance, data, host=host))
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'instance shared storage check, '
'assuming it\'s not on shared storage',
instance=instance)
shared_storage = False
except Exception:
            LOG.exception(
                _LE('Failed to check if instance storage is shared'),
                instance=instance)
finally:
if data:
self.driver.check_instance_shared_storage_cleanup(context,
data)
return shared_storage
def _complete_partial_deletion(self, context, instance):
"""Complete deletion for instances in DELETED status but not marked as
        deleted in the DB.
"""
system_meta = instance.system_metadata
instance.destroy()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
quotas = objects.Quotas(context=context)
project_id, user_id = objects.quotas.ids_from_instance(context,
instance)
quotas.reserve(project_id=project_id, user_id=user_id, instances=-1,
cores=-instance.flavor.vcpus,
ram=-instance.flavor.memory_mb)
self._complete_deletion(context,
instance,
bdms,
quotas,
system_meta)
def _complete_deletion(self, context, instance, bdms,
quotas, system_meta):
if quotas:
quotas.commit()
# ensure block device mappings are not leaked
for bdm in bdms:
bdm.destroy()
self._update_resource_tracker(context, instance)
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.DELETE,
phase=fields.NotificationPhase.END)
self._delete_scheduler_instance_info(context, instance.uuid)
def _create_reservations(self, context, instance, project_id, user_id):
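        # Reserving negative deltas (-1 instance, -vcpus, -mem_mb) releases
        # the quota held by the instance once the reservation is committed.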
vcpus = instance.flavor.vcpus
mem_mb = instance.flavor.memory_mb
quotas = objects.Quotas(context=context)
quotas.reserve(project_id=project_id,
user_id=user_id,
instances=-1,
cores=-vcpus,
ram=-mem_mb)
return quotas
def _init_instance(self, context, instance):
        """Initialize this instance during service init."""
# NOTE(danms): If the instance appears to not be owned by this
# host, it may have been evacuated away, but skipped by the
# evacuation cleanup code due to configuration. Thus, if that
# is a possibility, don't touch the instance in any way, but
# log the concern. This will help avoid potential issues on
# startup due to misconfiguration.
if instance.host != self.host:
LOG.warning(_LW('Instance %(uuid)s appears to not be owned '
'by this host, but by %(host)s. Startup '
'processing is being skipped.'),
{'uuid': instance.uuid,
'host': instance.host})
return
        # Instances that are shut down or in an error state cannot be
# initialized and are not attempted to be recovered. The exception
# to this are instances that are in RESIZE_MIGRATING or DELETING,
# which are dealt with further down.
if (instance.vm_state == vm_states.SOFT_DELETED or
(instance.vm_state == vm_states.ERROR and
instance.task_state not in
(task_states.RESIZE_MIGRATING, task_states.DELETING))):
LOG.debug("Instance is in %s state.",
instance.vm_state, instance=instance)
return
if instance.vm_state == vm_states.DELETED:
try:
self._complete_partial_deletion(context, instance)
except Exception:
                # we don't want an exception to block init_host
msg = _LE('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
return
if (instance.vm_state == vm_states.BUILDING or
instance.task_state in [task_states.SCHEDULING,
task_states.BLOCK_DEVICE_MAPPING,
task_states.NETWORKING,
task_states.SPAWNING]):
# NOTE(dave-mcnally) compute stopped before instance was fully
# spawned so set to ERROR state. This is safe to do as the state
            # may be set by the api but the host is not, so if we get here the
# instance has already been scheduled to this particular host.
LOG.debug("Instance failed to spawn correctly, "
"setting to ERROR state", instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and
instance.task_state in [task_states.REBUILDING,
task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILD_SPAWNING]):
# NOTE(jichenjc) compute stopped before instance was fully
            # spawned so set to ERROR state. This is consistent with BUILD
LOG.debug("Instance failed to rebuild correctly, "
"setting to ERROR state", instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state != vm_states.ERROR and
instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING,
task_states.IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING,
task_states.IMAGE_SNAPSHOT]):
LOG.debug("Instance in transitional state %s at start-up "
"clearing task state",
instance.task_state, instance=instance)
try:
self._post_interrupted_snapshot_cleanup(context, instance)
except Exception:
                # we don't want an exception to block init_host
msg = _LE('Failed to cleanup snapshot.')
LOG.exception(msg, instance=instance)
instance.task_state = None
instance.save()
if (instance.vm_state != vm_states.ERROR and
instance.task_state in [task_states.RESIZE_PREP]):
LOG.debug("Instance in transitional state %s at start-up "
"clearing task state",
                      instance.task_state, instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.DELETING:
try:
LOG.info(_LI('Service started deleting the instance during '
'the previous run, but did not finish. Restarting'
' the deletion now.'), instance=instance)
instance.obj_load_attr('metadata')
instance.obj_load_attr('system_metadata')
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
project_id, user_id = objects.quotas.ids_from_instance(
context, instance)
quotas = self._create_reservations(context, instance,
project_id, user_id)
self._delete_instance(context, instance, bdms, quotas)
except Exception:
                # we don't want an exception to block init_host
msg = _LE('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
self._set_instance_obj_error_state(context, instance)
return
current_power_state = self._get_power_state(context, instance)
try_reboot, reboot_type = self._retry_reboot(context, instance,
current_power_state)
if try_reboot:
LOG.debug("Instance in transitional state (%(task_state)s) at "
"start-up and power state is (%(power_state)s), "
"triggering reboot",
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
# NOTE(mikal): if the instance was doing a soft reboot that got as
# far as shutting down the instance but not as far as starting it
# again, then we've just become a hard reboot. That means the
# task state for the instance needs to change so that we're in one
# of the expected task states for a hard reboot.
soft_types = [task_states.REBOOT_STARTED,
task_states.REBOOT_PENDING,
task_states.REBOOTING]
if instance.task_state in soft_types and reboot_type == 'HARD':
instance.task_state = task_states.REBOOT_PENDING_HARD
instance.save()
self.reboot_instance(context, instance, block_device_info=None,
reboot_type=reboot_type)
return
elif (current_power_state == power_state.RUNNING and
instance.task_state in [task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD,
task_states.PAUSING,
task_states.UNPAUSING]):
LOG.warning(_LW("Instance in transitional state "
"(%(task_state)s) at start-up and power state "
"is (%(power_state)s), clearing task state"),
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ACTIVE
instance.save()
elif (current_power_state == power_state.PAUSED and
instance.task_state == task_states.UNPAUSING):
LOG.warning(_LW("Instance in transitional state "
"(%(task_state)s) at start-up and power state "
"is (%(power_state)s), clearing task state "
"and unpausing the instance"),
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
try:
self.unpause_instance(context, instance)
except NotImplementedError:
                # Some virt drivers don't support pause and unpause
pass
except Exception:
LOG.exception(_LE('Failed to unpause instance'),
instance=instance)
return
if instance.task_state == task_states.POWERING_OFF:
try:
LOG.debug("Instance in transitional state %s at start-up "
"retrying stop request",
instance.task_state, instance=instance)
self.stop_instance(context, instance, True)
except Exception:
                # we don't want an exception to block init_host
msg = _LE('Failed to stop instance')
LOG.exception(msg, instance=instance)
return
if instance.task_state == task_states.POWERING_ON:
try:
LOG.debug("Instance in transitional state %s at start-up "
"retrying start request",
instance.task_state, instance=instance)
self.start_instance(context, instance)
except Exception:
                # we don't want an exception to block init_host
msg = _LE('Failed to start instance')
LOG.exception(msg, instance=instance)
return
net_info = compute_utils.get_nw_info_for_instance(instance)
try:
self.driver.plug_vifs(instance, net_info)
except NotImplementedError as e:
LOG.debug(e, instance=instance)
except exception.VirtualInterfacePlugException:
            # we don't want an exception to block init_host
LOG.exception(_LE("Vifs plug failed"), instance=instance)
self._set_instance_obj_error_state(context, instance)
return
if instance.task_state == task_states.RESIZE_MIGRATING:
# We crashed during resize/migration, so roll back for safety
try:
# NOTE(mriedem): check old_vm_state for STOPPED here, if it's
# not in system_metadata we default to True for backwards
# compatibility
power_on = (instance.system_metadata.get('old_vm_state') !=
vm_states.STOPPED)
block_dev_info = self._get_instance_block_device_info(context,
instance)
self.driver.finish_revert_migration(context,
instance, net_info, block_dev_info, power_on)
except Exception:
LOG.exception(_LE('Failed to revert crashed migration'),
instance=instance)
finally:
LOG.info(_LI('Instance found in migrating state during '
'startup. Resetting task_state'),
instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.MIGRATING:
# Live migration did not complete, but instance is on this
# host, so reset the state.
instance.task_state = None
instance.save(expected_task_state=[task_states.MIGRATING])
db_state = instance.power_state
drv_state = self._get_power_state(context, instance)
expect_running = (db_state == power_state.RUNNING and
drv_state != db_state)
LOG.debug('Current state is %(drv_state)s, state in DB is '
'%(db_state)s.',
{'drv_state': drv_state, 'db_state': db_state},
instance=instance)
if expect_running and CONF.resume_guests_state_on_host_boot:
LOG.info(_LI('Rebooting instance after nova-compute restart.'),
instance=instance)
block_device_info = \
self._get_instance_block_device_info(context, instance)
try:
self.driver.resume_state_on_host_boot(
context, instance, net_info, block_device_info)
except NotImplementedError:
LOG.warning(_LW('Hypervisor driver does not support '
'resume guests'), instance=instance)
except Exception:
# NOTE(vish): The instance failed to resume, so we set the
# instance to error and attempt to continue.
LOG.warning(_LW('Failed to resume instance'),
instance=instance)
self._set_instance_obj_error_state(context, instance)
elif drv_state == power_state.RUNNING:
# VMwareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance, net_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'firewall rules', instance=instance)
def _retry_reboot(self, context, instance, current_power_state):
current_task_state = instance.task_state
retry_reboot = False
reboot_type = compute_utils.get_reboot_type(current_task_state,
current_power_state)
pending_soft = (current_task_state == task_states.REBOOT_PENDING and
instance.vm_state in vm_states.ALLOW_SOFT_REBOOT)
pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD
and instance.vm_state in vm_states.ALLOW_HARD_REBOOT)
started_not_running = (current_task_state in
[task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD] and
current_power_state != power_state.RUNNING)
if pending_soft or pending_hard or started_not_running:
retry_reboot = True
return retry_reboot, reboot_type
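    # Illustrative sketch (hypothetical states, not from the original
    # module): _retry_reboot decides whether init_host should re-issue an
    # interrupted reboot. A soft reboot that got as far as shutting the
    # guest down is retried as a hard reboot:
    #
    #     instance.task_state = task_states.REBOOT_STARTED
    #     retry, reboot_type = self._retry_reboot(
    #         context, instance, power_state.SHUTDOWN)
    #     # retry is True; reboot_type comes from
    #     # compute_utils.get_reboot_type() and is 'HARD' here because
    #     # the guest is no longer running.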
def handle_lifecycle_event(self, event):
LOG.info(_LI("VM %(state)s (Lifecycle Event)"),
{'state': event.get_name()},
instance_uuid=event.get_instance_uuid())
context = nova.context.get_admin_context(read_deleted='yes')
instance = objects.Instance.get_by_uuid(context,
event.get_instance_uuid(),
expected_attrs=[])
vm_power_state = None
if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
vm_power_state = power_state.SHUTDOWN
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
vm_power_state = power_state.PAUSED
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_SUSPENDED:
vm_power_state = power_state.SUSPENDED
else:
LOG.warning(_LW("Unexpected power state %d"),
event.get_transition())
# Note(lpetrut): The event may be delayed, thus not reflecting
# the current instance power state. In that case, ignore the event.
current_power_state = self._get_power_state(context, instance)
if current_power_state == vm_power_state:
LOG.debug('Synchronizing instance power state after lifecycle '
'event "%(event)s"; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, VM power_state: '
'%(vm_power_state)s',
{'event': event.get_name(),
'vm_state': instance.vm_state,
'task_state': instance.task_state,
'db_power_state': instance.power_state,
'vm_power_state': vm_power_state},
instance_uuid=instance.uuid)
self._sync_instance_power_state(context,
instance,
vm_power_state)
def handle_events(self, event):
if isinstance(event, virtevent.LifecycleEvent):
try:
self.handle_lifecycle_event(event)
except exception.InstanceNotFound:
LOG.debug("Event %s arrived for non-existent instance. The "
"instance was probably deleted.", event)
else:
LOG.debug("Ignoring event %s", event)
def init_virt_events(self):
if CONF.workarounds.handle_virt_lifecycle_events:
self.driver.register_event_listener(self.handle_events)
else:
# NOTE(mriedem): If the _sync_power_states periodic task is
# disabled we should emit a warning in the logs.
if CONF.sync_power_state_interval < 0:
LOG.warning(_LW('Instance lifecycle events from the compute '
'driver have been disabled. Note that lifecycle '
'changes to an instance outside of the compute '
'service will not be synchronized '
'automatically since the _sync_power_states '
'periodic task is also disabled.'))
else:
LOG.info(_LI('Instance lifecycle events from the compute '
'driver have been disabled. Note that lifecycle '
'changes to an instance outside of the compute '
'service will only be synchronized by the '
'_sync_power_states periodic task.'))
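    # Illustrative configuration sketch (hypothetical nova.conf values,
    # not from the original module): the two settings referenced above
    # interact. Disabling lifecycle events while also disabling the
    # periodic sync leaves out-of-band power state changes unnoticed:
    #
    #     [workarounds]
    #     handle_virt_lifecycle_events = False
    #
    #     [DEFAULT]
    #     sync_power_state_interval = -1    # periodic sync disabled too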
def init_host(self):
"""Initialization for a standalone compute service."""
if CONF.pci.passthrough_whitelist:
# Simply loading the PCI passthrough whitelist will do a bunch of
# validation that would otherwise wait until the PciDevTracker is
# constructed when updating available resources for the compute
# node(s) in the resource tracker, effectively killing that task.
# So load up the whitelist when starting the compute service to
# flush any invalid configuration early so we can kill the service
# if the configuration is wrong.
whitelist.Whitelist(CONF.pci.passthrough_whitelist)
        # NOTE(sbauza): We want the compute node to hard fail if it can't
        # provide its resources to the placement API, since otherwise it
        # would not be eligible as a destination.
if CONF.placement.os_region_name is None:
raise exception.PlacementNotConfigured()
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=['info_cache', 'metadata'])
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
self.init_virt_events()
try:
# checking that instance was not already evacuated to other host
self._destroy_evacuated_instances(context)
for instance in instances:
self._init_instance(context, instance)
finally:
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_off()
self._update_scheduler_instance_info(context, instances)
def cleanup_host(self):
self.driver.register_event_listener(None)
self.instance_events.cancel_all_events()
self.driver.cleanup_host(host=self.host)
def pre_start_hook(self):
"""After the service is initialized, but before we fully bring
the service up by listening on RPC queues, make sure to update
our available resources (and indirectly our available nodes).
"""
self.update_available_resource(nova.context.get_admin_context(),
startup=True)
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug('Checking state', instance=instance)
try:
return self.driver.get_info(instance).state
except exception.InstanceNotFound:
return power_state.NOSTATE
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
# TODO(mdragon): perhaps make this variable by console_type?
return '%s.%s' % (CONF.console_topic, CONF.console_host)
@wrap_exception()
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
# NOTE(hanlind): This and the virt method it calls can be removed in
# version 5.0 of the RPC API
@wrap_exception()
def refresh_security_group_rules(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group rules.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_rules(security_group_id)
# TODO(alaski): Remove object_compat for RPC version 5.0
@object_compat
@wrap_exception()
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
an instance.
Passes straight through to the virtualization driver.
Synchronize the call because we may still be in the middle of
creating the instance.
"""
@utils.synchronized(instance.uuid)
def _sync_refresh():
try:
return self.driver.refresh_instance_security_rules(instance)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'security groups.', instance=instance)
return _sync_refresh()
def _await_block_device_map_created(self, context, vol_id):
        # TODO(yamahata): would creating the volumes concurrently reduce
        # creation time?
        # TODO(yamahata): eliminate dumb polling
start = time.time()
retries = CONF.block_device_allocate_retries
if retries < 0:
LOG.warning(_LW("Treating negative config value (%(retries)s) for "
"'block_device_retries' as 0."),
{'retries': retries})
        # (1) treat a negative config value as 0
        # (2) if the configured value is 0, one attempt should be made
        # (3) if the configured value is > 0, the total number of attempts
        #     is (retries + 1)
attempts = 1
if retries >= 1:
attempts = retries + 1
for attempt in range(1, attempts + 1):
volume = self.volume_api.get(context, vol_id)
volume_status = volume['status']
if volume_status not in ['creating', 'downloading']:
if volume_status == 'available':
return attempt
LOG.warning(_LW("Volume id: %(vol_id)s finished being "
"created but its status is %(vol_status)s."),
{'vol_id': vol_id,
'vol_status': volume_status})
break
greenthread.sleep(CONF.block_device_allocate_retries_interval)
raise exception.VolumeNotCreated(volume_id=vol_id,
seconds=int(time.time() - start),
attempts=attempt,
volume_status=volume_status)
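    # Illustrative sketch (hypothetical config values): with
    # block_device_allocate_retries = 3 and
    # block_device_allocate_retries_interval = 3, the loop above calls
    # volume_api.get() up to retries + 1 = 4 times, sleeping 3 seconds
    # after each poll, so a volume stuck in 'creating' raises
    # VolumeNotCreated after roughly 12 seconds:
    #
    #     retries = 3                # CONF.block_device_allocate_retries
    #     attempts = retries + 1     # 4 polls in total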
def _decode_files(self, injected_files):
"""Base64 decode the list of files to inject."""
if not injected_files:
return []
def _decode(f):
path, contents = f
# Py3 raises binascii.Error instead of TypeError as in Py27
try:
decoded = base64.b64decode(contents)
return path, decoded
except (TypeError, binascii.Error):
raise exception.Base64Exception(path=path)
return [_decode(f) for f in injected_files]
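    # Illustrative sketch (hypothetical input, not from the original
    # module): injected files arrive as (path, base64-contents) pairs
    # and are decoded to raw bytes:
    #
    #     import base64
    #     files = [('/etc/motd', base64.b64encode(b'hello'))]
    #     self._decode_files(files)   # [('/etc/motd', b'hello')]
    #
    # Contents that are not valid base64 raise Base64Exception, carrying
    # the offending path.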
def _validate_instance_group_policy(self, context, instance,
filter_properties):
# NOTE(russellb) Instance group policy is enforced by the scheduler.
# However, there is a race condition with the enforcement of
# the policy. Since more than one instance may be scheduled at the
# same time, it's possible that more than one instance with an
# anti-affinity policy may end up here. It's also possible that
# multiple instances with an affinity policy could end up on different
# hosts. This is a validation step to make sure that starting the
# instance here doesn't violate the policy.
scheduler_hints = filter_properties.get('scheduler_hints') or {}
group_hint = scheduler_hints.get('group')
if not group_hint:
return
@utils.synchronized(group_hint)
def _do_validation(context, instance, group_hint):
group = objects.InstanceGroup.get_by_hint(context, group_hint)
if 'anti-affinity' in group.policies:
group_hosts = group.get_hosts(exclude=[instance.uuid])
if self.host in group_hosts:
msg = _("Anti-affinity instance group policy "
"was violated.")
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=msg)
elif 'affinity' in group.policies:
group_hosts = group.get_hosts(exclude=[instance.uuid])
if group_hosts and self.host not in group_hosts:
msg = _("Affinity instance group policy was violated.")
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=msg)
_do_validation(context, instance, group_hint)
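    # Illustrative sketch (hypothetical group, not from the original
    # module): for an anti-affinity group, a second member landing on a
    # host that already runs one fails validation and is rescheduled:
    #
    #     group.policies = ['anti-affinity']
    #     group.get_hosts(exclude=[instance.uuid])   # ['host-a']
    #     # self.host == 'host-a'  ->  RescheduledException
    #
    # Affinity is the mirror image: self.host must appear in group_hosts
    # (or the group must not have any hosts yet).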
def _log_original_error(self, exc_info, instance_uuid):
LOG.error(_LE('Error: %s'), exc_info[1], instance_uuid=instance_uuid,
exc_info=exc_info)
def _reschedule(self, context, request_spec, filter_properties,
instance, reschedule_method, method_args, task_state,
exc_info=None):
"""Attempt to re-schedule a compute operation."""
instance_uuid = instance.uuid
retry = filter_properties.get('retry')
if not retry:
# no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule",
instance_uuid=instance_uuid)
return
if not request_spec:
LOG.debug("No request spec, will not reschedule",
instance_uuid=instance_uuid)
return
LOG.debug("Re-scheduling %(method)s: attempt %(num)d",
{'method': reschedule_method.__name__,
'num': retry['num_attempts']}, instance_uuid=instance_uuid)
# reset the task state:
self._instance_update(context, instance, task_state=task_state)
if exc_info:
# stringify to avoid circular ref problem in json serialization:
retry['exc'] = traceback.format_exception_only(exc_info[0],
exc_info[1])
reschedule_method(context, *method_args)
return True
@periodic_task.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
timeout = CONF.instance_build_timeout
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING,
'host': self.host}
building_insts = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
for instance in building_insts:
if timeutils.is_older_than(instance.created_at, timeout):
self._set_instance_obj_error_state(context, instance)
LOG.warning(_LW("Instance build timed out. Set to error "
"state."), instance=instance)
def _check_instance_exists(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance):
raise exception.InstanceExists(name=instance.name)
def _allocate_network_async(self, context, instance, requested_networks,
macs, security_groups, is_vpn, dhcp_options):
"""Method used to allocate networks in the background.
Broken out for testing.
"""
# First check to see if we're specifically not supposed to allocate
# networks because if so, we can exit early.
if requested_networks and requested_networks.no_allocate:
LOG.debug("Not allocating networking since 'none' was specified.",
instance=instance)
return network_model.NetworkInfo([])
LOG.debug("Allocating IP information in the background.",
instance=instance)
retries = CONF.network_allocate_retries
attempts = retries + 1
retry_time = 1
bind_host_id = self.driver.network_binding_host_id(context, instance)
for attempt in range(1, attempts + 1):
try:
nwinfo = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks,
macs=macs,
security_groups=security_groups,
dhcp_options=dhcp_options,
bind_host_id=bind_host_id)
LOG.debug('Instance network_info: |%s|', nwinfo,
instance=instance)
instance.system_metadata['network_allocated'] = 'True'
# NOTE(JoshNang) do not save the instance here, as it can cause
# races. The caller shares a reference to instance and waits
# for this async greenthread to finish before calling
# instance.save().
return nwinfo
except Exception:
exc_info = sys.exc_info()
log_info = {'attempt': attempt,
'attempts': attempts}
if attempt == attempts:
LOG.exception(_LE('Instance failed network setup '
'after %(attempts)d attempt(s)'),
log_info)
six.reraise(*exc_info)
LOG.warning(_LW('Instance failed network setup '
'(attempt %(attempt)d of %(attempts)d)'),
log_info, instance=instance)
time.sleep(retry_time)
retry_time *= 2
if retry_time > 30:
retry_time = 30
# Not reached.
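    # Illustrative sketch (hypothetical config value): the retry loop
    # above sleeps with a doubling backoff capped at 30 seconds. With
    # CONF.network_allocate_retries = 5 (six attempts), the delays
    # between attempts are:
    #
    #     retry_time, delays = 1, []
    #     for _ in range(5):
    #         delays.append(retry_time)
    #         retry_time = min(retry_time * 2, 30)
    #     # delays == [1, 2, 4, 8, 16]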
def _build_networks_for_instance(self, context, instance,
requested_networks, security_groups):
# If we're here from a reschedule the network may already be allocated.
if strutils.bool_from_string(
instance.system_metadata.get('network_allocated', 'False')):
            # NOTE(alex_xu): network_allocated being True means the network
            # resources were already allocated during a previous scheduling
            # attempt and the host-side network setup was cleaned up
            # afterwards. After rescheduling, the network resources need to
            # be set up again on the new host.
self.network_api.setup_instance_network_on_host(
context, instance, instance.host)
return self.network_api.get_instance_nw_info(context, instance)
if not self.is_neutron_security_groups:
security_groups = []
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
network_info = self._allocate_network(context, instance,
requested_networks, macs, security_groups, dhcp_options)
return network_info
def _allocate_network(self, context, instance, requested_networks, macs,
security_groups, dhcp_options):
"""Start network allocation asynchronously. Return an instance
of NetworkInfoAsyncWrapper that can be used to retrieve the
allocated networks when the operation has finished.
"""
# NOTE(comstud): Since we're allocating networks asynchronously,
# this task state has little meaning, as we won't be in this
# state for very long.
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.NETWORKING
instance.save(expected_task_state=[None])
self._update_resource_tracker(context, instance)
is_vpn = pipelib.is_vpn_image(instance.image_ref)
return network_model.NetworkInfoAsyncWrapper(
self._allocate_network_async, context, instance,
requested_networks, macs, security_groups, is_vpn,
dhcp_options)
def _default_root_device_name(self, instance, image_meta, root_bdm):
try:
return self.driver.default_root_device_name(instance,
image_meta,
root_bdm)
except NotImplementedError:
return compute_utils.get_next_device_name(instance, [])
def _default_device_names_for_instance(self, instance,
root_device_name,
*block_device_lists):
try:
self.driver.default_device_names_for_instance(instance,
root_device_name,
*block_device_lists)
except NotImplementedError:
compute_utils.default_device_names_for_instance(
instance, root_device_name, *block_device_lists)
def _get_device_name_for_instance(self, instance, bdms, block_device_obj):
# NOTE(ndipanov): Copy obj to avoid changing the original
block_device_obj = block_device_obj.obj_clone()
try:
return self.driver.get_device_name_for_instance(
instance, bdms, block_device_obj)
except NotImplementedError:
return compute_utils.get_device_name_for_instance(
instance, bdms, block_device_obj.get("device_name"))
def _default_block_device_names(self, instance, image_meta, block_devices):
"""Verify that all the devices have the device_name set. If not,
provide a default name.
It also ensures that there is a root_device_name and is set to the
first block device in the boot sequence (boot_index=0).
"""
root_bdm = block_device.get_root_bdm(block_devices)
if not root_bdm:
return
# Get the root_device_name from the root BDM or the instance
root_device_name = None
update_root_bdm = False
if root_bdm.device_name:
root_device_name = root_bdm.device_name
instance.root_device_name = root_device_name
elif instance.root_device_name:
root_device_name = instance.root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
else:
root_device_name = self._default_root_device_name(instance,
image_meta,
root_bdm)
instance.root_device_name = root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
if update_root_bdm:
root_bdm.save()
ephemerals = list(filter(block_device.new_format_is_ephemeral,
block_devices))
swap = list(filter(block_device.new_format_is_swap,
block_devices))
block_device_mapping = list(filter(
driver_block_device.is_block_device_mapping, block_devices))
self._default_device_names_for_instance(instance,
root_device_name,
ephemerals,
swap,
block_device_mapping)
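    # Illustrative sketch (hypothetical BDMs, not from the original
    # module): given a root BDM and an instance that both lack a device
    # name, the driver (or the generic fallback) chooses one, and the
    # two records are updated to agree:
    #
    #     root_bdm.device_name         # None beforehand
    #     instance.root_device_name    # None beforehand
    #     self._default_block_device_names(instance, image_meta,
    #                                      block_devices)
    #     # afterwards instance.root_device_name == root_bdm.device_name,
    #     # e.g. '/dev/vda', and ephemeral/swap/volume BDMs are named
    #     # relative to it in boot order.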
def _block_device_info_to_legacy(self, block_device_info):
"""Convert BDI to the old format for drivers that need it."""
if self.use_legacy_block_device_info:
ephemerals = driver_block_device.legacy_block_devices(
driver.block_device_info_get_ephemerals(block_device_info))
mapping = driver_block_device.legacy_block_devices(
driver.block_device_info_get_mapping(block_device_info))
swap = block_device_info['swap']
if swap:
swap = swap.legacy()
block_device_info.update({
'ephemerals': ephemerals,
'swap': swap,
'block_device_mapping': mapping})
def _add_missing_dev_names(self, bdms, instance):
for bdm in bdms:
if bdm.device_name is not None:
continue
device_name = self._get_device_name_for_instance(instance,
bdms, bdm)
values = {'device_name': device_name}
bdm.update(values)
bdm.save()
def _prep_block_device(self, context, instance, bdms):
"""Set up the block device for an instance with error logging."""
try:
self._add_missing_dev_names(bdms, instance)
block_device_info = driver.get_block_device_info(instance, bdms)
mapping = driver.block_device_info_get_mapping(block_device_info)
driver_block_device.attach_block_devices(
mapping, context, instance, self.volume_api, self.driver,
wait_func=self._await_block_device_map_created)
self._block_device_info_to_legacy(block_device_info)
return block_device_info
except exception.OverQuota:
msg = _LW('Failed to create block device for instance due to '
'being over volume resource quota')
LOG.warning(msg, instance=instance)
raise exception.VolumeLimitExceeded()
except Exception:
LOG.exception(_LE('Instance failed block device setup'),
instance=instance)
raise exception.InvalidBDM()
def _update_instance_after_spawn(self, context, instance):
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
configdrive.update_instance(instance)
def _update_scheduler_instance_info(self, context, instance):
"""Sends an InstanceList with created or updated Instance objects to
the Scheduler client.
In the case of init_host, the value passed will already be an
InstanceList. Other calls will send individual Instance objects that
have been created or resized. In this case, we create an InstanceList
object containing that Instance.
"""
if not self.send_instance_updates:
return
if isinstance(instance, obj_instance.Instance):
instance = objects.InstanceList(objects=[instance])
context = context.elevated()
self.scheduler_client.update_instance_info(context, self.host,
instance)
def _delete_scheduler_instance_info(self, context, instance_uuid):
"""Sends the uuid of the deleted Instance to the Scheduler client."""
if not self.send_instance_updates:
return
context = context.elevated()
self.scheduler_client.delete_instance_info(context, self.host,
instance_uuid)
@periodic_task.periodic_task(spacing=CONF.scheduler_instance_sync_interval)
def _sync_scheduler_instance_info(self, context):
if not self.send_instance_updates:
return
context = context.elevated()
instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
uuids = [instance.uuid for instance in instances]
self.scheduler_client.sync_instance_info(context, self.host, uuids)
def _notify_about_instance_usage(self, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, fault=None):
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, event_suffix,
network_info=network_info,
system_metadata=system_metadata,
extra_usage_info=extra_usage_info, fault=fault)
def _deallocate_network(self, context, instance,
requested_networks=None):
# If we were told not to allocate networks let's save ourselves
# the trouble of calling the network API.
if requested_networks and requested_networks.no_allocate:
LOG.debug("Skipping network deallocation for instance since "
"networking was not requested.", instance=instance)
return
LOG.debug('Deallocating network for instance', instance=instance)
with timeutils.StopWatch() as timer:
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
# nova-network does an rpc call so we're OK tracking time spent here
LOG.info(_LI('Took %0.2f seconds to deallocate network for instance.'),
timer.elapsed(), instance=instance)
def _get_instance_block_device_info(self, context, instance,
refresh_conn_info=False,
bdms=None):
"""Transform block devices to the driver block_device format."""
if not bdms:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = driver.get_block_device_info(instance, bdms)
if not refresh_conn_info:
# if the block_device_mapping has no value in connection_info
# (returned as None), don't include in the mapping
block_device_info['block_device_mapping'] = [
bdm for bdm in driver.block_device_info_get_mapping(
block_device_info)
if bdm.get('connection_info')]
else:
driver_block_device.refresh_conn_infos(
driver.block_device_info_get_mapping(block_device_info),
context, instance, self.volume_api, self.driver)
self._block_device_info_to_legacy(block_device_info)
return block_device_info
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def build_and_run_instance(self, context, instance, image, request_spec,
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None):
@utils.synchronized(instance.uuid)
def _locked_do_build_and_run_instance(*args, **kwargs):
# NOTE(danms): We grab the semaphore with the instance uuid
# locked because we could wait in line to build this instance
# for a while and we want to make sure that nothing else tries
# to do anything with this instance while we wait.
with self._build_semaphore:
self._do_build_and_run_instance(*args, **kwargs)
# NOTE(danms): We spawn here to return the RPC worker thread back to
# the pool. Since what follows could take a really long time, we don't
# want to tie up RPC workers.
utils.spawn_n(_locked_do_build_and_run_instance,
context, instance, image, request_spec,
filter_properties, admin_password, injected_files,
requested_networks, security_groups,
block_device_mapping, node, limits)
def _check_device_tagging(self, requested_networks, block_device_mapping):
tagging_requested = False
if requested_networks:
for net in requested_networks:
if 'tag' in net and net.tag is not None:
tagging_requested = True
break
if block_device_mapping and not tagging_requested:
for bdm in block_device_mapping:
if 'tag' in bdm and bdm.tag is not None:
tagging_requested = True
break
if (tagging_requested and
not self.driver.capabilities.get('supports_device_tagging')):
raise exception.BuildAbortException('Attempt to boot guest with '
'tagged devices on host that '
'does not support tagging.')
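    # Illustrative sketch (hypothetical request, not from the original
    # module): one tagged NIC or tagged BDM is enough to require driver
    # support for device tagging:
    #
    #     requested_networks = [objects.NetworkRequest(tag='nic1')]
    #     self._check_device_tagging(requested_networks, None)
    #     # raises BuildAbortException unless
    #     # self.driver.capabilities.get('supports_device_tagging')
    #     # is True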
@hooks.add_hook('build_instance')
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def _do_build_and_run_instance(self, context, instance, image,
request_spec, filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
node=None, limits=None):
try:
LOG.debug('Starting instance...', instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = None
instance.save(expected_task_state=
(task_states.SCHEDULING, None))
except exception.InstanceNotFound:
msg = 'Instance disappeared before build.'
LOG.debug(msg, instance=instance)
return build_results.FAILED
except exception.UnexpectedTaskStateError as e:
LOG.debug(e.format_message(), instance=instance)
return build_results.FAILED
# b64 decode the files to inject:
decoded_files = self._decode_files(injected_files)
if limits is None:
limits = {}
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug('No node specified, defaulting to %s', node,
instance=instance)
try:
with timeutils.StopWatch() as timer:
self._build_and_run_instance(context, instance, image,
decoded_files, admin_password, requested_networks,
security_groups, block_device_mapping, node, limits,
filter_properties)
LOG.info(_LI('Took %0.2f seconds to build instance.'),
timer.elapsed(), instance=instance)
return build_results.ACTIVE
except exception.RescheduledException as e:
retry = filter_properties.get('retry')
if not retry:
# no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule",
instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
compute_utils.add_instance_fault_from_exc(context,
instance, e, sys.exc_info(),
fault_message=e.kwargs['reason'])
self._nil_out_instance_obj_host_and_node(instance)
self._set_instance_obj_error_state(context, instance,
clean_task_state=True)
return build_results.FAILED
LOG.debug(e.format_message(), instance=instance)
# This will be used for logging the exception
retry['exc'] = traceback.format_exception(*sys.exc_info())
# This will be used for setting the instance fault message
retry['exc_reason'] = e.kwargs['reason']
# NOTE(comstud): Deallocate networks if the driver wants
# us to do so.
# NOTE(vladikr): SR-IOV ports should be deallocated to
# allow new sriov pci devices to be allocated on a new host.
# Otherwise, if devices with pci addresses are already allocated
# on the destination host, the instance will fail to spawn.
# info_cache.network_info should be present at this stage.
if (self.driver.deallocate_networks_on_reschedule(instance) or
self.deallocate_sriov_ports_on_reschedule(instance)):
self._cleanup_allocated_networks(context, instance,
requested_networks)
else:
                # NOTE(alex_xu): The networks are already allocated and we
                # don't want to deallocate them before rescheduling, but we
                # do need to clean up the network resources set up on this
                # host before rescheduling.
self.network_api.cleanup_instance_network_on_host(
context, instance, self.host)
self._nil_out_instance_obj_host_and_node(instance)
instance.task_state = task_states.SCHEDULING
instance.save()
self.compute_task_api.build_instances(context, [instance],
image, filter_properties, admin_password,
injected_files, requested_networks, security_groups,
block_device_mapping)
return build_results.RESCHEDULED
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = 'Instance disappeared during build.'
LOG.debug(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
return build_results.FAILED
except exception.BuildAbortException as e:
LOG.exception(e.format_message(), instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._cleanup_volumes(context, instance.uuid,
block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context, instance,
e, sys.exc_info())
self._nil_out_instance_obj_host_and_node(instance)
self._set_instance_obj_error_state(context, instance,
clean_task_state=True)
return build_results.FAILED
except Exception as e:
# Should not reach here.
msg = _LE('Unexpected build failure, not rescheduling build.')
LOG.exception(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._cleanup_volumes(context, instance.uuid,
block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context, instance,
e, sys.exc_info())
self._nil_out_instance_obj_host_and_node(instance)
self._set_instance_obj_error_state(context, instance,
clean_task_state=True)
return build_results.FAILED
def deallocate_sriov_ports_on_reschedule(self, instance):
"""Determine if networks are needed to be deallocated before reschedule
Check the cached network info for any assigned SR-IOV ports.
SR-IOV ports should be deallocated prior to rescheduling
in order to allow new sriov pci devices to be allocated on a new host.
"""
info_cache = instance.info_cache
def _has_sriov_port(vif):
return vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV
if (info_cache and info_cache.network_info):
for vif in info_cache.network_info:
if _has_sriov_port(vif):
return True
return False
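    # Illustrative sketch (hypothetical VIF, not from the original
    # module): a cached VIF whose vnic_type is one of the SR-IOV types
    # (e.g. 'direct' or 'macvtap') makes this method return True:
    #
    #     vif = {'vnic_type': 'direct'}
    #     vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV   # True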
def _build_and_run_instance(self, context, instance, image, injected_files,
admin_password, requested_networks, security_groups,
block_device_mapping, node, limits, filter_properties):
image_name = image.get('name')
self._notify_about_instance_usage(context, instance, 'create.start',
extra_usage_info={'image_name': image_name})
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.CREATE,
phase=fields.NotificationPhase.START)
# NOTE(mikal): cache the keystone roles associated with the instance
# at boot time for later reference
instance.system_metadata.update(
{'boot_roles': ','.join(context.roles)})
self._check_device_tagging(requested_networks, block_device_mapping)
try:
rt = self._get_resource_tracker()
with rt.instance_claim(context, instance, node, limits):
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
filter_properties)
image_meta = objects.ImageMeta.from_dict(image)
with self._build_resources(context, instance,
requested_networks, security_groups, image_meta,
block_device_mapping) as resources:
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
# NOTE(JoshNang) This also saves the changes to the
# instance from _allocate_network_async, as they aren't
# saved in that function to prevent races.
instance.save(expected_task_state=
task_states.BLOCK_DEVICE_MAPPING)
block_device_info = resources['block_device_info']
network_info = resources['network_info']
LOG.debug('Start spawning the instance on the hypervisor.',
instance=instance)
with timeutils.StopWatch() as timer:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
network_info=network_info,
block_device_info=block_device_info)
LOG.info(_LI('Took %0.2f seconds to spawn the instance on '
'the hypervisor.'), timer.elapsed(),
instance=instance)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.CREATE,
phase=fields.NotificationPhase.ERROR, exception=e)
except exception.ComputeResourcesUnavailable as e:
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.CREATE,
phase=fields.NotificationPhase.ERROR, exception=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=e.format_message())
except exception.BuildAbortException as e:
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.CREATE,
phase=fields.NotificationPhase.ERROR, exception=e)
except (exception.FixedIpLimitExceeded,
exception.NoMoreNetworks, exception.NoMoreFixedIps) as e:
LOG.warning(_LW('No more network or fixed IP to be allocated'),
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.CREATE,
phase=fields.NotificationPhase.ERROR, exception=e)
msg = _('Failed to allocate the network(s) with error %s, '
'not rescheduling.') % e.format_message()
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except (exception.VirtualInterfaceCreateException,
exception.VirtualInterfaceMacAddressException,
exception.FixedIpInvalidOnHost,
exception.UnableToAutoAllocateNetwork) as e:
LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.CREATE,
phase=fields.NotificationPhase.ERROR, exception=e)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except (exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.ImageNotActive,
exception.ImageUnacceptable,
exception.InvalidDiskInfo,
exception.InvalidDiskFormat,
exception.SignatureVerificationError,
exception.VolumeEncryptionNotSupported,
exception.InvalidInput) as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.CREATE,
phase=fields.NotificationPhase.ERROR, exception=e)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.CREATE,
phase=fields.NotificationPhase.ERROR, exception=e)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=six.text_type(e))
# NOTE(alaski): This is only useful during reschedules, remove it now.
instance.system_metadata.pop('network_allocated', None)
# If CONF.default_access_ip_network_name is set, grab the
# corresponding network and set the access ip values accordingly.
network_name = CONF.default_access_ip_network_name
if (network_name and not instance.access_ip_v4 and
not instance.access_ip_v6):
# Note that when there are multiple ips to choose from, an
# arbitrary one will be chosen.
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if not instance.access_ip_v4 and ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if not instance.access_ip_v6 and ip['version'] == 6:
instance.access_ip_v6 = ip['address']
break
self._update_instance_after_spawn(context, instance)
try:
instance.save(expected_task_state=task_states.SPAWNING)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.CREATE,
phase=fields.NotificationPhase.ERROR, exception=e)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(context, instance, 'create.end',
extra_usage_info={'message': _('Success')},
network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.CREATE,
phase=fields.NotificationPhase.END)
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
security_groups, image_meta, block_device_mapping):
resources = {}
network_info = None
try:
LOG.debug('Start building networks asynchronously for instance.',
instance=instance)
network_info = self._build_networks_for_instance(context, instance,
requested_networks, security_groups)
resources['network_info'] = network_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
raise
except exception.UnexpectedTaskStateError as e:
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
# Because this allocation is async any failures are likely to occur
# when the driver accesses network_info during spawn().
LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(instance, image_meta,
block_device_mapping)
LOG.debug('Start building block device mappings for instance.',
instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()
block_device_info = self._prep_block_device(context, instance,
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
with excutils.save_and_reraise_exception():
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
except (exception.UnexpectedTaskStateError,
exception.VolumeLimitExceeded,
exception.InvalidBDM) as e:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
LOG.exception(_LE('Failure prepping block device'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
msg = _('Failure prepping block device.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
yield resources
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if not isinstance(exc, (
exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError)):
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
                # If network_info is empty we're likely here because of a
                # network allocation failure. Since nothing can be reused on
                # rescheduling, it's better to deallocate the network to
                # eliminate the chance of orphaned ports in neutron.
                deallocate_networks = not network_info
try:
self._shutdown_instance(context, instance,
block_device_mapping, requested_networks,
try_deallocate_networks=deallocate_networks)
except Exception as exc2:
ctxt.reraise = False
LOG.warning(_LW('Could not clean up failed build,'
' not rescheduling. Error: %s'),
six.text_type(exc2))
raise exception.BuildAbortException(
instance_uuid=instance.uuid,
reason=six.text_type(exc))
def _cleanup_allocated_networks(self, context, instance,
requested_networks):
try:
self._deallocate_network(context, instance, requested_networks)
except Exception:
msg = _LE('Failed to deallocate networks')
LOG.exception(msg, instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE(alaski): It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
def _try_deallocate_network(self, context, instance,
requested_networks=None):
try:
# tear down allocated network structure
self._deallocate_network(context, instance, requested_networks)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to deallocate network for instance. '
'Error: %s'), ex,
instance=instance)
self._set_instance_obj_error_state(context, instance)
def _get_power_off_values(self, context, instance, clean_shutdown):
"""Get the timing configuration for powering down this instance."""
if clean_shutdown:
timeout = compute_utils.get_value_from_system_metadata(instance,
key='image_os_shutdown_timeout', type=int,
default=CONF.shutdown_timeout)
retry_interval = self.SHUTDOWN_RETRY_INTERVAL
else:
timeout = 0
retry_interval = 0
return timeout, retry_interval
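    # Illustrative sketch (hypothetical metadata): a clean shutdown
    # honors the per-image override and falls back to
    # CONF.shutdown_timeout, while an unclean one powers off immediately:
    #
    #     instance.system_metadata['image_os_shutdown_timeout'] = '120'
    #     self._get_power_off_values(context, instance, True)
    #     # -> (120, self.SHUTDOWN_RETRY_INTERVAL)
    #     self._get_power_off_values(context, instance, False)
    #     # -> (0, 0)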
def _power_off_instance(self, context, instance, clean_shutdown=True):
"""Power off an instance on this host."""
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
self.driver.power_off(instance, timeout, retry_interval)
def _shutdown_instance(self, context, instance,
bdms, requested_networks=None, notify=True,
try_deallocate_networks=True):
"""Shutdown an instance on this host.
:param:context: security context
:param:instance: a nova.objects.Instance object
:param:bdms: the block devices for the instance to be torn
down
:param:requested_networks: the networks on which the instance
has ports
:param:notify: true if a final usage notification should be
emitted
:param:try_deallocate_networks: false if we should avoid
trying to teardown networking
"""
context = context.elevated()
LOG.info(_LI('Terminating instance'), instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHUTDOWN,
phase=fields.NotificationPhase.START)
network_info = compute_utils.get_nw_info_for_instance(instance)
# NOTE(vish) get bdms before destroying the instance
vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
# NOTE(melwitt): attempt driver destroy before releasing ip, may
# want to keep ip allocated for certain failures
timer = timeutils.StopWatch()
try:
LOG.debug('Start destroying the instance on the hypervisor.',
instance=instance)
timer.start()
self.driver.destroy(context, instance, network_info,
block_device_info)
LOG.info(_LI('Took %0.2f seconds to destroy the instance on the '
'hypervisor.'), timer.elapsed(), instance=instance)
except exception.InstancePowerOffFailure:
# if the instance can't power off, don't release the ip
with excutils.save_and_reraise_exception():
pass
except Exception:
with excutils.save_and_reraise_exception():
# deallocate ip and fail without proceeding to
# volume api calls, preserving current behavior
if try_deallocate_networks:
self._try_deallocate_network(context, instance,
requested_networks)
if try_deallocate_networks:
self._try_deallocate_network(context, instance, requested_networks)
timer.restart()
for bdm in vol_bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
# just tell cinder that we are done with it.
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(context, bdm.volume_id, instance.uuid)
except exception.DiskNotFound as exc:
LOG.debug('Ignoring DiskNotFound: %s', exc,
instance=instance)
except exception.VolumeNotFound as exc:
LOG.debug('Ignoring VolumeNotFound: %s', exc,
instance=instance)
except (cinder_exception.EndpointNotFound,
keystone_exception.EndpointNotFound) as exc:
LOG.warning(_LW('Ignoring EndpointNotFound for '
'volume %(volume_id)s: %(exc)s'),
{'exc': exc, 'volume_id': bdm.volume_id},
instance=instance)
except cinder_exception.ClientException as exc:
LOG.warning(_LW('Ignoring unknown cinder exception for '
'volume %(volume_id)s: %(exc)s'),
{'exc': exc, 'volume_id': bdm.volume_id},
instance=instance)
except Exception as exc:
LOG.warning(_LW('Ignoring unknown exception for '
'volume %(volume_id)s: %(exc)s'),
{'exc': exc, 'volume_id': bdm.volume_id},
instance=instance)
if vol_bdms:
LOG.info(_LI('Took %(time).2f seconds to detach %(num)s volumes '
'for instance.'),
{'time': timer.elapsed(), 'num': len(vol_bdms)},
instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHUTDOWN,
phase=fields.NotificationPhase.END)
def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
exc_info = None
for bdm in bdms:
LOG.debug("terminating bdm %s", bdm,
instance_uuid=instance_uuid)
if bdm.volume_id and bdm.delete_on_termination:
try:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
exc_info = sys.exc_info()
LOG.warning(_LW('Failed to delete volume: %(volume_id)s '
'due to %(exc)s'),
{'volume_id': bdm.volume_id, 'exc': exc})
if exc_info is not None and raise_exc:
six.reraise(exc_info[0], exc_info[1], exc_info[2])
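    # Illustrative sketch (hypothetical BDMs): only volumes flagged
    # delete_on_termination are deleted; failures are logged, and with
    # raise_exc=True the last captured exception is re-raised once the
    # loop has visited every mapping:
    #
    #     bdm.volume_id = 'vol-1'
    #     bdm.delete_on_termination = True    # -> volume_api.delete()
    #     bdm.delete_on_termination = False   # -> volume left in place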
@hooks.add_hook("delete_instance")
def _delete_instance(self, context, instance, bdms, quotas):
"""Delete an instance on this host. Commit or rollback quotas
as necessary.
:param context: nova request context
:param instance: nova.objects.instance.Instance object
:param bdms: nova.objects.block_device.BlockDeviceMappingList object
:param quotas: nova.objects.quotas.Quotas object
"""
was_soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
if was_soft_deleted:
# Instances in SOFT_DELETED vm_state have already had quotas
# decremented.
try:
quotas.rollback()
except Exception:
pass
try:
events = self.instance_events.clear_events_for_instance(instance)
if events:
LOG.debug('Events pending at deletion: %(events)s',
{'events': ','.join(events.keys())},
instance=instance)
self._notify_about_instance_usage(context, instance,
"delete.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.DELETE,
phase=fields.NotificationPhase.START)
self._shutdown_instance(context, instance, bdms)
# NOTE(dims): instance.info_cache.delete() should be called after
# _shutdown_instance in the compute manager as shutdown calls
# deallocate_for_instance so the info_cache is still needed
# at this point.
if instance.info_cache is not None:
instance.info_cache.delete()
else:
                # NOTE(yoshimatsu): Avoid AttributeError if
                # instance.info_cache is None. Once the root cause of
                # instance.info_cache becoming None is fixed, the log
                # level should be reconsidered.
                LOG.warning(_LW("Info cache for instance could not be found. "
                                "Ignore."), instance=instance)
# NOTE(vish): We have already deleted the instance, so we have
# to ignore problems cleaning up the volumes. It
# would be nice to let the user know somehow that
# the volume deletion failed, but it is not
# acceptable to have an instance that can not be
# deleted. Perhaps this could be reworked in the
# future to set an instance fault the first time
# and to only ignore the failure if the instance
# is already in ERROR.
self._cleanup_volumes(context, instance.uuid, bdms,
raise_exc=False)
# if a delete task succeeded, always update vm state and task
# state without expecting task state to be DELETING
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.power_state = power_state.NOSTATE
instance.terminated_at = timeutils.utcnow()
instance.save()
system_meta = instance.system_metadata
instance.destroy()
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
self._complete_deletion(context,
instance,
bdms,
quotas,
system_meta)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def terminate_instance(self, context, instance, bdms, reservations):
"""Terminate an instance on this host."""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
@utils.synchronized(instance.uuid)
def do_terminate_instance(instance, bdms):
# NOTE(mriedem): If we are deleting the instance while it was
# booting from volume, we could be racing with a database update of
# the BDM volume_id. Since the compute API passes the BDMs over RPC
# to compute here, the BDMs may be stale at this point. So check
# for any volume BDMs that don't have volume_id set and if we
# detect that, we need to refresh the BDM list before proceeding.
# TODO(mriedem): Move this into _delete_instance and make the bdms
# parameter optional.
for bdm in list(bdms):
if bdm.is_volume and not bdm.volume_id:
LOG.debug('There are potentially stale BDMs during '
'delete, refreshing the BlockDeviceMappingList.',
instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
break
try:
self._delete_instance(context, instance, bdms, quotas)
except exception.InstanceNotFound:
LOG.info(_LI("Instance disappeared during terminate"),
instance=instance)
except Exception:
                # As we're trying to delete, always go to ERROR if something
                # goes wrong that _delete_instance can't handle.
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance=instance)
self._set_instance_obj_error_state(context, instance)

        do_terminate_instance(instance, bdms)

    # NOTE(johannes): This is probably better named power_off_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def stop_instance(self, context, instance, clean_shutdown):
"""Stopping an instance on this host."""

        @utils.synchronized(instance.uuid)
def do_stop_instance():
current_power_state = self._get_power_state(context, instance)
LOG.debug('Stopping instance; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, current VM '
'power_state: %(current_power_state)s',
{'vm_state': instance.vm_state,
'task_state': instance.task_state,
'db_power_state': instance.power_state,
'current_power_state': current_power_state},
instance_uuid=instance.uuid)
# NOTE(mriedem): If the instance is already powered off, we are
# possibly tearing down and racing with other operations, so we can
# expect the task_state to be None if something else updates the
# instance and we're not locking it.
expected_task_state = [task_states.POWERING_OFF]
# The list of power states is from _sync_instance_power_state.
if current_power_state in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.info(_LI('Instance is already powered off in the '
'hypervisor when stop is called.'),
instance=instance)
expected_task_state.append(None)
self._notify_about_instance_usage(context, instance,
"power_off.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.POWER_OFF,
phase=fields.NotificationPhase.START)
self._power_off_instance(context, instance, clean_shutdown)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.STOPPED
instance.task_state = None
instance.save(expected_task_state=expected_task_state)
self._notify_about_instance_usage(context, instance,
"power_off.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.POWER_OFF,
phase=fields.NotificationPhase.END)

        do_stop_instance()

    def _power_on(self, context, instance):
network_info = self.network_api.get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.power_on(context, instance,
network_info,
block_device_info)

    def _delete_snapshot_of_shelved_instance(self, context, instance,
snapshot_id):
"""Delete snapshot of shelved instance."""
try:
self.image_api.delete(context, snapshot_id)
except (exception.ImageNotFound,
exception.ImageNotAuthorized) as exc:
LOG.warning(_LW("Failed to delete snapshot "
"from shelved instance (%s)."),
exc.format_message(), instance=instance)
except Exception:
LOG.exception(_LE("Something wrong happened when trying to "
"delete snapshot from shelved instance."),
instance=instance)

    # NOTE(johannes): This is probably better named power_on_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def start_instance(self, context, instance):
"""Starting an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.POWER_ON,
phase=fields.NotificationPhase.START)
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
# Delete an image(VM snapshot) for a shelved instance
snapshot_id = instance.system_metadata.get('shelved_image_id')
if snapshot_id:
self._delete_snapshot_of_shelved_instance(context, instance,
snapshot_id)
# Delete system_metadata for a shelved instance
compute_utils.remove_shelved_keys_from_system_metadata(instance)
instance.save(expected_task_state=task_states.POWERING_ON)
self._notify_about_instance_usage(context, instance, "power_on.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.POWER_ON,
phase=fields.NotificationPhase.END)

    @messaging.expected_exceptions(NotImplementedError,
exception.TriggerCrashDumpNotSupported,
exception.InstanceNotRunning)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def trigger_crash_dump(self, context, instance):
"""Trigger crash dump in an instance."""
self._notify_about_instance_usage(context, instance,
"trigger_crash_dump.start")
# This method does not change task_state and power_state because the
# effect of a trigger depends on user's configuration.
self.driver.trigger_crash_dump(instance)
self._notify_about_instance_usage(context, instance,
"trigger_crash_dump.end")

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def soft_delete_instance(self, context, instance, reservations):
"""Soft delete an instance on this host."""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
self._notify_about_instance_usage(context, instance,
"soft_delete.start")
try:
self.driver.soft_delete(instance)
except NotImplementedError:
# Fallback to just powering off the instance if the
# hypervisor doesn't implement the soft_delete method
self.driver.power_off(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save(expected_task_state=[task_states.SOFT_DELETING])
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
quotas.commit()
self._notify_about_instance_usage(context, instance, "soft_delete.end")

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def restore_instance(self, context, instance):
"""Restore a soft-deleted instance on this host."""
self._notify_about_instance_usage(context, instance, "restore.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESTORE,
phase=fields.NotificationPhase.START)
try:
self.driver.restore(instance)
except NotImplementedError:
# Fallback to just powering on the instance if the hypervisor
# doesn't implement the restore method
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.RESTORING)
self._notify_about_instance_usage(context, instance, "restore.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESTORE,
phase=fields.NotificationPhase.END)

    @staticmethod
def _set_migration_status(migration, status):
"""Set the status, and guard against a None being passed in.

        This is useful as some of the compute RPC calls will not pass
a migration object in older versions. The check can be removed when
we move past 4.x major version of the RPC API.
"""
if migration:
migration.status = status
migration.save()

    def _rebuild_default_impl(self, context, instance, image_meta,
injected_files, admin_password, bdms,
detach_block_devices, attach_block_devices,
network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
if preserve_ephemeral:
# The default code path does not support preserving ephemeral
# partitions.
raise exception.PreserveEphemeralNotSupported()
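        # On an evacuate (recreate) the source hypervisor is assumed gone,
        # so there is no local guest to power off or destroy; a plain
        # rebuild tears down the existing guest first.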
if recreate:
detach_block_devices(context, bdms)
else:
self._power_off_instance(context, instance, clean_shutdown=True)
detach_block_devices(context, bdms)
self.driver.destroy(context, instance,
network_info=network_info,
block_device_info=block_device_info)
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
new_block_device_info = attach_block_devices(context, instance, bdms)
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(
expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
with instance.mutated_migration_context():
self.driver.spawn(context, instance, image_meta, injected_files,
admin_password, network_info=network_info,
block_device_info=new_block_device_info)

    def _notify_instance_rebuild_error(self, context, instance, error):
self._notify_about_instance_usage(context, instance,
'rebuild.error', fault=error)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBUILD,
phase=fields.NotificationPhase.ERROR, exception=error)

    @messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage=None,
preserve_ephemeral=False, migration=None,
scheduled_node=None, limits=None):
"""Destroy and re-make this instance.

        A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.

        :param context: `nova.RequestContext` object
:param instance: Instance object
:param orig_image_ref: Original image_ref before rebuild
:param image_ref: New image_ref for rebuild
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
:param bdms: block-device-mappings to use for rebuild
:param recreate: True if the instance is being recreated (e.g. the
hypervisor it was on failed) - cleanup of old state will be
skipped.
:param on_shared_storage: True if instance files on shared storage.
If not provided then information from the
driver will be used to decide if the instance
files are available or not on the target host
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
:param migration: a Migration object if one was created for this
rebuild operation (if it's a part of evacuate)
:param scheduled_node: A node of the host chosen by the scheduler. If a
host was specified by the user, this will be
None
:param limits: Overcommit limits set by the scheduler. If a host was
specified by the user, this will be None
"""
context = context.elevated()
LOG.info(_LI("Rebuilding instance"), instance=instance)
if scheduled_node is not None:
rt = self._get_resource_tracker()
rebuild_claim = rt.rebuild_claim
else:
rebuild_claim = claims.NopClaim
image_meta = {}
if image_ref:
image_meta = self.image_api.get(context, image_ref)
# NOTE(mriedem): On a recreate (evacuate), we need to update
# the instance's host and node properties to reflect it's
# destination node for the recreate.
if not scheduled_node:
if recreate:
try:
compute_node = self._get_compute_info(context, self.host)
scheduled_node = compute_node.hypervisor_hostname
except exception.ComputeHostNotFound:
LOG.exception(_LE('Failed to get compute_info for %s'),
self.host)
else:
scheduled_node = instance.node
with self._error_out_instance_on_exception(context, instance):
try:
claim_ctxt = rebuild_claim(
context, instance, scheduled_node,
limits=limits, image_meta=image_meta,
migration=migration)
self._do_rebuild_instance_with_claim(
claim_ctxt, context, instance, orig_image_ref,
image_ref, injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage, preserve_ephemeral)
except exception.ComputeResourcesUnavailable as e:
LOG.debug("Could not rebuild instance on this host, not "
"enough resources available.", instance=instance)
# NOTE(ndipanov): We just abort the build for now and leave a
# migration record for potential cleanup later
self._set_migration_status(migration, 'failed')
self._notify_instance_rebuild_error(context, instance, e)
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=e.format_message())
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
LOG.debug('Instance was deleted while rebuilding',
instance=instance)
self._set_migration_status(migration, 'failed')
self._notify_instance_rebuild_error(context, instance, e)
except Exception as e:
self._set_migration_status(migration, 'failed')
self._notify_instance_rebuild_error(context, instance, e)
raise
else:
instance.apply_migration_context()
# NOTE (ndipanov): This save will now update the host and node
# attributes making sure that next RT pass is consistent since
# it will be based on the instance and not the migration DB
# entry.
instance.host = self.host
instance.node = scheduled_node
instance.save()
instance.drop_migration_context()
# NOTE (ndipanov): Mark the migration as done only after we
# mark the instance as belonging to this host.
self._set_migration_status(migration, 'done')

    def _do_rebuild_instance_with_claim(self, claim_context, *args, **kwargs):
"""Helper to avoid deep nesting in the top-level method."""
with claim_context:
self._do_rebuild_instance(*args, **kwargs)

    @staticmethod
def _get_image_name(image_meta):
if image_meta.obj_attr_is_set("name"):
return image_meta.name
else:
return ''

    def _do_rebuild_instance(self, context, instance, orig_image_ref,
image_ref, injected_files, new_pass,
orig_sys_metadata, bdms, recreate,
on_shared_storage, preserve_ephemeral):
orig_vm_state = instance.vm_state
if recreate:
if not self.driver.capabilities["supports_recreate"]:
raise exception.InstanceRecreateNotSupported
self._check_instance_exists(context, instance)
if on_shared_storage is None:
                LOG.debug('on_shared_storage is not provided, using driver '
                          'information to decide if the instance needs to '
                          'be recreated')
on_shared_storage = self.driver.instance_on_disk(instance)
elif (on_shared_storage !=
self.driver.instance_on_disk(instance)):
# To cover case when admin expects that instance files are
# on shared storage, but not accessible and vice versa
raise exception.InvalidSharedStorage(
_("Invalid state of instance files on shared"
" storage"))
if on_shared_storage:
LOG.info(_LI('disk on shared storage, recreating using'
' existing disk'))
else:
image_ref = orig_image_ref = instance.image_ref
LOG.info(_LI("disk not on shared storage, rebuilding from:"
" '%s'"), str(image_ref))
if image_ref:
image_meta = objects.ImageMeta.from_image_ref(
context, self.image_api, image_ref)
else:
image_meta = instance.image_meta
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
# TODO(jaypipes): Move generate_image_url() into the nova.image.api
orig_image_ref_url = glance.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
compute_utils.notify_usage_exists(
self.notifier, context, instance,
current_period=True, system_metadata=orig_sys_metadata,
extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
extra_usage_info = {'image_name': self._get_image_name(image_meta)}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
# NOTE: image_name is not included in the versioned notification
# because we already provide the image_uuid in the notification
# payload and the image details can be looked up via the uuid.
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBUILD,
phase=fields.NotificationPhase.START)
instance.power_state = self._get_power_state(context, instance)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
if recreate:
self.network_api.setup_networks_on_host(
context, instance, self.host)
# For nova-network this is needed to move floating IPs
# For neutron this updates the host in the port binding
# TODO(cfriesen): this network_api call and the one above
# are so similar, we should really try to unify them.
self.network_api.setup_instance_network_on_host(
context, instance, self.host)
network_info = compute_utils.get_nw_info_for_instance(instance)
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = \
self._get_instance_block_device_info(
context, instance, bdms=bdms)
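
        # Detach volumes but keep the BDM records (destroy_bdm=False) so
        # that attach_block_devices (_prep_block_device) can re-attach
        # them to the rebuilt guest later.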
def detach_block_devices(context, bdms):
for bdm in bdms:
if bdm.is_volume:
self._detach_volume(context, bdm.volume_id, instance,
destroy_bdm=False)

        files = self._decode_files(injected_files)
kwargs = dict(
context=context,
instance=instance,
image_meta=image_meta,
injected_files=files,
admin_password=new_pass,
bdms=bdms,
detach_block_devices=detach_block_devices,
attach_block_devices=self._prep_block_device,
block_device_info=block_device_info,
network_info=network_info,
preserve_ephemeral=preserve_ephemeral,
recreate=recreate)
try:
with instance.mutated_migration_context():
self.driver.rebuild(**kwargs)
except NotImplementedError:
# NOTE(rpodolyaka): driver doesn't provide specialized version
# of rebuild, fall back to the default implementation
self._rebuild_default_impl(**kwargs)
self._update_instance_after_spawn(context, instance)
instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])
if orig_vm_state == vm_states.STOPPED:
LOG.info(_LI("bringing vm to original state: '%s'"),
orig_vm_state, instance=instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save()
self.stop_instance(context, instance, False)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
extra_usage_info=extra_usage_info)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBUILD,
phase=fields.NotificationPhase.END)

    def _handle_bad_volumes_detached(self, context, instance, bad_devices,
block_device_info):
"""Handle cases where the virt-layer had to detach non-working volumes
in order to complete an operation.
"""
for bdm in block_device_info['block_device_mapping']:
if bdm.get('mount_device') in bad_devices:
try:
volume_id = bdm['connection_info']['data']['volume_id']
except KeyError:
continue
# NOTE(sirp): ideally we'd just call
# `compute_api.detach_volume` here but since that hits the
# DB directly, that's off limits from within the
# compute-manager.
#
# API-detach
LOG.info(_LI("Detaching from volume api: %s"), volume_id)
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume_id)
# Manager-detach
self.detach_volume(context, volume_id, instance)

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def reboot_instance(self, context, instance, block_device_info,
reboot_type):
"""Reboot an instance on this host."""
# acknowledge the request made it to the manager
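        # A SOFT reboot asks the guest OS to restart itself; a HARD reboot
        # power-cycles the domain, so each path tracks its own task states.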
if reboot_type == "SOFT":
instance.task_state = task_states.REBOOT_PENDING
expected_states = (task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED)
else:
instance.task_state = task_states.REBOOT_PENDING_HARD
expected_states = (task_states.REBOOTING_HARD,
task_states.REBOOT_PENDING_HARD,
task_states.REBOOT_STARTED_HARD)
context = context.elevated()
LOG.info(_LI("Rebooting instance"), instance=instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBOOT,
phase=fields.NotificationPhase.START
)
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=expected_states)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warning(_LW('trying to reboot a non-running instance:'
' (state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
instance=instance)
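
        # The driver reports via this callback any volumes it had to detach
        # to complete the reboot, so they can also be cleaned up API-side.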
def bad_volumes_callback(bad_devices):
self._handle_bad_volumes_detached(
context, instance, bad_devices, block_device_info)

        try:
# Don't change it out of rescue mode
if instance.vm_state == vm_states.RESCUED:
new_vm_state = vm_states.RESCUED
else:
new_vm_state = vm_states.ACTIVE
new_power_state = None
if reboot_type == "SOFT":
instance.task_state = task_states.REBOOT_STARTED
expected_state = task_states.REBOOT_PENDING
else:
instance.task_state = task_states.REBOOT_STARTED_HARD
expected_state = task_states.REBOOT_PENDING_HARD
instance.save(expected_task_state=expected_state)
self.driver.reboot(context, instance,
network_info,
reboot_type,
block_device_info=block_device_info,
bad_volumes_callback=bad_volumes_callback)
except Exception as error:
with excutils.save_and_reraise_exception() as ctxt:
exc_info = sys.exc_info()
# if the reboot failed but the VM is running don't
# put it into an error state
new_power_state = self._get_power_state(context, instance)
if new_power_state == power_state.RUNNING:
LOG.warning(_LW('Reboot failed but instance is running'),
instance=instance)
compute_utils.add_instance_fault_from_exc(context,
instance, error, exc_info)
self._notify_about_instance_usage(context, instance,
'reboot.error', fault=error)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBOOT,
phase=fields.NotificationPhase.ERROR,
exception=error
)
ctxt.reraise = False
else:
LOG.error(_LE('Cannot reboot instance: %s'), error,
instance=instance)
self._set_instance_obj_error_state(context, instance)
if not new_power_state:
new_power_state = self._get_power_state(context, instance)
try:
instance.power_state = new_power_state
instance.vm_state = new_vm_state
instance.task_state = None
instance.save()
except exception.InstanceNotFound:
LOG.warning(_LW("Instance disappeared during reboot"),
instance=instance)
self._notify_about_instance_usage(context, instance, "reboot.end")
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBOOT,
phase=fields.NotificationPhase.END
)

    @delete_image_on_error
def _do_snapshot_instance(self, context, image_id, instance):
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_BACKUP)

    @wrap_exception()
@reverts_task_state
@wrap_instance_fault
def backup_instance(self, context, image_id, instance, backup_type,
rotation):
"""Backup an instance on this host.

        :param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around
"""
self._do_snapshot_instance(context, image_id, instance)
self._rotate_backups(context, instance, backup_type, rotation)

    @wrap_exception()
@reverts_task_state
@wrap_instance_fault
@delete_image_on_error
def snapshot_instance(self, context, image_id, instance):
"""Snapshot an instance on this host.

        :param context: security context
:param image_id: glance.db.sqlalchemy.models.Image.Id
:param instance: a nova.objects.instance.Instance object
"""
# NOTE(dave-mcnally) the task state will already be set by the api
# but if the compute manager has crashed/been restarted prior to the
# request getting here the task state may have been cleared so we set
# it again and things continue normally
try:
instance.task_state = task_states.IMAGE_SNAPSHOT
instance.save(
expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
except exception.InstanceNotFound:
            # the instance may no longer exist; no point in continuing
LOG.debug("Instance not found, could not set state %s "
"for instance.",
task_states.IMAGE_SNAPSHOT, instance=instance)
return
except exception.UnexpectedDeletingTaskStateError:
LOG.debug("Instance being deleted, snapshot cannot continue",
instance=instance)
return
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_SNAPSHOT)

    def _snapshot_instance(self, context, image_id, instance,
expected_task_state):
context = context.elevated()
instance.power_state = self._get_power_state(context, instance)
try:
instance.save()
LOG.info(_LI('instance snapshotting'), instance=instance)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warning(_LW('trying to snapshot a non-running instance: '
'(state: %(state)s expected: %(running)s)'),
{'state': state, 'running': running},
instance=instance)
self._notify_about_instance_usage(
context, instance, "snapshot.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SNAPSHOT,
phase=fields.NotificationPhase.START)
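
            # The driver invokes this callback as the snapshot progresses;
            # the final transition is to IMAGE_UPLOADING, which is asserted
            # when the task state is cleared below.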
def update_task_state(task_state,
expected_state=expected_task_state):
instance.task_state = task_state
instance.save(expected_task_state=expected_state)

            self.driver.snapshot(context, instance, image_id,
update_task_state)
instance.task_state = None
instance.save(expected_task_state=task_states.IMAGE_UPLOADING)
self._notify_about_instance_usage(context, instance,
"snapshot.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SNAPSHOT,
phase=fields.NotificationPhase.END)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the snapshot
# Quickly bail out of here
msg = 'Instance disappeared during snapshot'
LOG.debug(msg, instance=instance)
try:
image_service = glance.get_default_image_service()
image = image_service.show(context, image_id)
if image['status'] != 'active':
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Error while trying to clean up image %s"),
image_id, instance=instance)
except exception.ImageNotFound:
instance.task_state = None
instance.save()
msg = _LW("Image not found during snapshot")
LOG.warning(msg, instance=instance)

    def _post_interrupted_snapshot_cleanup(self, context, instance):
self.driver.post_interrupted_snapshot_cleanup(context, instance)

    @messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
self.driver.volume_snapshot_create(context, instance, volume_id,
create_info)

    @messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
def volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info):
self.driver.volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info)

    @wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.

        Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.

        :param context: security context
:param instance: Instance dict
:param backup_type: a user-defined type, like "daily" or "weekly" etc.
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance.uuid}
images = self.image_api.get_all(context, filters=filters,
sort_key='created_at', sort_dir='desc')
num_images = len(images)
LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)",
{'num_images': num_images, 'rotation': rotation},
instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug("Rotating out %d backups", excess,
instance=instance)
for i in range(excess):
image = images.pop()
image_id = image['id']
LOG.debug("Deleting image %s", image_id,
instance=instance)
try:
self.image_api.delete(context, image_id)
except exception.ImageNotFound:
LOG.info(_LI("Failed to find image %(image_id)s to "
"delete"), {'image_id': image_id},
instance=instance)

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def set_admin_password(self, context, instance, new_pass):
"""Set the root/admin password for an instance on this host.

        This is generally only called by API password resets after an
image has been built.

        @param context: Nova auth context.
@param instance: Nova instance object.
@param new_pass: The admin password for the instance.
"""
context = context.elevated()
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
instance.task_state = None
instance.save(expected_task_state=task_states.UPDATING_PASSWORD)
_msg = _('instance %s is not running') % instance.uuid
raise exception.InstancePasswordSetFailed(
instance=instance.uuid, reason=_msg)
try:
self.driver.set_admin_password(instance, new_pass)
LOG.info(_LI("Root password set"), instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
except exception.InstanceAgentNotEnabled:
with excutils.save_and_reraise_exception():
LOG.debug('Guest agent is not enabled for the instance.',
instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
except exception.SetAdminPasswdNotSupported:
with excutils.save_and_reraise_exception():
LOG.info(_LI('set_admin_password is not supported '
'by this driver or guest instance.'),
instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
except NotImplementedError:
LOG.warning(_LW('set_admin_password is not implemented '
'by this driver or guest instance.'),
instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
raise NotImplementedError(_('set_admin_password is not '
'implemented by this driver or guest '
'instance.'))
except exception.UnexpectedTaskStateError:
# interrupted by another (most likely delete) task
# do not retry
raise
except Exception:
# Catch all here because this could be anything.
LOG.exception(_LE('set_admin_password failed'),
instance=instance)
self._set_instance_obj_error_state(context, instance)
# We create a new exception here so that we won't
# potentially reveal password information to the
# API caller. The real exception is logged above
_msg = _('error setting admin password')
raise exception.InstancePasswordSetFailed(
instance=instance.uuid, reason=_msg)

    @wrap_exception()
@reverts_task_state
@wrap_instance_fault
def inject_file(self, context, path, file_contents, instance):
"""Write a file to the specified path in an instance on this host."""
# NOTE(russellb) Remove this method, as well as the underlying virt
# driver methods, when the compute rpc interface is bumped to 4.x
# as it is no longer used.
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
            LOG.warning(_LW('trying to inject a file into a non-running '
                            'instance (state: %(current_state)s expected: '
                            '%(expected_state)s)'),
{'current_state': current_power_state,
'expected_state': expected_state},
instance=instance)
LOG.info(_LI('injecting file to %s'), path,
instance=instance)
self.driver.inject_file(instance, path, file_contents)

    def _get_rescue_image(self, context, instance, rescue_image_ref=None):
"""Determine what image should be used to boot the rescue VM."""
# 1. If rescue_image_ref is passed in, use that for rescue.
# 2. Else, use the base image associated with instance's current image.
# The idea here is to provide the customer with a rescue
# environment which they are familiar with.
# So, if they built their instance off of a Debian image,
# their rescue VM will also be Debian.
# 3. As a last resort, use instance's current image.
if not rescue_image_ref:
system_meta = utils.instance_sys_meta(instance)
rescue_image_ref = system_meta.get('image_base_image_ref')
if not rescue_image_ref:
LOG.warning(_LW('Unable to find a different image to use for '
'rescue VM, using instance\'s current image'),
instance=instance)
rescue_image_ref = instance.image_ref
return objects.ImageMeta.from_image_ref(
context, self.image_api, rescue_image_ref)

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def rescue_instance(self, context, instance, rescue_password,
rescue_image_ref, clean_shutdown):
context = context.elevated()
LOG.info(_LI('Rescuing'), instance=instance)
admin_password = (rescue_password if rescue_password else
utils.generate_password())
network_info = self.network_api.get_instance_nw_info(context, instance)
rescue_image_meta = self._get_rescue_image(context, instance,
rescue_image_ref)
extra_usage_info = {'rescue_image_name':
self._get_image_name(rescue_image_meta)}
self._notify_about_instance_usage(context, instance,
"rescue.start", extra_usage_info=extra_usage_info,
network_info=network_info)
try:
self._power_off_instance(context, instance, clean_shutdown)
self.driver.rescue(context, instance,
network_info,
rescue_image_meta, admin_password)
except Exception as e:
LOG.exception(_LE("Error trying to Rescue Instance"),
instance=instance)
self._set_instance_obj_error_state(context, instance)
raise exception.InstanceNotRescuable(
instance_id=instance.uuid,
reason=_("Driver Error: %s") % e)
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
instance.vm_state = vm_states.RESCUED
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESCUING)
self._notify_about_instance_usage(context, instance,
"rescue.end", extra_usage_info=extra_usage_info,
network_info=network_info)

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def unrescue_instance(self, context, instance):
context = context.elevated()
LOG.info(_LI('Unrescuing'), instance=instance)
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance,
"unrescue.start", network_info=network_info)
with self._error_out_instance_on_exception(context, instance):
self.driver.unrescue(instance,
network_info)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=task_states.UNRESCUING)
self._notify_about_instance_usage(context,
instance,
"unrescue.end",
network_info=network_info)

    @wrap_exception()
@wrap_instance_fault
def change_instance_metadata(self, context, diff, instance):
"""Update the metadata published to the instance."""
LOG.debug("Changing instance metadata according to %r",
diff, instance=instance)
self.driver.change_instance_metadata(context, instance, diff)

    @wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def confirm_resize(self, context, instance, reservations, migration):
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)

        @utils.synchronized(instance.uuid)
def do_confirm_resize(context, instance, migration_id):
# NOTE(wangpan): Get the migration status from db, if it has been
# confirmed, we do nothing and return here
LOG.debug("Going to confirm migration %s", migration_id,
instance=instance)
try:
# TODO(russellb) Why are we sending the migration object just
# to turn around and look it up from the db again?
migration = objects.Migration.get_by_id(
context.elevated(), migration_id)
except exception.MigrationNotFound:
LOG.error(_LE("Migration %s is not found during confirmation"),
migration_id, instance=instance)
quotas.rollback()
return
if migration.status == 'confirmed':
LOG.info(_LI("Migration %s is already confirmed"),
migration_id, instance=instance)
quotas.rollback()
return
elif migration.status not in ('finished', 'confirming'):
LOG.warning(_LW("Unexpected confirmation status '%(status)s' "
"of migration %(id)s, exit confirmation "
"process"),
{"status": migration.status, "id": migration_id},
instance=instance)
quotas.rollback()
return
# NOTE(wangpan): Get the instance from db, if it has been
# deleted, we do nothing and return here
expected_attrs = ['metadata', 'system_metadata', 'flavor']
try:
instance = objects.Instance.get_by_uuid(
context, instance.uuid,
expected_attrs=expected_attrs)
except exception.InstanceNotFound:
LOG.info(_LI("Instance is not found during confirmation"),
instance=instance)
quotas.rollback()
return
self._confirm_resize(context, instance, quotas,
migration=migration)

        do_confirm_resize(context, instance, migration.id)

    def _confirm_resize(self, context, instance, quotas,
migration=None):
"""Destroys the source instance."""
self._notify_about_instance_usage(context, instance,
"resize.confirm.start")
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# NOTE(danms): delete stashed migration information
old_instance_type = instance.old_flavor
instance.old_flavor = None
instance.new_flavor = None
instance.system_metadata.pop('old_vm_state', None)
instance.save()
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute, teardown=True)
network_info = self.network_api.get_instance_nw_info(context,
instance)
self.driver.confirm_migration(context, migration, instance,
network_info)
migration.status = 'confirmed'
with migration.obj_as_admin():
migration.save()
rt = self._get_resource_tracker()
rt.drop_move_claim(context, instance, migration.source_node,
old_instance_type, prefix='old_')
instance.drop_migration_context()
# NOTE(mriedem): The old_vm_state could be STOPPED but the user
# might have manually powered up the instance to confirm the
# resize/migrate, so we need to check the current power state
# on the instance and set the vm_state appropriately. We default
# to ACTIVE because if the power state is not SHUTDOWN, we
# assume _sync_instance_power_state will clean it up.
p_state = instance.power_state
vm_state = None
if p_state == power_state.SHUTDOWN:
vm_state = vm_states.STOPPED
LOG.debug("Resized/migrated instance is powered off. "
"Setting vm_state to '%s'.", vm_state,
instance=instance)
else:
vm_state = vm_states.ACTIVE
instance.vm_state = vm_state
instance.task_state = None
instance.save(expected_task_state=[None, task_states.DELETING])
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
network_info=network_info)
quotas.commit()

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def revert_resize(self, context, instance, migration, reservations):
"""Destroys the new instance on the destination machine.

        Reverts the model changes, and powers on the old instance on the
source machine.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
# NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here.
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
teardown=True)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_start(context,
instance,
migration_p)
network_info = self.network_api.get_instance_nw_info(context,
instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
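            # Do not wipe disks that live on storage shared with the source
            # host; the revert would otherwise destroy the very disks the
            # source instance still needs.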
destroy_disks = not self._is_instance_storage_shared(
context, instance, host=migration.source_compute)
self.driver.destroy(context, instance, network_info,
block_device_info, destroy_disks)
self._terminate_volume_connections(context, instance, bdms)
migration.status = 'reverted'
with migration.obj_as_admin():
migration.save()
# NOTE(ndipanov): We need to do this here because dropping the
# claim means we lose the migration_context data. We really should
# fix this by moving the drop_move_claim call to the
# finish_revert_resize method as this is racy (revert is dropped,
# but instance resources will be tracked with the new flavor until
# it gets rolled back in finish_revert_resize, which is
# potentially wrong for a period of time).
instance.revert_migration_context()
instance.save()
rt = self._get_resource_tracker()
rt.drop_move_claim(context, instance, instance.node)
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration.source_compute,
quotas.reservations)

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def finish_revert_resize(self, context, instance, reservations, migration):
"""Finishes the second half of reverting a resize.

        Bring the original source instance state back (active/shutoff) and
revert the resized attributes in the database.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
# NOTE(mriedem): delete stashed old_vm_state information; we
# default to ACTIVE for backwards compatibility if old_vm_state
# is not set
old_vm_state = instance.system_metadata.pop('old_vm_state',
vm_states.ACTIVE)
self._set_instance_info(instance, instance.old_flavor)
instance.old_flavor = None
instance.new_flavor = None
instance.host = migration.source_compute
instance.node = migration.source_node
instance.save()
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute)
migration_p = obj_base.obj_to_primitive(migration)
# NOTE(hanrong): we need to change migration_p['dest_compute'] to
# source host temporarily. "network_api.migrate_instance_finish"
# will setup the network for the instance on the destination host.
            # For revert resize, the instance goes back to the source host,
            # so the network setup for the instance should happen on the
            # source host; hence migration_p['dest_compute'] is set to the
            # source host here.
migration_p['dest_compute'] = migration.source_compute
self.network_api.migrate_instance_finish(context,
instance,
migration_p)
network_info = self.network_api.get_instance_nw_info(context,
instance)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
power_on = old_vm_state != vm_states.STOPPED
self.driver.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
instance.drop_migration_context()
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_REVERTING)
# if the original vm state was STOPPED, set it back to STOPPED
LOG.info(_LI("Updating instance to original state: '%s'"),
old_vm_state, instance=instance)
if power_on:
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save()
else:
instance.task_state = task_states.POWERING_OFF
instance.save()
self.stop_instance(context, instance=instance,
clean_shutdown=True)
self._notify_about_instance_usage(
context, instance, "resize.revert.end")
quotas.commit()

    def _prep_resize(self, context, image, instance, instance_type,
quotas, request_spec, filter_properties, node,
clean_shutdown=True):
if not filter_properties:
filter_properties = {}
if not instance.host:
self._set_instance_obj_error_state(context, instance)
msg = _('Instance has no source host')
raise exception.MigrationError(reason=msg)
same_host = instance.host == self.host
# if the flavor IDs match, it's migrate; otherwise resize
if same_host and instance_type.id == instance['instance_type_id']:
# check driver whether support migrate to same host
if not self.driver.capabilities['supports_migrate_to_same_host']:
raise exception.UnableToMigrateToSelf(
instance_id=instance.uuid, host=self.host)
# NOTE(danms): Stash the new instance_type to avoid having to
# look it up in the database later
instance.new_flavor = instance_type
# NOTE(mriedem): Stash the old vm_state so we can set the
# resized/reverted instance back to the same state later.
vm_state = instance.vm_state
LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
instance.system_metadata['old_vm_state'] = vm_state
instance.save()
limits = filter_properties.get('limits', {})
rt = self._get_resource_tracker()
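        # resize_claim reserves resources for the new flavor on this host
        # and creates the Migration record that resize_instance consumes.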
with rt.resize_claim(context, instance, instance_type, node,
image_meta=image, limits=limits) as claim:
LOG.info(_LI('Migrating'), instance=instance)
self.compute_rpcapi.resize_instance(
context, instance, claim.migration, image,
instance_type, quotas.reservations,
clean_shutdown)

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node,
clean_shutdown):
"""Initiates the process of moving a running instance to another host.

        Possibly changes the RAM and disk size in the process.
"""
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug("No node specified, defaulting to %s", node,
instance=instance)
# NOTE(melwitt): Remove this in version 5.0 of the RPC API
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
if not isinstance(instance_type, objects.Flavor):
instance_type = objects.Flavor.get_by_id(context,
instance_type['id'])
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
try:
self._prep_resize(context, image, instance,
instance_type, quotas,
request_spec, filter_properties,
node, clean_shutdown)
# NOTE(dgenin): This is thrown in LibvirtDriver when the
# instance to be migrated is backed by LVM.
# Remove when LVM migration is implemented.
except exception.MigrationPreCheckError:
raise
except Exception:
# try to re-schedule the resize elsewhere:
exc_info = sys.exc_info()
self._reschedule_resize_or_reraise(context, image, instance,
exc_info, instance_type, quotas, request_spec,
filter_properties)
finally:
extra_usage_info = dict(
new_instance_type=instance_type.name,
new_instance_type_id=instance_type.id)
self._notify_about_instance_usage(
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)

    def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
instance_type, quotas, request_spec, filter_properties):
"""Try to re-schedule the resize or re-raise the original error to
error out the instance.
"""
if not request_spec:
request_spec = {}
if not filter_properties:
filter_properties = {}
rescheduled = False
instance_uuid = instance.uuid
try:
reschedule_method = self.compute_task_api.resize_instance
scheduler_hint = dict(filter_properties=filter_properties)
method_args = (instance, None, scheduler_hint, instance_type,
quotas.reservations)
task_state = task_states.RESIZE_PREP
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance, reschedule_method,
method_args, task_state, exc_info)
except Exception as error:
rescheduled = False
LOG.exception(_LE("Error trying to reschedule"),
instance_uuid=instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
instance, error,
exc_info=sys.exc_info())
self._notify_about_instance_usage(context, instance,
'resize.error', fault=error)
if rescheduled:
self._log_original_error(exc_info, instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
instance, exc_info[1], exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'resize.error', fault=exc_info[1])
else:
# not re-scheduling
six.reraise(*exc_info)

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def resize_instance(self, context, instance, image,
reservations, migration, instance_type,
clean_shutdown):
"""Starts the migration of a running instance to another host."""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
            # TODO(chaochin) Remove this once the RPC API is bumped to v5
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
if (not instance_type or
not isinstance(instance_type, objects.Flavor)):
instance_type = objects.Flavor.get_by_id(
context, migration['new_instance_type_id'])
network_info = self.network_api.get_instance_nw_info(context,
instance)
migration.status = 'migrating'
with migration.obj_as_admin():
migration.save()
instance.task_state = task_states.RESIZE_MIGRATING
instance.save(expected_task_state=task_states.RESIZE_PREP)
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE,
phase=fields.NotificationPhase.START)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration.dest_host,
instance_type, network_info,
block_device_info,
timeout, retry_interval)
self._terminate_volume_connections(context, instance, bdms)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_start(context,
instance,
migration_p)
migration.status = 'post-migrating'
with migration.obj_as_admin():
migration.save()
instance.host = migration.dest_compute
instance.node = migration.dest_node
instance.task_state = task_states.RESIZE_MIGRATED
instance.save(expected_task_state=task_states.RESIZE_MIGRATING)
self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info,
migration.dest_compute, reservations=quotas.reservations)
self._notify_about_instance_usage(context, instance, "resize.end",
network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE,
phase=fields.NotificationPhase.END)
self.instance_events.clear_events_for_instance(instance)

    def _terminate_volume_connections(self, context, instance, bdms):
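        # The connector describes this host's storage initiator; terminating
        # the connections tears down the host-side exports so that the
        # volumes can be attached cleanly on the destination.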
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
if bdm.is_volume:
self.volume_api.terminate_connection(context, bdm.volume_id,
connector)

    @staticmethod
def _set_instance_info(instance, instance_type):
instance.instance_type_id = instance_type.id
# NOTE(danms): These are purely for any legacy code that still
# looks at them.
instance.memory_mb = instance_type.memory_mb
instance.vcpus = instance_type.vcpus
instance.root_gb = instance_type.root_gb
instance.ephemeral_gb = instance_type.ephemeral_gb
instance.flavor = instance_type

    def _finish_resize(self, context, instance, migration, disk_info,
image_meta):
resize_instance = False
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
old_instance_type = instance.get_flavor()
# NOTE(mriedem): Get the old_vm_state so we know if we should
# power on the instance. If old_vm_state is not set we need to default
# to ACTIVE for backwards compatibility
old_vm_state = instance.system_metadata.get('old_vm_state',
vm_states.ACTIVE)
instance.old_flavor = old_instance_type
if old_instance_type_id != new_instance_type_id:
instance_type = instance.get_flavor('new')
self._set_instance_info(instance, instance_type)
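            # Only disk geometry changes require the virt driver to resize
            # the image; the flag is forwarded to finish_migration below.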
for key in ('root_gb', 'swap', 'ephemeral_gb'):
if old_instance_type[key] != instance_type[key]:
resize_instance = True
break
instance.apply_migration_context()
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
migration['dest_compute'])
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_finish(context,
instance,
migration_p)
network_info = self.network_api.get_instance_nw_info(context, instance)
instance.task_state = task_states.RESIZE_FINISH
instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
self._notify_about_instance_usage(
context, instance, "finish_resize.start",
network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE_FINISH,
phase=fields.NotificationPhase.START)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
# NOTE(mriedem): If the original vm_state was STOPPED, we don't
# automatically power on the instance after it's migrated
power_on = old_vm_state != vm_states.STOPPED
try:
self.driver.finish_migration(context, migration, instance,
disk_info,
network_info,
image_meta, resize_instance,
block_device_info, power_on)
except Exception:
with excutils.save_and_reraise_exception():
if old_instance_type_id != new_instance_type_id:
self._set_instance_info(instance,
old_instance_type)
migration.status = 'finished'
with migration.obj_as_admin():
migration.save()
instance.vm_state = vm_states.RESIZED
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_FINISH)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE_FINISH,
phase=fields.NotificationPhase.END)

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
reservations, migration):
"""Completes the migration process.

        Sets up the newly transferred disk and turns on the instance at its
new host machine.
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
image_meta = objects.ImageMeta.from_dict(image)
self._finish_resize(context, instance, migration,
disk_info, image_meta)
quotas.commit()
except Exception:
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance=instance)
with excutils.save_and_reraise_exception():
try:
quotas.rollback()
except Exception:
LOG.exception(_LE("Failed to rollback quota for failed "
"finish_resize"),
instance=instance)
self._set_instance_obj_error_state(context, instance)

    @wrap_exception()
@wrap_instance_fault
def add_fixed_ip_to_instance(self, context, network_id, instance):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
self._notify_about_instance_usage(
context, instance, "create_ip.start")
network_info = self.network_api.add_fixed_ip_to_instance(context,
instance,
network_id)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "create_ip.end", network_info=network_info)

    @wrap_exception()
@wrap_instance_fault
def remove_fixed_ip_from_instance(self, context, address, instance):
"""Calls network_api to remove existing fixed_ip from instance
by injecting the altered network info and resetting
instance networking.
"""
self._notify_about_instance_usage(
context, instance, "delete_ip.start")
network_info = self.network_api.remove_fixed_ip_from_instance(context,
instance,
address)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "delete_ip.end", network_info=network_info)

    @wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def pause_instance(self, context, instance):
"""Pause an instance on this host."""
context = context.elevated()
LOG.info(_LI('Pausing'), instance=instance)
self._notify_about_instance_usage(context, instance, 'pause.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.PAUSE,
phase=fields.NotificationPhase.START)
self.driver.pause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.PAUSED
instance.task_state = None
instance.save(expected_task_state=task_states.PAUSING)
self._notify_about_instance_usage(context, instance, 'pause.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.PAUSE,
phase=fields.NotificationPhase.END)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def unpause_instance(self, context, instance):
"""Unpause a paused instance on this host."""
context = context.elevated()
LOG.info(_LI('Unpausing'), instance=instance)
self._notify_about_instance_usage(context, instance, 'unpause.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNPAUSE,
phase=fields.NotificationPhase.START)
self.driver.unpause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.UNPAUSING)
self._notify_about_instance_usage(context, instance, 'unpause.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNPAUSE,
phase=fields.NotificationPhase.END)
@wrap_exception()
def host_power_action(self, context, action):
"""Reboots, shuts down or powers up the host."""
return self.driver.host_power_action(action)
@wrap_exception()
def host_maintenance_mode(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self.driver.host_maintenance_mode(host, mode)
@wrap_exception()
def set_host_enabled(self, context, enabled):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(enabled)
@wrap_exception()
def get_host_uptime(self, context):
"""Returns the result of calling "uptime" on the target host."""
return self.driver.get_host_uptime()
@wrap_exception()
@wrap_instance_fault
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), instance=instance)
return self.driver.get_diagnostics(instance)
else:
raise exception.InstanceInvalidState(
attr='power state',
instance_uuid=instance.uuid,
state=power_state.STATE_MAP[instance.power_state],
method='get_diagnostics')
# TODO(alaski): Remove object_compat for RPC version 5.0
@object_compat
@wrap_exception()
@wrap_instance_fault
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info(_LI("Retrieving diagnostics"), instance=instance)
diags = self.driver.get_instance_diagnostics(instance)
return diags.serialize()
else:
raise exception.InstanceInvalidState(
attr='power state',
instance_uuid=instance.uuid,
state=power_state.STATE_MAP[instance.power_state],
                method='get_instance_diagnostics')
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def suspend_instance(self, context, instance):
"""Suspend the given instance."""
context = context.elevated()
# Store the old state
instance.system_metadata['old_vm_state'] = instance.vm_state
self._notify_about_instance_usage(context, instance, 'suspend.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SUSPEND,
phase=fields.NotificationPhase.START)
with self._error_out_instance_on_exception(context, instance,
instance_state=instance.vm_state):
self.driver.suspend(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.SUSPENDED
instance.task_state = None
instance.save(expected_task_state=task_states.SUSPENDING)
self._notify_about_instance_usage(context, instance, 'suspend.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SUSPEND,
phase=fields.NotificationPhase.END)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def resume_instance(self, context, instance):
"""Resume the given suspended instance."""
context = context.elevated()
LOG.info(_LI('Resuming'), instance=instance)
self._notify_about_instance_usage(context, instance, 'resume.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESUME,
phase=fields.NotificationPhase.START)
network_info = self.network_api.get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(
context, instance)
with self._error_out_instance_on_exception(context, instance,
instance_state=instance.vm_state):
self.driver.resume(context, instance, network_info,
block_device_info)
instance.power_state = self._get_power_state(context, instance)
# We default to the ACTIVE state for backwards compatibility
instance.vm_state = instance.system_metadata.pop('old_vm_state',
vm_states.ACTIVE)
instance.task_state = None
instance.save(expected_task_state=task_states.RESUMING)
self._notify_about_instance_usage(context, instance, 'resume.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESUME,
phase=fields.NotificationPhase.END)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def shelve_instance(self, context, instance, image_id,
clean_shutdown):
"""Shelve an instance.
This should be used when you want to take a snapshot of the instance.
It also adds system_metadata that can be used by a periodic task to
offload the shelved instance after a period of time.
:param context: request context
:param instance: an Instance object
:param image_id: an image id to snapshot to.
:param clean_shutdown: give the GuestOS a chance to stop
"""
@utils.synchronized(instance.uuid)
def do_shelve_instance():
self._shelve_instance(context, instance, image_id, clean_shutdown)
do_shelve_instance()
def _shelve_instance(self, context, instance, image_id,
clean_shutdown):
LOG.info(_LI('Shelving'), instance=instance)
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
self._notify_about_instance_usage(context, instance, 'shelve.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE,
phase=fields.NotificationPhase.START)
def update_task_state(task_state, expected_state=task_states.SHELVING):
shelving_state_map = {
task_states.IMAGE_PENDING_UPLOAD:
task_states.SHELVING_IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING:
task_states.SHELVING_IMAGE_UPLOADING,
task_states.SHELVING: task_states.SHELVING}
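            # e.g. the snapshot code's IMAGE_PENDING_UPLOAD is recorded as
            # SHELVING_IMAGE_PENDING_UPLOAD, so a shelve-driven snapshot can
            # be told apart from a plain snapshot of the instance.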
task_state = shelving_state_map[task_state]
expected_state = shelving_state_map[expected_state]
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self._power_off_instance(context, instance, clean_shutdown)
self.driver.snapshot(context, instance, image_id, update_task_state)
instance.system_metadata['shelved_at'] = timeutils.utcnow().isoformat()
instance.system_metadata['shelved_image_id'] = image_id
instance.system_metadata['shelved_host'] = self.host
instance.vm_state = vm_states.SHELVED
instance.task_state = None
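        # A shelved_offload_time of 0 means the instance is offloaded from
        # the hypervisor immediately after shelving, instead of waiting for
        # the periodic offload task.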
if CONF.shelved_offload_time == 0:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=[
task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING])
self._notify_about_instance_usage(context, instance, 'shelve.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE,
phase=fields.NotificationPhase.END)
if CONF.shelved_offload_time == 0:
self._shelve_offload_instance(context, instance,
clean_shutdown=False)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def shelve_offload_instance(self, context, instance, clean_shutdown):
"""Remove a shelved instance from the hypervisor.
This frees up those resources for use by other instances, but may lead
        to slower unshelve times for this instance. This method is used by
        volume-backed instances, since restoring them doesn't involve the
        potentially large download of an image.
:param context: request context
:param instance: nova.objects.instance.Instance
:param clean_shutdown: give the GuestOS a chance to stop
"""
@utils.synchronized(instance.uuid)
def do_shelve_offload_instance():
self._shelve_offload_instance(context, instance, clean_shutdown)
do_shelve_offload_instance()
def _shelve_offload_instance(self, context, instance, clean_shutdown):
LOG.info(_LI('Shelve offloading'), instance=instance)
self._notify_about_instance_usage(context, instance,
'shelve_offload.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE_OFFLOAD,
phase=fields.NotificationPhase.START)
self._power_off_instance(context, instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
self.network_api.cleanup_instance_network_on_host(context, instance,
instance.host)
network_info = self.network_api.get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.destroy(context, instance, network_info,
block_device_info)
instance.power_state = current_power_state
# NOTE(mriedem): The vm_state has to be set before updating the
# resource tracker, see vm_states.ALLOW_RESOURCE_REMOVAL. The host/node
# values cannot be nulled out until after updating the resource tracker
# though.
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = None
instance.save(expected_task_state=[task_states.SHELVING,
task_states.SHELVING_OFFLOADING])
# NOTE(ndipanov): Free resources from the resource tracker
self._update_resource_tracker(context, instance)
# NOTE(sfinucan): RPC calls should no longer be attempted against this
# instance, so ensure any calls result in errors
self._nil_out_instance_obj_host_and_node(instance)
instance.save(expected_task_state=None)
self._delete_scheduler_instance_info(context, instance.uuid)
self._notify_about_instance_usage(context, instance,
'shelve_offload.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE_OFFLOAD,
phase=fields.NotificationPhase.END)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def unshelve_instance(self, context, instance, image,
filter_properties, node):
"""Unshelve the instance.
:param context: request context
:param instance: a nova.objects.instance.Instance object
:param image: an image to build from. If None we assume a
            volume-backed instance.
:param filter_properties: dict containing limits, retry info etc.
:param node: target compute node
"""
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance.uuid)
def do_unshelve_instance():
self._unshelve_instance(context, instance, image,
filter_properties, node)
do_unshelve_instance()
def _unshelve_instance_key_scrub(self, instance):
"""Remove data from the instance that may cause side effects."""
cleaned_keys = dict(
key_data=instance.key_data,
auto_disk_config=instance.auto_disk_config)
instance.key_data = None
instance.auto_disk_config = False
return cleaned_keys
def _unshelve_instance_key_restore(self, instance, keys):
"""Restore previously scrubbed keys before saving the instance."""
instance.update(keys)
def _unshelve_instance(self, context, instance, image, filter_properties,
node):
LOG.info(_LI('Unshelving'), instance=instance)
self._notify_about_instance_usage(context, instance, 'unshelve.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNSHELVE,
phase=fields.NotificationPhase.START)
instance.task_state = task_states.SPAWNING
instance.save()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._prep_block_device(context, instance, bdms)
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
if node is None:
node = self.driver.get_available_nodes()[0]
LOG.debug('No node specified, defaulting to %s', node,
instance=instance)
rt = self._get_resource_tracker()
limits = filter_properties.get('limits', {})
shelved_image_ref = instance.image_ref
if image:
instance.image_ref = image['id']
image_meta = objects.ImageMeta.from_dict(image)
else:
image_meta = objects.ImageMeta.from_dict(
utils.get_image_from_system_metadata(
instance.system_metadata))
self.network_api.setup_instance_network_on_host(context, instance,
self.host)
network_info = self.network_api.get_instance_nw_info(context, instance)
try:
with rt.instance_claim(context, instance, node, limits):
self.driver.spawn(context, instance, image_meta,
injected_files=[],
admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
if image:
instance.image_ref = shelved_image_ref
self._delete_snapshot_of_shelved_instance(context, instance,
image['id'])
self._unshelve_instance_key_restore(instance, scrubbed_keys)
self._update_instance_after_spawn(context, instance)
# Delete system_metadata for a shelved instance
compute_utils.remove_shelved_keys_from_system_metadata(instance)
instance.save(expected_task_state=task_states.SPAWNING)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(context, instance, 'unshelve.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNSHELVE,
phase=fields.NotificationPhase.END)
@messaging.expected_exceptions(NotImplementedError)
@wrap_instance_fault
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
LOG.debug('Reset network', instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance, network_info):
"""Inject network info for the given instance."""
LOG.debug('Inject network info', instance=instance)
LOG.debug('network_info to inject: |%s|', network_info,
instance=instance)
self.driver.inject_network_info(instance,
network_info)
@wrap_instance_fault
def inject_network_info(self, context, instance):
"""Inject network info, but don't return the info."""
network_info = self.network_api.get_instance_nw_info(context, instance)
self._inject_network_info(context, instance, network_info)
@messaging.expected_exceptions(NotImplementedError,
exception.ConsoleNotAvailable,
exception.InstanceNotFound)
@wrap_exception()
@wrap_instance_fault
def get_console_output(self, context, instance, tail_length):
"""Send the console output for the given instance."""
context = context.elevated()
LOG.info(_LI("Get console output"), instance=instance)
output = self.driver.get_console_output(context, instance)
        if isinstance(output, six.text_type):
output = six.b(output)
if tail_length is not None:
output = self._tail_log(output, tail_length)
return output.decode('ascii', 'replace')
def _tail_log(self, log, length):
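        # Return the last ``length`` lines of ``log`` (a bytes object),
        # e.g. _tail_log(b'a\nb\nc', 2) returns b'b\nc'.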
try:
length = int(length)
except ValueError:
length = 0
if length == 0:
return b''
else:
return b'\n'.join(log.split(b'\n')[-int(length):])
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug("Getting vnc console", instance=instance)
token = uuidutils.generate_uuid()
if not CONF.vnc.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
access_url = '%s?token=%s' % (CONF.vnc.novncproxy_base_url, token)
elif console_type == 'xvpvnc':
access_url = '%s?token=%s' % (CONF.vnc.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_vnc_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_spice_console(self, context, console_type, instance):
"""Return connection information for a spice console."""
context = context.elevated()
LOG.debug("Getting spice console", instance=instance)
token = uuidutils.generate_uuid()
if not CONF.spice.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'spice-html5':
# For essex, spicehtml5proxy_base_url must include the full path
# including the html file (like http://myhost/spice_auto.html)
access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_spice_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_rdp_console(self, context, console_type, instance):
"""Return connection information for a RDP console."""
context = context.elevated()
LOG.debug("Getting RDP console", instance=instance)
token = uuidutils.generate_uuid()
if not CONF.rdp.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'rdp-html5':
access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_rdp_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_mks_console(self, context, console_type, instance):
"""Return connection information for a MKS console."""
context = context.elevated()
LOG.debug("Getting MKS console", instance=instance)
token = uuidutils.generate_uuid()
if not CONF.mks.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'webmks':
access_url = '%s?token=%s' % (CONF.mks.mksproxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_mks_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(
exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
exception.SocketPortRangeExhaustedException,
exception.ImageSerialPortNumberInvalid,
exception.ImageSerialPortNumberExceedFlavorValue,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_serial_console(self, context, console_type, instance):
"""Returns connection information for a serial console."""
LOG.debug("Getting serial console", instance=instance)
if not CONF.serial_console.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
context = context.elevated()
token = uuidutils.generate_uuid()
access_url = '%s?token=%s' % (CONF.serial_console.base_url, token)
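        # With the default serial_console.base_url this looks like
        # ws://127.0.0.1:6083/?token=<token>, which is consumed by the
        # nova-serialproxy service.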
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_serial_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound)
@wrap_exception()
@wrap_instance_fault
def validate_console_port(self, ctxt, instance, port, console_type):
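        # A console auth token records the port it was generated for;
        # comparing it against the port the driver currently reports guards
        # against stale tokens, e.g. after an instance reboot changed the
        # console port.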
if console_type == "spice-html5":
console_info = self.driver.get_spice_console(ctxt, instance)
elif console_type == "rdp-html5":
console_info = self.driver.get_rdp_console(ctxt, instance)
elif console_type == "serial":
console_info = self.driver.get_serial_console(ctxt, instance)
elif console_type == "webmks":
console_info = self.driver.get_mks_console(ctxt, instance)
else:
console_info = self.driver.get_vnc_console(ctxt, instance)
return console_info.port == port
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def reserve_block_device_name(self, context, instance, device,
volume_id, disk_bus, device_type):
@utils.synchronized(instance.uuid)
def do_reserve():
bdms = (
objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid))
# NOTE(ndipanov): We need to explicitly set all the fields on the
# object so that obj_load_attr does not fail
new_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
instance_uuid=instance.uuid, boot_index=None,
volume_id=volume_id,
device_name=device, guest_format=None,
disk_bus=disk_bus, device_type=device_type)
new_bdm.device_name = self._get_device_name_for_instance(
instance, bdms, new_bdm)
# NOTE(vish): create bdm here to avoid race condition
new_bdm.create()
return new_bdm
return do_reserve()
@wrap_exception()
@wrap_instance_fault
def attach_volume(self, context, instance, bdm):
"""Attach a volume to an instance."""
driver_bdm = driver_block_device.convert_volume(bdm)
@utils.synchronized(instance.uuid)
def do_attach_volume(context, instance, driver_bdm):
try:
return self._attach_volume(context, instance, driver_bdm)
except Exception:
with excutils.save_and_reraise_exception():
bdm.destroy()
do_attach_volume(context, instance, driver_bdm)
def _attach_volume(self, context, instance, bdm):
context = context.elevated()
LOG.info(_LI('Attaching volume %(volume_id)s to %(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
instance=instance)
try:
bdm.attach(context, instance, self.volume_api, self.driver,
do_driver_attach=True)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to attach %(volume_id)s "
"at %(mountpoint)s"),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
instance=instance)
self.volume_api.unreserve_volume(context, bdm.volume_id)
info = {'volume_id': bdm.volume_id}
self._notify_about_instance_usage(
context, instance, "volume.attach", extra_usage_info=info)
def _notify_volume_usage_detach(self, context, instance, bdm):
if CONF.volume_usage_poll_interval <= 0:
return
vol_stats = []
mp = bdm.device_name
# Handle bootable volumes which will not contain /dev/
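        # e.g. '/dev/vda' is truncated to 'vda' before being passed to
        # block_stats().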
if '/dev/' in mp:
mp = mp[5:]
try:
vol_stats = self.driver.block_stats(instance, mp)
except NotImplementedError:
return
LOG.debug("Updating volume usage cache with totals", instance=instance)
rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
vol_usage = objects.VolumeUsage(context)
vol_usage.volume_id = bdm.volume_id
vol_usage.instance_uuid = instance.uuid
vol_usage.project_id = instance.project_id
vol_usage.user_id = instance.user_id
vol_usage.availability_zone = instance.availability_zone
vol_usage.curr_reads = rd_req
vol_usage.curr_read_bytes = rd_bytes
vol_usage.curr_writes = wr_req
vol_usage.curr_write_bytes = wr_bytes
vol_usage.save(update_totals=True)
self.notifier.info(context, 'volume.usage',
compute_utils.usage_volume_info(vol_usage))
def _detach_volume(self, context, volume_id, instance, destroy_bdm=True,
attachment_id=None):
"""Detach a volume from an instance.
:param context: security context
:param volume_id: the volume id
:param instance: the Instance object to detach the volume from
:param destroy_bdm: if True, the corresponding BDM entry will be marked
as deleted. Disabling this is useful for operations
like rebuild, when we don't want to destroy BDM
"""
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, volume_id, instance.uuid)
self._notify_volume_usage_detach(context, instance, bdm)
LOG.info(_LI('Detaching volume %(volume_id)s'),
{'volume_id': volume_id}, instance=instance)
driver_bdm = driver_block_device.convert_volume(bdm)
driver_bdm.detach(context, instance, self.volume_api, self.driver,
attachment_id=attachment_id, destroy_bdm=destroy_bdm)
info = dict(volume_id=volume_id)
self._notify_about_instance_usage(
context, instance, "volume.detach", extra_usage_info=info)
if destroy_bdm:
bdm.destroy()
@wrap_exception()
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance, attachment_id=None):
"""Detach a volume from an instance."""
self._detach_volume(context, volume_id, instance,
attachment_id=attachment_id)
def _init_volume_connection(self, context, new_volume_id,
old_volume_id, connector, instance, bdm):
new_cinfo = self.volume_api.initialize_connection(context,
new_volume_id,
connector)
old_cinfo = jsonutils.loads(bdm['connection_info'])
if old_cinfo and 'serial' not in old_cinfo:
old_cinfo['serial'] = old_volume_id
# NOTE(lyarwood): serial is not always present in the returned
# connection_info so set it if it is missing as we do in
# DriverVolumeBlockDevice.attach().
if 'serial' not in new_cinfo:
new_cinfo['serial'] = new_volume_id
return (old_cinfo, new_cinfo)
def _swap_volume(self, context, instance, bdm, connector,
old_volume_id, new_volume_id, resize_to):
mountpoint = bdm['device_name']
failed = False
new_cinfo = None
try:
old_cinfo, new_cinfo = self._init_volume_connection(context,
new_volume_id,
old_volume_id,
connector,
instance,
bdm)
# NOTE(lyarwood): The Libvirt driver, the only virt driver
# currently implementing swap_volume, will modify the contents of
# new_cinfo when connect_volume is called. This is then saved to
# the BDM in swap_volume for future use outside of this flow.
LOG.debug("swap_volume: Calling driver volume swap with "
"connection infos: new: %(new_cinfo)s; "
"old: %(old_cinfo)s",
{'new_cinfo': new_cinfo, 'old_cinfo': old_cinfo},
instance=instance)
self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
resize_to)
LOG.debug("swap_volume: Driver volume swap returned, new "
"connection_info is now : %(new_cinfo)s",
{'new_cinfo': new_cinfo})
except Exception as ex:
failed = True
with excutils.save_and_reraise_exception():
compute_utils.notify_about_volume_swap(
context, instance, self.host,
fields.NotificationAction.VOLUME_SWAP,
fields.NotificationPhase.ERROR,
old_volume_id, new_volume_id, ex)
if new_cinfo:
msg = _LE("Failed to swap volume %(old_volume_id)s "
"for %(new_volume_id)s")
LOG.exception(msg, {'old_volume_id': old_volume_id,
'new_volume_id': new_volume_id},
instance=instance)
else:
msg = _LE("Failed to connect to volume %(volume_id)s "
"with volume at %(mountpoint)s")
LOG.exception(msg, {'volume_id': new_volume_id,
'mountpoint': bdm['device_name']},
instance=instance)
self.volume_api.roll_detaching(context, old_volume_id)
self.volume_api.unreserve_volume(context, new_volume_id)
finally:
conn_volume = new_volume_id if failed else old_volume_id
if new_cinfo:
LOG.debug("swap_volume: calling Cinder terminate_connection "
"for %(volume)s", {'volume': conn_volume},
instance=instance)
self.volume_api.terminate_connection(context,
conn_volume,
connector)
# NOTE(lyarwood): The following call to
# os-migrate-volume-completion returns a dict containing
# save_volume_id, this volume id has two possible values :
# 1. old_volume_id if we are migrating (retyping) volumes
# 2. new_volume_id if we are swapping between two existing volumes
# This volume id is later used to update the volume_id and
# connection_info['serial'] of the BDM.
comp_ret = self.volume_api.migrate_volume_completion(
context,
old_volume_id,
new_volume_id,
error=failed)
LOG.debug("swap_volume: Cinder migrate_volume_completion "
"returned: %(comp_ret)s", {'comp_ret': comp_ret},
instance=instance)
return (comp_ret, new_cinfo)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def swap_volume(self, context, old_volume_id, new_volume_id, instance):
"""Swap volume for an instance."""
context = context.elevated()
compute_utils.notify_about_volume_swap(
context, instance, self.host,
fields.NotificationAction.VOLUME_SWAP,
fields.NotificationPhase.START,
old_volume_id, new_volume_id)
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, old_volume_id, instance.uuid)
connector = self.driver.get_volume_connector(instance)
resize_to = 0
old_vol_size = self.volume_api.get(context, old_volume_id)['size']
new_vol_size = self.volume_api.get(context, new_volume_id)['size']
if new_vol_size > old_vol_size:
resize_to = new_vol_size
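        # e.g. swapping a 1 GiB volume for a 2 GiB volume sets resize_to=2,
        # so the guest device is grown to match the new volume's size.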
LOG.info(_LI('Swapping volume %(old_volume)s for %(new_volume)s'),
{'old_volume': old_volume_id, 'new_volume': new_volume_id},
instance=instance)
comp_ret, new_cinfo = self._swap_volume(context, instance,
bdm,
connector,
old_volume_id,
new_volume_id,
resize_to)
# NOTE(lyarwood): Update the BDM with the modified new_cinfo and
# correct volume_id returned by Cinder.
save_volume_id = comp_ret['save_volume_id']
new_cinfo['serial'] = save_volume_id
values = {
'connection_info': jsonutils.dumps(new_cinfo),
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': save_volume_id,
'no_device': None}
if resize_to:
values['volume_size'] = resize_to
LOG.debug("swap_volume: Updating volume %(volume_id)s BDM record with "
"%(updates)s", {'volume_id': bdm.volume_id,
'updates': values},
instance=instance)
bdm.update(values)
bdm.save()
compute_utils.notify_about_volume_swap(
context, instance, self.host,
fields.NotificationAction.VOLUME_SWAP,
fields.NotificationPhase.END,
old_volume_id, new_volume_id)
@wrap_exception()
def remove_volume_connection(self, context, volume_id, instance):
"""Remove a volume connection using the volume api."""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
try:
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, volume_id, instance.uuid)
connector = self.driver.get_volume_connector(instance)
driver_bdm = driver_block_device.convert_volume(bdm)
driver_bdm.driver_detach(context, instance, connector,
self.volume_api, self.driver)
self.volume_api.terminate_connection(context, volume_id, connector)
except exception.NotFound:
pass
@wrap_exception()
@wrap_instance_fault
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
if not self.driver.capabilities['supports_attach_interface']:
raise exception.AttachInterfaceNotSupported(
instance_id=instance.uuid)
bind_host_id = self.driver.network_binding_host_id(context, instance)
network_info = self.network_api.allocate_port_for_instance(
context, instance, port_id, network_id, requested_ip,
bind_host_id=bind_host_id)
if len(network_info) != 1:
LOG.error(_LE('allocate_port_for_instance returned %(ports)s '
'ports'), {'ports': len(network_info)})
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
image_meta = objects.ImageMeta.from_instance(instance)
try:
self.driver.attach_interface(context, instance, image_meta,
network_info[0])
except exception.NovaException as ex:
port_id = network_info[0].get('id')
LOG.warning(_LW("attach interface failed , try to deallocate "
"port %(port_id)s, reason: %(msg)s"),
{'port_id': port_id, 'msg': ex},
instance=instance)
try:
self.network_api.deallocate_port_for_instance(
context, instance, port_id)
except Exception:
LOG.warning(_LW("deallocate port %(port_id)s failed"),
{'port_id': port_id}, instance=instance)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
return network_info[0]
@wrap_exception()
@wrap_instance_fault
def detach_interface(self, context, instance, port_id):
"""Detach a network adapter from an instance."""
network_info = instance.info_cache.network_info
condemned = None
for vif in network_info:
if vif['id'] == port_id:
condemned = vif
break
if condemned is None:
raise exception.PortNotFound(_("Port %s is not "
"attached") % port_id)
try:
self.driver.detach_interface(context, instance, condemned)
except exception.NovaException as ex:
LOG.warning(_LW("Detach interface failed, port_id=%(port_id)s,"
" reason: %(msg)s"),
{'port_id': port_id, 'msg': ex}, instance=instance)
raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)
else:
try:
self.network_api.deallocate_port_for_instance(
context, instance, port_id)
except Exception as ex:
with excutils.save_and_reraise_exception():
# Since this is a cast operation, log the failure for
# triage.
LOG.warning(_LW('Failed to deallocate port %(port_id)s '
'for instance. Error: %(error)s'),
{'port_id': port_id, 'error': ex},
instance=instance)
def _get_compute_info(self, context, host):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host)
@wrap_exception()
def check_instance_shared_storage(self, ctxt, instance, data):
"""Check if the instance files are shared
:param ctxt: security context
:param instance: dict of instance data
:param data: result of driver.check_instance_shared_storage_local
        Returns True if the instance's disks are located on shared storage
        and False otherwise.
"""
return self.driver.check_instance_shared_storage_remote(ctxt, data)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
if None, calculate it in driver
:param disk_over_commit: if true, allow disk over commit
if None, ignore disk usage checking
:returns: a dict containing migration info
"""
return self._do_check_can_live_migrate_destination(ctxt, instance,
block_migration,
disk_over_commit)
def _do_check_can_live_migrate_destination(self, ctxt, instance,
block_migration,
disk_over_commit):
src_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, instance.host))
dst_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, CONF.host))
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
LOG.debug('destination check data is %s', dest_check_data)
try:
migrate_data = self.compute_rpcapi.\
check_can_live_migrate_source(ctxt, instance,
dest_check_data)
finally:
self.driver.cleanup_live_migration_destination_check(ctxt,
dest_check_data)
return migrate_data
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param ctxt: security context
:param instance: dict of instance data
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
is_volume_backed = compute_utils.is_volume_backed_instance(ctxt,
instance)
# TODO(tdurakov): remove dict to object conversion once RPC API version
# is bumped to 5.x
got_migrate_data_object = isinstance(dest_check_data,
migrate_data_obj.LiveMigrateData)
if not got_migrate_data_object:
dest_check_data = \
migrate_data_obj.LiveMigrateData.detect_implementation(
dest_check_data)
dest_check_data.is_volume_backed = is_volume_backed
block_device_info = self._get_instance_block_device_info(
ctxt, instance, refresh_conn_info=False)
result = self.driver.check_can_live_migrate_source(ctxt, instance,
dest_check_data,
block_device_info)
if not got_migrate_data_object:
result = result.to_legacy_dict()
LOG.debug('source check data is %s', result)
return result
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def pre_live_migration(self, context, instance, block_migration, disk,
migrate_data):
"""Preparations for live migration at dest host.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param migrate_data: A dict or LiveMigrateData object holding data
required for live migration without shared
storage.
"""
LOG.debug('pre_live_migration data is %s', migrate_data)
# TODO(tdurakov): remove dict to object conversion once RPC API version
# is bumped to 5.x
got_migrate_data_object = isinstance(migrate_data,
migrate_data_obj.LiveMigrateData)
if not got_migrate_data_object:
migrate_data = \
migrate_data_obj.LiveMigrateData.detect_implementation(
migrate_data)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.start",
network_info=network_info)
migrate_data = self.driver.pre_live_migration(context,
instance,
block_device_info,
network_info,
disk,
migrate_data)
LOG.debug('driver pre_live_migration data is %s', migrate_data)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host)
        # Create filters for the hypervisor and firewall.
        # An example is nova-instance-instance-xxx, which is written to
        # libvirt.xml (check "virsh nwfilter-list"). This nwfilter is
        # necessary on the destination host, so this call also creates the
        # filtering rules on the destination.
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.end",
network_info=network_info)
# TODO(tdurakov): remove dict to object conversion once RPC API version
# is bumped to 5.x
if not got_migrate_data_object and migrate_data:
migrate_data = migrate_data.to_legacy_dict(
pre_migration_result=True)
migrate_data = migrate_data['pre_live_migration_result']
LOG.debug('pre_live_migration result data is %s', migrate_data)
return migrate_data
def _do_live_migration(self, context, dest, instance, block_migration,
migration, migrate_data):
# NOTE(danms): We should enhance the RT to account for migrations
# and use the status field to denote when the accounting has been
# done on source/destination. For now, this is just here for status
# reporting
self._set_migration_status(migration, 'preparing')
got_migrate_data_object = isinstance(migrate_data,
migrate_data_obj.LiveMigrateData)
if not got_migrate_data_object:
migrate_data = \
migrate_data_obj.LiveMigrateData.detect_implementation(
migrate_data)
try:
if ('block_migration' in migrate_data and
migrate_data.block_migration):
block_device_info = self._get_instance_block_device_info(
context, instance)
disk = self.driver.get_instance_disk_info(
instance, block_device_info=block_device_info)
else:
disk = None
migrate_data = self.compute_rpcapi.pre_live_migration(
context, instance,
block_migration, disk, dest, migrate_data)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Pre live migration failed at %s'),
dest, instance=instance)
self._set_migration_status(migration, 'error')
self._rollback_live_migration(context, instance, dest,
migrate_data)
self._set_migration_status(migration, 'running')
if migrate_data:
migrate_data.migration = migration
LOG.debug('live_migration data is %s', migrate_data)
try:
self.driver.live_migration(context, instance, dest,
self._post_live_migration,
self._rollback_live_migration,
block_migration, migrate_data)
except Exception:
LOG.exception(_LE('Live migration failed.'), instance=instance)
with excutils.save_and_reraise_exception():
# Put instance and migration into error state,
                # as it's almost certainly too late to roll back
self._set_migration_status(migration, 'error')
# first refresh instance as it may have got updated by
# post_live_migration_at_destination
instance.refresh()
self._set_instance_obj_error_state(context, instance,
clean_task_state=True)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def live_migration(self, context, dest, instance, block_migration,
migration, migrate_data):
"""Executing live migration.
:param context: security context
:param dest: destination host
:param instance: a nova.objects.instance.Instance object
:param block_migration: if true, prepare for block migration
        :param migration: a nova.objects.Migration object
:param migrate_data: implementation specific params
"""
self._set_migration_status(migration, 'queued')
def dispatch_live_migration(*args, **kwargs):
with self._live_migration_semaphore:
self._do_live_migration(*args, **kwargs)
# NOTE(danms): We spawn here to return the RPC worker thread back to
# the pool. Since what follows could take a really long time, we don't
# want to tie up RPC workers.
utils.spawn_n(dispatch_live_migration,
context, dest, instance,
block_migration, migration,
migrate_data)
# TODO(tdurakov): migration_id is used since 4.12 rpc api version
# remove migration_id parameter when the compute RPC version
# is bumped to 5.x.
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def live_migration_force_complete(self, context, instance,
migration_id=None):
"""Force live migration to complete.
:param context: Security context
:param instance: The instance that is being migrated
        :param migration_id: ID of the ongoing migration; currently unused
                             and kept only for backward compatibility
"""
self._notify_about_instance_usage(
context, instance, 'live.migration.force.complete.start')
self.driver.live_migration_force_complete(instance)
self._notify_about_instance_usage(
context, instance, 'live.migration.force.complete.end')
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def live_migration_abort(self, context, instance, migration_id):
"""Abort an in-progress live migration.
:param context: Security context
:param instance: The instance that is being migrated
:param migration_id: ID of in-progress live migration
"""
migration = objects.Migration.get_by_id(context, migration_id)
if migration.status != 'running':
raise exception.InvalidMigrationState(migration_id=migration_id,
instance_uuid=instance.uuid,
state=migration.status,
method='abort live migration')
self._notify_about_instance_usage(
context, instance, 'live.migration.abort.start')
self.driver.live_migration_abort(instance)
self._notify_about_instance_usage(
context, instance, 'live.migration.abort.end')
def _live_migration_cleanup_flags(self, migrate_data):
"""Determine whether disks or instance path need to be cleaned up after
        live migration (at source on success, at destination on rollback).
        Block migration needs an empty image at the destination host before
        the migration starts, so if any failure occurs, any empty images have
        to be deleted.
        Also, volume-backed live migration w/o shared storage needs to delete
        the newly created instance-xxx dir on the destination as part of its
        rollback process.
:param migrate_data: implementation specific data
:returns: (bool, bool) -- do_cleanup, destroy_disks
"""
# NOTE(pkoniszewski): block migration specific params are set inside
# migrate_data objects for drivers that expose block live migration
# information (i.e. Libvirt, Xenapi and HyperV). For other drivers
# cleanup is not needed.
is_shared_block_storage = True
is_shared_instance_path = True
if isinstance(migrate_data, migrate_data_obj.LibvirtLiveMigrateData):
is_shared_block_storage = migrate_data.is_shared_block_storage
is_shared_instance_path = migrate_data.is_shared_instance_path
elif isinstance(migrate_data, migrate_data_obj.XenapiLiveMigrateData):
is_shared_block_storage = not migrate_data.block_migration
is_shared_instance_path = not migrate_data.block_migration
elif isinstance(migrate_data, migrate_data_obj.HyperVLiveMigrateData):
is_shared_instance_path = migrate_data.is_shared_instance_path
is_shared_block_storage = migrate_data.is_shared_instance_path
        # No instance is booting at the source host, but the instance dir
        # must be deleted to prepare for the next block migration or live
        # migration without shared storage.
do_cleanup = not is_shared_instance_path
destroy_disks = not is_shared_block_storage
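        # e.g. a Libvirt block live migration reports
        # is_shared_block_storage=False and is_shared_instance_path=False,
        # yielding (do_cleanup=True, destroy_disks=True).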
return (do_cleanup, destroy_disks)
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance,
dest, block_migration=False, migrate_data=None):
"""Post operations for live migration.
        This method is called from live_migration and mainly updates
        database records.
:param ctxt: security context
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which has data
required for live migration without shared storage
"""
        LOG.info(_LI('_post_live_migration() started.'),
instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance.uuid)
# Cleanup source host post live-migration
block_device_info = self._get_instance_block_device_info(
ctxt, instance, bdms=bdms)
self.driver.post_live_migration(ctxt, instance, block_device_info,
migrate_data)
# Detaching volumes.
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
# remove the volume connection without detaching from hypervisor
# because the instance is not running anymore on the current host
if bdm.is_volume:
self.volume_api.terminate_connection(ctxt, bdm.volume_id,
connector)
# Releasing vlan.
# (not necessary in current implementation?)
network_info = self.network_api.get_instance_nw_info(ctxt, instance)
self._notify_about_instance_usage(ctxt, instance,
"live_migration._post.start",
network_info=network_info)
# Releasing security group ingress rule.
LOG.debug('Calling driver.unfilter_instance from _post_live_migration',
instance=instance)
self.driver.unfilter_instance(instance,
network_info)
migration = {'source_compute': self.host,
'dest_compute': dest, }
self.network_api.migrate_instance_start(ctxt,
instance,
migration)
destroy_vifs = False
try:
self.driver.post_live_migration_at_source(ctxt, instance,
network_info)
except NotImplementedError as ex:
LOG.debug(ex, instance=instance)
            # For all hypervisors other than libvirt, there is a possibility
            # that they unplug networks from the source node in the cleanup
            # method.
destroy_vifs = True
        # Define the domain at the destination host; without doing so,
        # pause/suspend/terminate do not work.
try:
self.compute_rpcapi.post_live_migration_at_destination(ctxt,
instance, block_migration, dest)
except Exception as error:
# We don't want to break _post_live_migration() if
# post_live_migration_at_destination() fails as it should never
# affect cleaning up source node.
LOG.exception(_LE("Post live migration at destination %s failed"),
dest, instance=instance, error=error)
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
migrate_data)
if do_cleanup:
LOG.debug('Calling driver.cleanup from _post_live_migration',
instance=instance)
self.driver.cleanup(ctxt, instance, network_info,
destroy_disks=destroy_disks,
migrate_data=migrate_data,
destroy_vifs=destroy_vifs)
self.instance_events.clear_events_for_instance(instance)
# NOTE(timello): make sure we update available resources on source
# host even before next periodic task.
self.update_available_resource(ctxt)
self._update_scheduler_instance_info(ctxt, instance)
self._notify_about_instance_usage(ctxt, instance,
"live_migration._post.end",
network_info=network_info)
LOG.info(_LI('Migrating instance to %s finished successfully.'),
dest, instance=instance)
LOG.info(_LI("You may see the error \"libvirt: QEMU error: "
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."),
instance=instance)
if migrate_data and migrate_data.obj_attr_is_set('migration'):
migrate_data.migration.status = 'completed'
migrate_data.migration.save()
def _consoles_enabled(self):
"""Returns whether a console is enable."""
return (CONF.vnc.enabled or CONF.spice.enabled or
CONF.rdp.enabled or CONF.serial_console.enabled or
CONF.mks.enabled)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def post_live_migration_at_destination(self, context, instance,
block_migration):
"""Post operations for live migration .
:param context: security context
:param instance: Instance dict
:param block_migration: if true, prepare for block migration
"""
LOG.info(_LI('Post operation of migration started'),
instance=instance)
# NOTE(tr3buchet): setup networks on destination host
# this is called a second time because
# multi_host does not create the bridge in
# plug_vifs
self.network_api.setup_networks_on_host(context, instance,
self.host)
migration = {'source_compute': instance.host,
'dest_compute': self.host, }
self.network_api.migrate_instance_finish(context,
instance,
migration)
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.start",
network_info=network_info)
block_device_info = self._get_instance_block_device_info(context,
instance)
try:
self.driver.post_live_migration_at_destination(
context, instance, network_info, block_migration,
block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
instance.vm_state = vm_states.ERROR
LOG.error(_LE('Unexpected error during post live migration at '
'destination host.'), instance=instance)
finally:
# Restore instance state and update host
current_power_state = self._get_power_state(context, instance)
node_name = None
prev_host = instance.host
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node.hypervisor_hostname
except exception.ComputeHostNotFound:
LOG.exception(_LE('Failed to get compute_info for %s'),
self.host)
finally:
instance.host = self.host
instance.power_state = current_power_state
instance.task_state = None
instance.node = node_name
instance.progress = 0
instance.save(expected_task_state=task_states.MIGRATING)
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
prev_host, teardown=True)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.end",
network_info=network_info)
@wrap_exception()
@wrap_instance_fault
def _rollback_live_migration(self, context, instance,
dest, migrate_data=None,
migration_status='error'):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
:param instance: nova.objects.instance.Instance object
        :param dest:
            This method is called from the live migration source host;
            this param specifies the destination host.
:param migrate_data:
            if not None, contains implementation-specific data.
:param migration_status:
Contains the status we want to set for the migration object
"""
instance.task_state = None
instance.progress = 0
instance.save(expected_task_state=[task_states.MIGRATING])
# TODO(tdurakov): remove dict to object conversion once RPC API version
# is bumped to 5.x
if isinstance(migrate_data, dict):
migration = migrate_data.pop('migration', None)
migrate_data = \
migrate_data_obj.LiveMigrateData.detect_implementation(
migrate_data)
elif (isinstance(migrate_data, migrate_data_obj.LiveMigrateData) and
migrate_data.obj_attr_is_set('migration')):
migration = migrate_data.migration
else:
migration = None
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
self.network_api.setup_networks_on_host(context, instance, self.host)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
for bdm in bdms:
if bdm.is_volume:
self.compute_rpcapi.remove_volume_connection(
context, instance, bdm.volume_id, dest)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.start")
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
migrate_data)
if do_cleanup:
self.compute_rpcapi.rollback_live_migration_at_destination(
context, instance, dest, destroy_disks=destroy_disks,
migrate_data=migrate_data)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.end")
self._set_migration_status(migration, migration_status)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def rollback_live_migration_at_destination(self, context, instance,
destroy_disks,
migrate_data):
"""Cleaning up image directory that is created pre_live_migration.
:param context: security context
:param instance: a nova.objects.instance.Instance object sent over rpc
"""
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.start",
network_info=network_info)
try:
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host, teardown=True)
except Exception:
with excutils.save_and_reraise_exception():
# NOTE(tdurakov): even if teardown networks fails driver
# should try to rollback live migration on destination.
LOG.exception(
_LE('An error occurred while deallocating network.'),
instance=instance)
finally:
# always run this even if setup_networks_on_host fails
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_block_device_info(context,
instance)
# TODO(tdurakov): remove dict to object conversion once RPC API
# version is bumped to 5.x
if isinstance(migrate_data, dict):
migrate_data = \
migrate_data_obj.LiveMigrateData.detect_implementation(
migrate_data)
self.driver.rollback_live_migration_at_destination(
context, instance, network_info, block_device_info,
destroy_disks=destroy_disks, migrate_data=migrate_data)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.end",
network_info=network_info)
@periodic_task.periodic_task(
spacing=CONF.heal_instance_info_cache_interval)
def _heal_instance_info_cache(self, context):
"""Called periodically. On every call, try to update the
info_cache's network information for another instance by
        calling the network manager.
This is implemented by keeping a cache of uuids of instances
that live on this host. On each call, we pop one off of a
list, pull the DB record, and try the call to the network API.
        If anything errors, don't fail, as it's possible the instance
has been deleted, etc.
"""
heal_interval = CONF.heal_instance_info_cache_interval
if not heal_interval:
return
instance_uuids = getattr(self, '_instance_uuids_to_heal', [])
instance = None
LOG.debug('Starting heal instance info cache')
if not instance_uuids:
# The list of instances to heal is empty so rebuild it
LOG.debug('Rebuilding the list of instances to heal')
db_instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=[], use_slave=True)
for inst in db_instances:
# We don't want to refresh the cache for instances
# which are building or deleting so don't put them
# in the list. If they are building they will get
# added to the list next time we build it.
if (inst.vm_state == vm_states.BUILDING):
LOG.debug('Skipping network cache update for instance '
                          'because it is building.', instance=inst)
continue
if (inst.task_state == task_states.DELETING):
LOG.debug('Skipping network cache update for instance '
'because it is being deleted.', instance=inst)
continue
if not instance:
# Save the first one we find so we don't
# have to get it again
instance = inst
else:
instance_uuids.append(inst['uuid'])
self._instance_uuids_to_heal = instance_uuids
else:
# Find the next valid instance on the list
while instance_uuids:
try:
inst = objects.Instance.get_by_uuid(
context, instance_uuids.pop(0),
expected_attrs=['system_metadata', 'info_cache',
'flavor'],
use_slave=True)
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
# Check the instance hasn't been migrated
if inst.host != self.host:
LOG.debug('Skipping network cache update for instance '
'because it has been migrated to another '
'host.', instance=inst)
                # Check the instance isn't being deleted
elif inst.task_state == task_states.DELETING:
LOG.debug('Skipping network cache update for instance '
'because it is being deleted.', instance=inst)
else:
instance = inst
break
if instance:
# We have an instance now to refresh
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
self.network_api.get_instance_nw_info(context, instance)
LOG.debug('Updated the network info_cache for instance',
instance=instance)
except exception.InstanceNotFound:
# Instance is gone.
LOG.debug('Instance no longer exists. Unable to refresh',
instance=instance)
return
except exception.InstanceInfoCacheNotFound:
# InstanceInfoCache is gone.
LOG.debug('InstanceInfoCache no longer exists. '
'Unable to refresh', instance=instance)
except Exception:
LOG.error(_LE('An error occurred while refreshing the network '
'cache.'), instance=instance, exc_info=True)
else:
LOG.debug("Didn't find any instances for network info cache "
"update.")
@periodic_task.periodic_task
def _poll_rebooting_instances(self, context):
if CONF.reboot_timeout > 0:
filters = {'task_state':
[task_states.REBOOTING,
task_states.REBOOT_STARTED,
task_states.REBOOT_PENDING],
'host': self.host}
rebooting = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=[], use_slave=True)
to_poll = []
for instance in rebooting:
if timeutils.is_older_than(instance.updated_at,
CONF.reboot_timeout):
to_poll.append(instance)
self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)
@periodic_task.periodic_task
def _poll_rescued_instances(self, context):
if CONF.rescue_timeout > 0:
filters = {'vm_state': vm_states.RESCUED,
'host': self.host}
rescued_instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=["system_metadata"],
use_slave=True)
to_unrescue = []
for instance in rescued_instances:
if timeutils.is_older_than(instance.launched_at,
CONF.rescue_timeout):
to_unrescue.append(instance)
for instance in to_unrescue:
self.compute_api.unrescue(context, instance)
@periodic_task.periodic_task
def _poll_unconfirmed_resizes(self, context):
if CONF.resize_confirm_window == 0:
return
migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
context, CONF.resize_confirm_window, self.host,
use_slave=True)
migrations_info = dict(migration_count=len(migrations),
confirm_window=CONF.resize_confirm_window)
if migrations_info["migration_count"] > 0:
LOG.info(_LI("Found %(migration_count)d unconfirmed migrations "
"older than %(confirm_window)d seconds"),
migrations_info)
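        # A migration is "unconfirmed" once the resize has finished but the
        # user never called the confirm-resize API; anything older than
        # CONF.resize_confirm_window seconds is confirmed on their behalf
        # below.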
def _set_migration_to_error(migration, reason, **kwargs):
LOG.warning(_LW("Setting migration %(migration_id)s to error: "
"%(reason)s"),
{'migration_id': migration['id'], 'reason': reason},
**kwargs)
migration.status = 'error'
with migration.obj_as_admin():
migration.save()
for migration in migrations:
instance_uuid = migration.instance_uuid
LOG.info(_LI("Automatically confirming migration "
"%(migration_id)s for instance %(instance_uuid)s"),
{'migration_id': migration.id,
'instance_uuid': instance_uuid})
expected_attrs = ['metadata', 'system_metadata']
try:
instance = objects.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=expected_attrs,
use_slave=True)
except exception.InstanceNotFound:
reason = (_("Instance %s not found") %
instance_uuid)
_set_migration_to_error(migration, reason)
continue
if instance.vm_state == vm_states.ERROR:
reason = _("In ERROR state")
_set_migration_to_error(migration, reason,
instance=instance)
continue
            # race condition: the migration record must not be set to error
            # for an instance in DELETING task state; otherwise an instance
            # that is being deleted while in RESIZED vm state would never
            # be able to confirm the resize
if instance.task_state in [task_states.DELETING,
task_states.SOFT_DELETING]:
msg = ("Instance being deleted or soft deleted during resize "
"confirmation. Skipping.")
LOG.debug(msg, instance=instance)
continue
# race condition: This condition is hit when this method is
# called between the save of the migration record with a status of
# finished and the save of the instance object with a state of
# RESIZED. The migration record should not be set to error.
if instance.task_state == task_states.RESIZE_FINISH:
msg = ("Instance still resizing during resize "
"confirmation. Skipping.")
LOG.debug(msg, instance=instance)
continue
vm_state = instance.vm_state
task_state = instance.task_state
if vm_state != vm_states.RESIZED or task_state is not None:
reason = (_("In states %(vm_state)s/%(task_state)s, not "
"RESIZED/None") %
{'vm_state': vm_state,
'task_state': task_state})
_set_migration_to_error(migration, reason,
instance=instance)
continue
try:
self.compute_api.confirm_resize(context, instance,
migration=migration)
except Exception as e:
LOG.info(_LI("Error auto-confirming resize: %s. "
"Will retry later."),
e, instance=instance)
@periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)
def _poll_shelved_instances(self, context):
if CONF.shelved_offload_time <= 0:
return
filters = {'vm_state': vm_states.SHELVED,
'task_state': None,
'host': self.host}
shelved_instances = objects.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
use_slave=True)
to_gc = []
for instance in shelved_instances:
sys_meta = instance.system_metadata
shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
to_gc.append(instance)
for instance in to_gc:
try:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=(None,))
self.shelve_offload_instance(context, instance,
clean_shutdown=False)
except Exception:
LOG.exception(_LE('Periodic task failed to offload instance.'),
instance=instance)
@periodic_task.periodic_task
def _instance_usage_audit(self, context):
if not CONF.instance_usage_audit:
return
begin, end = utils.last_completed_audit_period()
if objects.TaskLog.get(context, 'instance_usage_audit', begin, end,
self.host):
return
instances = objects.InstanceList.get_active_by_window_joined(
context, begin, end, host=self.host,
expected_attrs=['system_metadata', 'info_cache', 'metadata',
'flavor'],
use_slave=True)
num_instances = len(instances)
errors = 0
successes = 0
LOG.info(_LI("Running instance usage audit for"
" host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s"
" instances."),
{'host': self.host,
'begin_time': begin,
'end_time': end,
'number_instances': num_instances})
start_time = time.time()
task_log = objects.TaskLog(context)
task_log.task_name = 'instance_usage_audit'
task_log.period_beginning = begin
task_log.period_ending = end
task_log.host = self.host
task_log.task_items = num_instances
task_log.message = 'Instance usage audit started...'
task_log.begin_task()
for instance in instances:
try:
compute_utils.notify_usage_exists(
self.notifier, context, instance,
ignore_missing_network_data=False)
successes += 1
except Exception:
LOG.exception(_LE('Failed to generate usage '
'audit for instance '
'on host %s'), self.host,
instance=instance)
errors += 1
task_log.errors = errors
task_log.message = (
'Instance usage audit ran for host %s, %s instances in %s seconds.'
% (self.host, num_instances, time.time() - start_time))
task_log.end_task()
@periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval)
def _poll_bandwidth_usage(self, context):
if not self._bw_usage_supported:
return
prev_time, start_time = utils.last_completed_audit_period()
curr_time = time.time()
if (curr_time - self._last_bw_usage_poll >
CONF.bandwidth_poll_interval):
self._last_bw_usage_poll = curr_time
LOG.info(_LI("Updating bandwidth usage cache"))
cells_update_interval = CONF.cells.bandwidth_update_interval
if (cells_update_interval > 0 and
curr_time - self._last_bw_usage_cell_update >
cells_update_interval):
self._last_bw_usage_cell_update = curr_time
update_cells = True
else:
update_cells = False
instances = objects.InstanceList.get_by_host(context,
self.host,
use_slave=True)
try:
bw_counters = self.driver.get_all_bw_counters(instances)
except NotImplementedError:
# NOTE(mdragon): Not all hypervisors have bandwidth polling
# implemented yet. If they don't it doesn't break anything,
# they just don't get the info in the usage events.
# NOTE(PhilDay): Record that its not supported so we can
# skip fast on future calls rather than waste effort getting
# the list of instances.
LOG.info(_LI("Bandwidth usage not supported by "
"hypervisor."))
self._bw_usage_supported = False
return
refreshed = timeutils.utcnow()
for bw_ctr in bw_counters:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
bw_in = 0
bw_out = 0
last_ctr_in = None
last_ctr_out = None
usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=start_time, use_slave=True)
if usage:
bw_in = usage.bw_in
bw_out = usage.bw_out
last_ctr_in = usage.last_ctr_in
last_ctr_out = usage.last_ctr_out
else:
usage = (objects.BandwidthUsage.
get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=prev_time, use_slave=True))
if usage:
last_ctr_in = usage.last_ctr_in
last_ctr_out = usage.last_ctr_out
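                # Example of the rollover handling below, assuming the
                # hypervisor counter was reset: last_ctr_in=900 and
                # bw_ctr['bw_in']=50 means the counter wrapped, so only
                # the 50 bytes seen since the reset can be attributed.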
if last_ctr_in is not None:
if bw_ctr['bw_in'] < last_ctr_in:
# counter rollover
bw_in += bw_ctr['bw_in']
else:
bw_in += (bw_ctr['bw_in'] - last_ctr_in)
if last_ctr_out is not None:
if bw_ctr['bw_out'] < last_ctr_out:
# counter rollover
bw_out += bw_ctr['bw_out']
else:
bw_out += (bw_ctr['bw_out'] - last_ctr_out)
objects.BandwidthUsage(context=context).create(
bw_ctr['uuid'],
bw_ctr['mac_address'],
bw_in,
bw_out,
bw_ctr['bw_in'],
bw_ctr['bw_out'],
start_period=start_time,
last_refreshed=refreshed,
update_cells=update_cells)
def _get_host_volume_bdms(self, context, use_slave=False):
"""Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = objects.InstanceList.get_by_host(context, self.host,
use_slave=use_slave)
for instance in instances:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=use_slave)
instance_bdms = [bdm for bdm in bdms if bdm.is_volume]
compute_host_bdms.append(dict(instance=instance,
instance_bdms=instance_bdms))
return compute_host_bdms
def _update_volume_usage_cache(self, context, vol_usages):
"""Updates the volume usage cache table with a list of stats."""
for usage in vol_usages:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
vol_usage = objects.VolumeUsage(context)
vol_usage.volume_id = usage['volume']
vol_usage.instance_uuid = usage['instance'].uuid
vol_usage.project_id = usage['instance'].project_id
vol_usage.user_id = usage['instance'].user_id
vol_usage.availability_zone = usage['instance'].availability_zone
vol_usage.curr_reads = usage['rd_req']
vol_usage.curr_read_bytes = usage['rd_bytes']
vol_usage.curr_writes = usage['wr_req']
vol_usage.curr_write_bytes = usage['wr_bytes']
vol_usage.save()
self.notifier.info(context, 'volume.usage',
compute_utils.usage_volume_info(vol_usage))
@periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval)
def _poll_volume_usage(self, context):
if CONF.volume_usage_poll_interval == 0:
return
compute_host_bdms = self._get_host_volume_bdms(context,
use_slave=True)
if not compute_host_bdms:
return
LOG.debug("Updating volume usage cache")
try:
vol_usages = self.driver.get_all_volume_usage(context,
compute_host_bdms)
except NotImplementedError:
return
self._update_volume_usage_cache(context, vol_usages)
@periodic_task.periodic_task(spacing=CONF.sync_power_state_interval,
run_immediately=True)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
        To sync power state data we query the database for the instances
        assigned to this host and ask the driver how many virtual machines
        it knows about; if the two counts differ, a warning is logged. We
        then proceed in a lazy loop, one database record at a time, checking
        if the hypervisor has the same power state as is in the database.
"""
db_instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
if num_vm_instances != num_db_instances:
LOG.warning(_LW("While synchronizing instance power states, found "
"%(num_db_instances)s instances in the database "
"and %(num_vm_instances)s instances on the "
"hypervisor."),
{'num_db_instances': num_db_instances,
'num_vm_instances': num_vm_instances})
def _sync(db_instance):
            # NOTE(melwitt): This must be synchronized as we query state from
            # two separate sources, the driver and the database. They are
            # set (in stop_instance) and read here, in the sync.
@utils.synchronized(db_instance.uuid)
def query_driver_power_state_and_sync():
self._query_driver_power_state_and_sync(context, db_instance)
try:
query_driver_power_state_and_sync()
except Exception:
LOG.exception(_LE("Periodic sync_power_state task had an "
"error while processing an instance."),
instance=db_instance)
self._syncs_in_progress.pop(db_instance.uuid)
for db_instance in db_instances:
# process syncs asynchronously - don't want instance locking to
# block entire periodic task thread
uuid = db_instance.uuid
if uuid in self._syncs_in_progress:
LOG.debug('Sync already in progress for %s', uuid)
else:
LOG.debug('Triggering sync for uuid %s', uuid)
self._syncs_in_progress[uuid] = True
self._sync_power_pool.spawn_n(_sync, db_instance)
def _query_driver_power_state_and_sync(self, context, db_instance):
if db_instance.task_state is not None:
LOG.info(_LI("During sync_power_state the instance has a "
"pending task (%(task)s). Skip."),
{'task': db_instance.task_state}, instance=db_instance)
return
# No pending tasks. Now try to figure out the real vm_power_state.
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance.state
except exception.InstanceNotFound:
vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
try:
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
use_slave=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore.
pass
def _sync_instance_power_state(self, context, db_instance, vm_power_state,
use_slave=False):
"""Align instance power state between the database and hypervisor.
If the instance is not found on the hypervisor, but is in the database,
then a stop() API will be called on the instance.
"""
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
db_instance.refresh(use_slave=use_slave)
db_power_state = db_instance.power_state
vm_state = db_instance.vm_state
if self.host != db_instance.host:
# on the sending end of nova-compute _sync_power_state
# may have yielded to the greenthread performing a live
# migration; this in turn has changed the resident-host
# for the VM; However, the instance is still active, it
# is just in the process of migrating to another host.
# This implies that the compute source must relinquish
# control to the compute destination.
LOG.info(_LI("During the sync_power process the "
"instance has moved from "
"host %(src)s to host %(dst)s"),
{'src': db_instance.host,
'dst': self.host},
instance=db_instance)
return
elif db_instance.task_state is not None:
            # on the receiving end of nova-compute, it could happen
            # that the DB instance already reports the new resident
            # host but the actual VM has not shown up on the hypervisor
            # yet. In this case, let the loop continue and run the
            # state sync in a later round
LOG.info(_LI("During sync_power_state the instance has a "
"pending task (%(task)s). Skip."),
{'task': db_instance.task_state},
instance=db_instance)
return
orig_db_power_state = db_power_state
if vm_power_state != db_power_state:
LOG.info(_LI('During _sync_instance_power_state the DB '
'power_state (%(db_power_state)s) does not match '
'the vm_power_state from the hypervisor '
'(%(vm_power_state)s). Updating power_state in the '
'DB to match the hypervisor.'),
{'db_power_state': db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
# power_state is always updated from hypervisor to db
db_instance.power_state = vm_power_state
db_instance.save()
db_power_state = vm_power_state
# Note(maoy): Now resolve the discrepancy between vm_state and
# vm_power_state. We go through all possible vm_states.
if vm_state in (vm_states.BUILDING,
vm_states.RESCUED,
vm_states.RESIZED,
vm_states.SUSPENDED,
vm_states.ERROR):
# TODO(maoy): we ignore these vm_state for now.
pass
elif vm_state == vm_states.ACTIVE:
# The only rational power state should be RUNNING
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Instance shutdown by itself. Calling the "
"stop API. Current vm_state: %(vm_state)s, "
"current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s"),
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
try:
# Note(maoy): here we call the API instead of
# brutally updating the vm_state in the database
# to allow all the hooks and checks to be performed.
if db_instance.shutdown_terminate:
self.compute_api.delete(context, db_instance)
else:
self.compute_api.stop(context, db_instance)
except Exception:
# Note(maoy): there is no need to propagate the error
# because the same power_state will be retrieved next
# time and retried.
# For example, there might be another task scheduled.
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.SUSPENDED:
LOG.warning(_LW("Instance is suspended unexpectedly. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.PAUSED:
# Note(maoy): a VM may get into the paused state not only
# because the user request via API calls, but also
# due to (temporary) external instrumentations.
# Before the virt layer can reliably report the reason,
# we simply ignore the state discrepancy. In many cases,
# the VM state will go back to running after the external
# instrumentation is done. See bug 1097806 for details.
LOG.warning(_LW("Instance is paused unexpectedly. Ignore."),
instance=db_instance)
elif vm_power_state == power_state.NOSTATE:
# Occasionally, depending on the status of the hypervisor,
# which could be restarting for example, an instance may
# not be found. Therefore just log the condition.
LOG.warning(_LW("Instance is unexpectedly not found. Ignore."),
instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Instance is not stopped. Calling "
"the stop API. Current vm_state: %(vm_state)s,"
" current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s"),
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
try:
# NOTE(russellb) Force the stop, because normally the
# compute API would not allow an attempt to stop a stopped
# instance.
self.compute_api.force_stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state == vm_states.PAUSED:
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning(_LW("Paused instance shutdown by itself. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.force_stop(context, db_instance)
except Exception:
LOG.exception(_LE("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN):
# Note(maoy): this should be taken care of periodically in
# _cleanup_running_deleted_instances().
LOG.warning(_LW("Instance is not (soft-)deleted."),
instance=db_instance)
@periodic_task.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
interval = CONF.reclaim_instance_interval
if interval <= 0:
LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
return
        # TODO(comstud, jichenjc): Dummy quota object for now. See bug
        # 1296414. The only case where the quota might become inconsistent
        # is if the compute node died between setting the instance state to
        # SOFT_DELETED and committing the quota to the DB. When the compute
        # node starts again it has no way to tell whether the reservation
        # was committed or has expired; since this is a rare case, it is
        # left as a todo.
quotas = objects.Quotas.from_reservations(context, None)
filters = {'vm_state': vm_states.SOFT_DELETED,
'task_state': None,
'host': self.host}
instances = objects.InstanceList.get_by_filters(
context, filters,
expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,
use_slave=True)
for instance in instances:
if self._deleted_old_enough(instance, interval):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
LOG.info(_LI('Reclaiming deleted instance'), instance=instance)
try:
self._delete_instance(context, instance, bdms, quotas)
except Exception as e:
LOG.warning(_LW("Periodic reclaim failed to delete "
"instance: %s"),
e, instance=instance)
def update_available_resource_for_node(self, context, nodename):
rt = self._get_resource_tracker()
try:
rt.update_available_resource(context, nodename)
except exception.ComputeHostNotFound:
# NOTE(comstud): We can get to this case if a node was
# marked 'deleted' in the DB and then re-added with a
# different auto-increment id. The cached resource
# tracker tried to update a deleted record and failed.
# Don't add this resource tracker to the new dict, so
# that this will resolve itself on the next run.
LOG.info(_LI("Compute node '%s' not found in "
"update_available_resource."), nodename)
# TODO(jaypipes): Yes, this is inefficient to throw away all of the
# compute nodes to force a rebuild, but this is only temporary
# until Ironic baremetal node resource providers are tracked
# properly in the report client and this is a tiny edge case
# anyway.
self._resource_tracker = None
return
except Exception:
LOG.exception(_LE("Error updating resources for node "
"%(node)s."), {'node': nodename})
@periodic_task.periodic_task(spacing=CONF.update_resources_interval)
def update_available_resource(self, context, startup=False):
"""See driver.get_available_resource()
        Periodic process that keeps the compute host's understanding of
        resource availability and usage in sync with the underlying
        hypervisor.
:param context: security context
:param startup: True if this is being called when the nova-compute
service is starting, False otherwise.
"""
compute_nodes_in_db = self._get_compute_nodes_in_db(context,
use_slave=True,
startup=startup)
nodenames = set(self.driver.get_available_nodes())
for nodename in nodenames:
self.update_available_resource_for_node(context, nodename)
        # Delete orphan compute nodes not reported by the driver but
        # still in the DB
for cn in compute_nodes_in_db:
if cn.hypervisor_hostname not in nodenames:
LOG.info(_LI("Deleting orphan compute node %(id)s "
"hypervisor host is %(hh)s, "
"nodes are %(nodes)s"),
{'id': cn.id, 'hh': cn.hypervisor_hostname,
'nodes': nodenames})
cn.destroy()
# Delete the corresponding resource provider in placement,
# along with any associated allocations and inventory.
# TODO(cdent): Move use of reportclient into resource tracker.
self.scheduler_client.reportclient.delete_resource_provider(
context, cn, cascade=True)
def _get_compute_nodes_in_db(self, context, use_slave=False,
startup=False):
try:
return objects.ComputeNodeList.get_all_by_host(context, self.host,
use_slave=use_slave)
except exception.NotFound:
if startup:
LOG.warning(
_LW("No compute node record found for host %s. If this is "
"the first time this service is starting on this "
"host, then you can ignore this warning."), self.host)
else:
LOG.error(_LE("No compute node record for host %s"), self.host)
return []
@periodic_task.periodic_task(
spacing=CONF.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
Valid actions to take are:
1. noop - do nothing
2. log - log which instances are erroneously running
3. reap - shutdown and cleanup any erroneously running instances
4. shutdown - power off *and disable* any erroneously running
instances
The use-case for this cleanup task is: for various reasons, it may be
possible for the database to show an instance as deleted but for that
instance to still be running on a host machine (see bug
https://bugs.launchpad.net/nova/+bug/911366).
This cleanup task is a cross-hypervisor utility for finding these
zombied instances and either logging the discrepancy (likely what you
should do in production), or automatically reaping the instances (more
appropriate for dev environments).
"""
action = CONF.running_deleted_instance_action
if action == "noop":
return
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
if action == "log":
LOG.warning(_LW("Detected instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
elif action == 'shutdown':
LOG.info(_LI("Powering off instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
try:
try:
# disable starting the instance
self.driver.set_bootable(instance, False)
except NotImplementedError:
LOG.debug("set_bootable is not implemented "
"for the current driver")
# and power it off
self.driver.power_off(instance)
except Exception:
msg = _LW("Failed to power off instance")
LOG.warning(msg, instance=instance, exc_info=True)
elif action == 'reap':
LOG.info(_LI("Destroying instance with name label "
"'%s' which is marked as "
"DELETED but still present on host."),
instance.name, instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=True)
self.instance_events.clear_events_for_instance(instance)
try:
self._shutdown_instance(context, instance, bdms,
notify=False)
self._cleanup_volumes(context, instance.uuid, bdms)
except Exception as e:
LOG.warning(_LW("Periodic cleanup failed to delete "
"instance: %s"),
e, instance=instance)
else:
raise Exception(_("Unrecognized value '%s'"
" for CONF.running_deleted_"
"instance_action") % action)
def _running_deleted_instances(self, context):
"""Returns a list of instances nova thinks is deleted,
but the hypervisor thinks is still running.
"""
timeout = CONF.running_deleted_instance_timeout
filters = {'deleted': True,
'soft_deleted': False,
'host': self.host}
instances = self._get_instances_on_driver(context, filters)
return [i for i in instances if self._deleted_old_enough(i, timeout)]
def _deleted_old_enough(self, instance, timeout):
deleted_at = instance.deleted_at
if deleted_at:
deleted_at = deleted_at.replace(tzinfo=None)
return (not deleted_at or timeutils.is_older_than(deleted_at, timeout))
@contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance,
quotas=None,
instance_state=vm_states.ACTIVE):
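        # Typical usage, a sketch:
        #   with self._error_out_instance_on_exception(context, instance):
        #       risky_driver_call()  # hypothetical operation
        # Any unexpected exception puts the instance into ERROR state
        # before being re-raised.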
instance_uuid = instance.uuid
try:
yield
except NotImplementedError as error:
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
LOG.info(_LI("Setting instance back to %(state)s after: "
"%(error)s"),
{'state': instance_state, 'error': error},
instance_uuid=instance_uuid)
self._instance_update(context, instance,
vm_state=instance_state,
task_state=None)
except exception.InstanceFaultRollback as error:
if quotas:
quotas.rollback()
LOG.info(_LI("Setting instance back to ACTIVE after: %s"),
error, instance_uuid=instance_uuid)
self._instance_update(context, instance,
vm_state=vm_states.ACTIVE,
task_state=None)
raise error.inner_exception
except Exception:
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance_uuid=instance_uuid)
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
self._set_instance_obj_error_state(context, instance)
@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):
"""Notify hypervisor of change (for hypervisor pools)."""
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'add_aggregate_host')
except exception.AggregateError:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.delete_host,
aggregate, host)
@wrap_exception()
def remove_aggregate_host(self, context, host, slave_info, aggregate):
"""Removes a host from a physical hypervisor pool."""
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'remove_aggregate_host')
except (exception.AggregateError,
exception.InvalidAggregateAction) as e:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.add_host,
aggregate, host,
isinstance(e, exception.AggregateError))
def _process_instance_event(self, instance, event):
_event = self.instance_events.pop_instance_event(instance, event)
if _event:
LOG.debug('Processing event %(event)s',
{'event': event.key}, instance=instance)
_event.send(event)
else:
LOG.warning(_LW('Received unexpected event %(event)s for '
'instance'),
{'event': event.key}, instance=instance)
def _process_instance_vif_deleted_event(self, context, instance,
deleted_vif_id):
# If an attached port is deleted by neutron, it needs to
# be detached from the instance.
# And info cache needs to be updated.
network_info = instance.info_cache.network_info
for index, vif in enumerate(network_info):
if vif['id'] == deleted_vif_id:
LOG.info(_LI('Neutron deleted interface %(intf)s; '
'detaching it from the instance and '
'deleting it from the info cache'),
{'intf': vif['id']},
instance=instance)
del network_info[index]
base_net_api.update_instance_cache_with_nw_info(
self.network_api, context,
instance,
nw_info=network_info)
try:
self.driver.detach_interface(context, instance, vif)
except NotImplementedError:
# Not all virt drivers support attach/detach of interfaces
# yet (like Ironic), so just ignore this.
pass
except exception.NovaException as ex:
LOG.warning(_LW("Detach interface failed, "
"port_id=%(port_id)s, reason: %(msg)s"),
{'port_id': deleted_vif_id, 'msg': ex},
instance=instance)
break
@wrap_exception()
def external_instance_event(self, context, instances, events):
# NOTE(danms): Some event types are handled by the manager, such
# as when we're asked to update the instance's info_cache. If it's
# not one of those, look for some thread(s) waiting for the event and
# unblock them if so.
for event in events:
instance = [inst for inst in instances
if inst.uuid == event.instance_uuid][0]
LOG.debug('Received event %(event)s',
{'event': event.key},
instance=instance)
if event.name == 'network-changed':
try:
self.network_api.get_instance_nw_info(context, instance)
except exception.NotFound as e:
LOG.info(_LI('Failed to process external instance event '
'%(event)s due to: %(error)s'),
{'event': event.key, 'error': six.text_type(e)},
instance=instance)
elif event.name == 'network-vif-deleted':
try:
self._process_instance_vif_deleted_event(context,
instance,
event.tag)
except exception.NotFound as e:
LOG.info(_LI('Failed to process external instance event '
'%(event)s due to: %(error)s'),
{'event': event.key, 'error': six.text_type(e)},
instance=instance)
else:
self._process_instance_event(instance, event)
@periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval,
external_process_ok=True)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
if not self.driver.capabilities["has_imagecache"]:
return
# Determine what other nodes use this storage
storage_users.register_storage_use(CONF.instances_path, CONF.host)
nodes = storage_users.get_storage_users(CONF.instances_path)
# Filter all_instances to only include those nodes which share this
# storage path.
# TODO(mikal): this should be further refactored so that the cache
# cleanup code doesn't know what those instances are, just a remote
# count, and then this logic should be pushed up the stack.
filters = {'deleted': False,
'soft_deleted': True,
'host': nodes}
filtered_instances = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
self.driver.manage_image_cache(context, filtered_instances)
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _run_pending_deletes(self, context):
"""Retry any pending instance file deletes."""
LOG.debug('Cleaning up deleted instances')
filters = {'deleted': True,
'soft_deleted': False,
'host': CONF.host,
'cleaned': False}
attrs = ['system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=attrs, use_slave=True)
LOG.debug('There are %d instances to clean', len(instances))
# TODO(raj_singh): Remove this if condition when min value is
# introduced to "maximum_instance_delete_attempts" cfg option.
if CONF.maximum_instance_delete_attempts < 1:
LOG.warning(_LW('Future versions of Nova will restrict the '
'"maximum_instance_delete_attempts" config option '
'to values >=1. Update your configuration file to '
'mitigate future upgrade issues.'))
for instance in instances:
attempts = int(instance.system_metadata.get('clean_attempts', '0'))
LOG.debug('Instance has had %(attempts)s of %(max)s '
'cleanup attempts',
{'attempts': attempts,
'max': CONF.maximum_instance_delete_attempts},
instance=instance)
if attempts < CONF.maximum_instance_delete_attempts:
success = self.driver.delete_instance_files(instance)
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
with utils.temporary_mutation(context, read_deleted='yes'):
instance.save()
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _cleanup_incomplete_migrations(self, context):
"""Delete instance files on failed resize/revert-resize operation
        During a resize/revert-resize operation, if the instance gets
        deleted in between, instance files might remain on the source or
        destination compute node because of a race condition.
"""
        LOG.debug('Cleaning up deleted instances with incomplete migration')
migration_filters = {'host': CONF.host,
'status': 'error'}
migrations = objects.MigrationList.get_by_filters(context,
migration_filters)
if not migrations:
return
inst_uuid_from_migrations = set([migration.instance_uuid for migration
in migrations])
inst_filters = {'deleted': True, 'soft_deleted': False,
'uuid': inst_uuid_from_migrations}
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
context, inst_filters, expected_attrs=attrs, use_slave=True)
for instance in instances:
if instance.host != CONF.host:
for migration in migrations:
if instance.uuid == migration.instance_uuid:
                        # Delete instance files that were not cleaned up
                        # properly on either the source or destination
                        # compute node when the instance was deleted
                        # during resizing.
self.driver.delete_instance_files(instance)
try:
migration.status = 'failed'
with migration.obj_as_admin():
migration.save()
except exception.MigrationNotFound:
LOG.warning(_LW("Migration %s is not found."),
migration.id,
instance=instance)
break
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
exception.QemuGuestAgentNotEnabled,
exception.NovaException,
NotImplementedError)
@wrap_exception()
def quiesce_instance(self, context, instance):
"""Quiesce an instance on this host."""
context = context.elevated()
image_meta = objects.ImageMeta.from_instance(instance)
self.driver.quiesce(context, instance, image_meta)
def _wait_for_snapshots_completion(self, context, mapping):
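        # Poll each snapshot-backed mapping every 0.5 seconds until the
        # volume service reports that the snapshot has left the 'creating'
        # state.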
for mapping_dict in mapping:
if mapping_dict.get('source_type') == 'snapshot':
def _wait_snapshot():
snapshot = self.volume_api.get_snapshot(
context, mapping_dict['snapshot_id'])
if snapshot.get('status') != 'creating':
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_snapshot)
timer.start(interval=0.5).wait()
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
exception.QemuGuestAgentNotEnabled,
exception.NovaException,
NotImplementedError)
@wrap_exception()
def unquiesce_instance(self, context, instance, mapping=None):
"""Unquiesce an instance on this host.
If snapshots' image mapping is provided, it waits until snapshots are
        completed before unquiescing.
"""
context = context.elevated()
if mapping:
try:
self._wait_for_snapshots_completion(context, mapping)
except Exception as error:
LOG.exception(_LE("Exception while waiting completion of "
"volume snapshots: %s"),
error, instance=instance)
image_meta = objects.ImageMeta.from_instance(instance)
self.driver.unquiesce(context, instance, image_meta)
| vmturbo/nova | nova/compute/manager.py | Python | apache-2.0 | 326,877 |
from __future__ import unicode_literals
from mopidy_gpiocont import Extension, frontend as frontend_lib
def test_get_default_config():
ext = Extension()
config = ext.get_default_config()
assert '[gpiocont]' in config
assert 'enabled = true' in config
def test_get_config_schema():
ext = Extension()
schema = ext.get_config_schema()
# TODO Test the content of your config schema
#assert 'username' in schema
#assert 'password' in schema
# TODO Write more tests
| JasperGerth/Mopidy-GPIOcont | tests/test_extension.py | Python | apache-2.0 | 507 |
from a10sdk.common.A10BaseClass import A10BaseClass
class IpStats(A10BaseClass):
"""Class Description::
Statistics for the object ip-stats.
Class ip-stats supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/system/ip-stats/stats`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
        self.required = []
self.b_key = "ip-stats"
self.a10_url="/axapi/v3/system/ip-stats/stats"
self.DeviceProxy = ""
self.stats = {}
for keys, value in kwargs.items():
            setattr(self, keys, value)
| amwelch/a10sdk-python | a10sdk/core/system/system_ip_stats_stats.py | Python | apache-2.0 | 850 |
import base as Base
class ChangeX(Base.BaseClass):
    def __init__(self):
        super(ChangeX, self).__init__()  # cooperate with multiple inheritance
        self.x = 17
class ChangeHam(Base.BaseClass):
def __init__(self):
        super(ChangeHam, self).__init__()
def printHam(self):
print ('Ham2')
class Combo(ChangeX, ChangeHam):
def __init__(self):
super(Combo, self).__init__()
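# Combo's MRO is Combo -> ChangeX -> ChangeHam -> BaseClass -> object, so
# every __init__ in the chain must call super() for all of them to run.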
if __name__ == '__main__':
c = Combo()
c.printHam()
print (c.x)
print (Base.BaseClass.__subclasses__())
| Valka7a/python-playground | let's-learn-python/inheritance-oop-2/challenge/classes.py | Python | mit | 462 |
from esp import neopixel_write
import machine
GAMMA = 2.8
GAMMA_SHIFT = 40
class Led(object):
    def __init__(self, hsv=(0, 0, 0), rgb=None):
        # rgb is accepted but unused; only the HSV triple is stored
        self.set_hsv(*hsv)
def set_hsv(self, *hsv):
self.h = hsv[0]
self.s = hsv[1]
self.v = hsv[2]
def to_rgb(self):
return hsv_to_rgb_rainbow_opt(self.h, self.s, self.v)
class LedStrip:
def __init__(self, pin, num_leds):
self.pin = machine.Pin(pin, machine.Pin.OUT)
self.num_leds = num_leds
self.leds = []
for i in range(num_leds):
self.leds.append(Led())
def blackout(self):
for i in range(self.num_leds):
self.leds[i].set_hsv(0, 0, 0)
self.blit()
def set_all_leds(self, h, s, v):
for i in range(self.num_leds):
self.leds[i].set_hsv(h, s, v)
def _get_buff(self):
buff = bytearray(self.num_leds * 3)
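        # neopixel strips typically expect GRB byte order, hence green first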
for i in range(self.num_leds):
r, g, b = self.leds[i].to_rgb()
buff[i * 3] = g
buff[i * 3 + 1] = r
buff[i * 3 + 2] = b
return buff
def blit(self):
neopixel_write(self.pin, self._get_buff(), True)
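# Example usage, a sketch (the pin number and LED count are assumptions):
#   strip = LedStrip(4, 8)
#   strip.set_all_leds(96, 255, 128)  # green hue, full sat, half value
#   strip.blit()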
def scale_gamma(val):
return int((((val + GAMMA_SHIFT)/(255 + GAMMA_SHIFT)) ** GAMMA) * 255)
def hsv_to_rgb_rainbow_opt(hue, sat, val):
"""Convert HSV to RGB with constant brightness in rainbow mode."""
K255 = 255
K171 = 171
K170 = 170
K85 = 85
offset = hue & 0x1F
offset8 = offset << 3
third = offset8 // 3
r = g = b = 0
if (hue & 0x80) == 0:
if (hue & 0x40) == 0:
if (hue & 0x20) == 0:
r = K255 - third
g = third
b = 0
else:
r = K171
g = K85 + third
b = 0
else:
if (hue & 0x20) == 0:
twothirds = offset8 * 2 // 3 # 256 * 2 / 3
r = K171 - twothirds
g = K170 + third
b = 0
else:
r = 0
g = K255 - third
b = third
else:
if (hue & 0x40) == 0:
if (hue & 0x20) == 0:
r = 0
twothirds = offset8 * 2 // 3
g = K171 - twothirds
b = K85 + twothirds
else:
r = third
g = 0
b = K255 - third
else:
if (hue & 0x20) == 0:
r = K85 + third
g = 0
b = K171 - third
else:
r = K170 + third
g = 0
b = K85 - third
if sat != 255:
if sat == 0:
r = b = g = 255
else:
if r:
r = r * sat >> 8
if g:
g = g * sat >> 8
if b:
b = b * sat >> 8
desat = 255 - sat
desat = desat * desat >> 8
brightness_floor = desat
r += brightness_floor
g += brightness_floor
b += brightness_floor
if val != 255:
val = scale_gamma(val)
if val == 0:
r = g = b = 0
else:
if r:
r = r * val >> 8
if g:
g = g * val >> 8
if b:
b = b * val >> 8
return (r, g, b)
| numero-trey/py8266 | ledpy.py | Python | gpl-3.0 | 3,405 |
import random
#4000 4 0.005 0.0025
num_nodes = 1000
num_partition = 4
print num_nodes
intra_prop = 0.01
inter_prop = 0.01
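# Nodes are assigned round-robin to partitions (node i belongs to partition
# i % num_partition). Each candidate edge is kept with probability
# intra_prop inside a partition and inter_prop across partitions, and every
# node's adjacency list is printed as "<degree>  <neighbour> <neighbour> ...".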
ans = []
cnt = 0
for i in range(num_nodes):
ans.append([])
for i in range(num_nodes):
for j in range(i + 1, num_nodes):
first = i % num_partition
second = j % num_partition
rad = random.random()
if first == second:
if rad < intra_prop:
if j not in ans[i]:
cnt = cnt + 1
ans[i].append(j)
if i not in ans[j]:
ans[j].append(i)
else:
if rad < inter_prop:
if j not in ans[i]:
cnt = cnt + 1
ans[i].append(j)
if i not in ans[j]:
ans[j].append(i)
for i in range(num_nodes):
res = str(len(ans[i])) + " "
for v in ans[i]:
res = res + " " + str(v)
print res
| simp1eton/CS224W_Final_Project | DATA/stochastic_block_model_6.py | Python | mit | 842 |
import sys
sys.path.append("../src")
from gehol import GeholProxy
from gehol.converters.utils import write_content_to_file
from gehol.converters.rfc5545icalwriter import convert_geholcalendar_to_ical
if __name__=="__main__":
    print 'import calendar test --> ical files'
all_courses = ['INFOH500','BIMEH404','STATH400', 'COMMB411', 'TRANH100', 'INFOH100', 'TRANH100']
host = '164.15.72.157:8081'
first_monday = '19/09/2011'
gehol_proxy = GeholProxy(host)
for course in all_courses:
print "fetching events for course %s" % course
cal = gehol_proxy.get_course_calendar(course)
dest_filename = '%s.ics' % course
ical = convert_geholcalendar_to_ical(cal, first_monday)
print "Saving %s events to %s" % (course, dest_filename)
ical_data = ical.as_string()
write_content_to_file(ical_data, dest_filename)
| Psycojoker/geholparser | examples/test_ical_writer.py | Python | mit | 881 |
class LinkedList:
def __init__(self, data):
self.data = data
def append(self, data):
self.next = LinkedList(data)
test = LinkedList(5)
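# 'next' is only created by append(), which is never called here, so this
# attribute lookup fails at runtime.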
test.next.data
| jrahm/DuckTest | tests/test_ll.py | Python | bsd-2-clause | 177 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS (Oracle)
Date : Aug 27, 2014
copyright : (C) 2014 by Médéric RIBREUX
email : mederic.ribreux@gmail.com
The content of this file is based on
- PG_Manager by Martin Dobias <wonder.sk@gmail.com> (GPLv2 license)
- DB Manager by Giuseppe Sucameli <brush.tyler@gmail.com> (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# this will disable the dbplugin if the connector raises an ImportError
from .connector import OracleDBConnector
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ..plugin import ConnectionError, InvalidDataException, DBPlugin, \
Database, Schema, Table, VectorTable, TableField, TableConstraint, \
TableIndex, TableTrigger, TableRule
try:
from . import resources_rc
except ImportError:
pass
from ..html_elems import HtmlParagraph, HtmlList, HtmlTable
from qgis.core import QgsCredentials
def classFactory():
return OracleDBPlugin
class OracleDBPlugin(DBPlugin):
@classmethod
def icon(self):
return QIcon(":/db_manager/oracle/icon")
@classmethod
def typeName(self):
return 'oracle'
@classmethod
def typeNameString(self):
return 'Oracle Spatial'
@classmethod
def providerName(self):
return 'oracle'
@classmethod
def connectionSettingsKey(self):
return '/Oracle/connections'
def connectToUri(self, uri):
self.db = self.databasesFactory(self, uri)
if self.db:
return True
return False
def databasesFactory(self, connection, uri):
return ORDatabase(connection, uri)
def connect(self, parent=None):
conn_name = self.connectionName()
settings = QSettings()
settings.beginGroup(u"/{0}/{1}".format(
self.connectionSettingsKey(), conn_name))
if not settings.contains("database"): # non-existent entry?
raise InvalidDataException(
self.tr('There is no defined database connection "{}".'.format(
conn_name)))
from qgis.core import QgsDataSourceURI
uri = QgsDataSourceURI()
settingsList = ["host", "port", "database", "username", "password"]
host, port, database, username, password = map(
lambda x: settings.value(x, "", type=str), settingsList)
        # qgis 1.5 used 'savePassword' instead of the 'save' setting
savedPassword = settings.value("save", False, type=bool) or \
settings.value("savePassword", False, type=bool)
        # get all of the connection options
useEstimatedMetadata = settings.value(
"estimatedMetadata", False, type=bool)
uri.setParam('userTablesOnly', unicode(
settings.value("userTablesOnly", False, type=bool)))
uri.setParam('geometryColumnsOnly', unicode(
settings.value("geometryColumnsOnly", False, type=bool)))
uri.setParam('allowGeometrylessTables', unicode(
settings.value("allowGeometrylessTables", False, type=bool)))
uri.setParam('onlyExistingTypes', unicode(
settings.value("onlyExistingTypes", False, type=bool)))
settings.endGroup()
uri.setConnection(host, port, database, username, password)
uri.setUseEstimatedMetadata(useEstimatedMetadata)
err = u""
try:
return self.connectToUri(uri)
except ConnectionError as e:
err = unicode(e)
# ask for valid credentials
max_attempts = 3
for i in range(max_attempts):
(ok, username, password) = QgsCredentials.instance().get(
uri.connectionInfo(False), username, password, err)
if not ok:
return False
uri.setConnection(host, port, database, username, password)
try:
self.connectToUri(uri)
except ConnectionError as e:
if i == max_attempts - 1: # failed the last attempt
raise e
err = unicode(e)
continue
QgsCredentials.instance().put(
uri.connectionInfo(False), username, password)
return True
return False
class ORDatabase(Database):
def __init__(self, connection, uri):
self.connName = connection.connectionName()
Database.__init__(self, connection, uri)
def connectorsFactory(self, uri):
return OracleDBConnector(uri, self.connName)
def dataTablesFactory(self, row, db, schema=None):
return ORTable(row, db, schema)
def vectorTablesFactory(self, row, db, schema=None):
return ORVectorTable(row, db, schema)
def info(self):
from .info_model import ORDatabaseInfo
return ORDatabaseInfo(self)
def schemasFactory(self, row, db):
return ORSchema(row, db)
def columnUniqueValuesModel(self, col, table, limit=10):
l = u""
if limit:
l = u"WHERE ROWNUM < {:d}".format(limit)
con = self.database().connector
        # Never list unique values for the geometry column
        tableName = table.replace(u'"', u"").split(u".")
        if len(tableName) == 1:
            # no schema prefix: build a (schema, table) pair anyway
            tableName = [None, tableName[0]]
colName = col.replace(u'"', u"").split(u".")[-1]
if con.isGeometryColumn(tableName, colName):
return None
query = u"SELECT DISTINCT {} FROM {} {}".format(col, table, l)
return self.sqlResultModel(query, self)
def sqlResultModel(self, sql, parent):
from .data_model import ORSqlResultModel
return ORSqlResultModel(self, sql, parent)
def toSqlLayer(self, sql, geomCol, uniqueCol,
layerName=u"QueryLayer", layerType=None,
avoidSelectById=False, filter=""):
from qgis.core import QgsMapLayer, QgsVectorLayer
uri = self.uri()
con = self.database().connector
uri.setDataSource(u"", u"({})".format(sql), geomCol, filter, uniqueCol.strip(u'"'))
if avoidSelectById:
uri.disableSelectAtId(True)
provider = self.dbplugin().providerName()
vlayer = QgsVectorLayer(uri.uri(False), layerName, provider)
# handling undetermined geometry type
if not vlayer.isValid():
wkbType, srid = con.getTableMainGeomType(
u"({})".format(sql), geomCol)
uri.setWkbType(wkbType)
if srid:
uri.setSrid(unicode(srid))
vlayer = QgsVectorLayer(uri.uri(False), layerName, provider)
return vlayer
def registerDatabaseActions(self, mainWindow):
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Re-connect"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Database"), self.reconnectActionSlot)
if self.schemas():
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Create schema"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Schema"), self.createSchemaActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Delete (empty) schema"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Schema"), self.deleteSchemaActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "Delete selected item"), self)
mainWindow.registerAction(action, None, self.deleteActionSlot)
action.setShortcuts(QKeySequence.Delete)
action = QAction(QIcon(":/db_manager/actions/create_table"),
QApplication.translate(
"DBManagerPlugin", "&Create table"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.createTableActionSlot)
action = QAction(QIcon(":/db_manager/actions/edit_table"),
QApplication.translate(
"DBManagerPlugin", "&Edit table"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.editTableActionSlot)
action = QAction(QIcon(":/db_manager/actions/del_table"),
QApplication.translate(
"DBManagerPlugin", "&Delete table/view"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.deleteTableActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Empty table"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.emptyTableActionSlot)
class ORSchema(Schema):
def __init__(self, row, db):
Schema.__init__(self, db)
# self.oid, self.name, self.owner, self.perms, self.comment = row
self.name = row[0]
class ORTable(Table):
def __init__(self, row, db, schema=None):
Table.__init__(self, db, schema)
self.name, self.owner, isView = row
self.estimatedRowCount = None
self.objectType = None
self.isView = False
self.isMaterializedView = False
if isView == 1:
self.isView = True
self.creationDate = None
self.modificationDate = None
def getDates(self):
"""Grab the creation/modification dates of the table"""
self.creationDate, self.modificationDate = (
self.database().connector.getTableDates((self.schemaName(),
self.name)))
    def refreshRowEstimation(self):
        """Use ALL_ALL_TABLES to get an estimate of the row count"""
        if self.isView:
            self.estimatedRowCount = 0
            return
        self.estimatedRowCount = (
            self.database().connector.getTableRowEstimation(
                (self.schemaName(), self.name)))
def getType(self):
"""Grab the type of object for the table"""
self.objectType = self.database().connector.getTableType(
(self.schemaName(), self.name))
def getComment(self):
"""Grab the general comment of the table/view"""
self.comment = self.database().connector.getTableComment(
(self.schemaName(), self.name), self.objectType)
def getDefinition(self):
return self.database().connector.getDefinition(
(self.schemaName(), self.name), self.objectType)
def getMViewInfo(self):
if self.objectType == u"MATERIALIZED VIEW":
return self.database().connector.getMViewInfo(
(self.schemaName(), self.name))
else:
return None
def runAction(self, action):
action = unicode(action)
if action.startswith("rows/"):
if action == "rows/recount":
self.refreshRowCount()
return True
elif action.startswith("index/"):
parts = action.split('/')
index_name = parts[1]
index_action = parts[2]
msg = QApplication.translate(
"DBManagerPlugin",
"Do you want to {} index {}?".format(
index_action, index_name))
QApplication.restoreOverrideCursor()
try:
if QMessageBox.question(
None,
QApplication.translate(
"DBManagerPlugin", "Table Index"),
msg,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return False
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
if index_action == "rebuild":
self.aboutToChange()
self.database().connector.rebuildTableIndex(
(self.schemaName(), self.name), index_name)
self.refreshIndexes()
return True
elif action.startswith(u"mview/"):
if action == "mview/refresh":
self.aboutToChange()
self.database().connector.refreshMView(
(self.schemaName(), self.name))
return True
return Table.runAction(self, action)
def tableFieldsFactory(self, row, table):
return ORTableField(row, table)
def tableConstraintsFactory(self, row, table):
return ORTableConstraint(row, table)
def tableIndexesFactory(self, row, table):
return ORTableIndex(row, table)
def tableTriggersFactory(self, row, table):
return ORTableTrigger(row, table)
def info(self):
from .info_model import ORTableInfo
return ORTableInfo(self)
def tableDataModel(self, parent):
from .data_model import ORTableDataModel
return ORTableDataModel(self, parent)
def getValidQGisUniqueFields(self, onlyOne=False):
""" list of fields valid to load the table as layer in QGis canvas.
QGis automatically search for a valid unique field, so it's
needed only for queries and views.
"""
ret = []
# add the pk
pkcols = filter(lambda x: x.primaryKey, self.fields())
if len(pkcols) == 1:
ret.append(pkcols[0])
# then add integer fields with an unique index
indexes = self.indexes()
if indexes is not None:
for idx in indexes:
if idx.isUnique and len(idx.columns) == 1:
fld = idx.fields()[idx.columns[0]]
if (fld.dataType == u"NUMBER"
and not fld.modifier
and fld.notNull
and fld not in ret):
ret.append(fld)
# and finally append the other suitable fields
for fld in self.fields():
if (fld.dataType == u"NUMBER"
and not fld.modifier
and fld.notNull
and fld not in ret):
ret.append(fld)
if onlyOne:
return ret[0] if len(ret) > 0 else None
return ret
def uri(self):
uri = self.database().uri()
schema = self.schemaName() if self.schemaName() else ''
geomCol = self.geomColumn if self.type in [
Table.VectorType, Table.RasterType] else ""
uniqueCol = self.getValidQGisUniqueFields(
True) if self.isView else None
uri.setDataSource(schema, self.name, geomCol if geomCol else None,
None, uniqueCol.name if uniqueCol else "")
# Handle geographic table
if geomCol:
uri.setWkbType(self.wkbType)
uri.setSrid(unicode(self.srid))
return uri
class ORVectorTable(ORTable, VectorTable):
def __init__(self, row, db, schema=None):
ORTable.__init__(self, row[0:3], db, schema)
VectorTable.__init__(self, db, schema)
self.geomColumn, self.geomType, self.wkbType, self.geomDim, \
self.srid = row[-7:-2]
def info(self):
from .info_model import ORVectorTableInfo
return ORVectorTableInfo(self)
def runAction(self, action):
if action.startswith("extent/"):
if action == "extent/update":
self.aboutToChange()
self.updateExtent()
return True
if ORTable.runAction(self, action):
return True
return VectorTable.runAction(self, action)
def canUpdateMetadata(self):
return self.database().connector.canUpdateMetadata((self.schemaName(),
self.name))
def updateExtent(self):
self.database().connector.updateMetadata(
(self.schemaName(), self.name),
self.geomColumn, extent=self.extent)
self.refreshTableEstimatedExtent()
self.refresh()
def hasSpatialIndex(self, geom_column=None):
geom_column = geom_column if geom_column else self.geomColumn
for idx in self.indexes():
if geom_column == idx.column:
return True
return False
class ORTableField(TableField):
def __init__(self, row, table):
""" build fields information from query and find primary key """
TableField.__init__(self, table)
self.num, self.name, self.dataType, self.charMaxLen, \
self.modifier, self.notNull, self.hasDefault, \
self.default, typeStr, self.comment = row
self.primaryKey = False
self.num = int(self.num)
if isinstance(self.charMaxLen, QPyNullVariant):
self.charMaxLen = None
else:
self.charMaxLen = int(self.charMaxLen)
if isinstance(self.modifier, QPyNullVariant):
self.modifier = None
else:
self.modifier = int(self.modifier)
        # Oracle reports the NULLABLE flag ('Y'/'N'); notNull is its
        # negation.
        self.notNull = self.notNull.upper() != u"Y"
if isinstance(self.comment, QPyNullVariant):
self.comment = u""
# find out whether fields are part of primary key
for con in self.table().constraints():
if (con.type == ORTableConstraint.TypePrimaryKey
and self.name == con.column):
self.primaryKey = True
break
def type2String(self):
if (u"TIMESTAMP" in self.dataType
or self.dataType in [u"DATE", u"SDO_GEOMETRY",
u"BINARY_FLOAT", u"BINARY_DOUBLE"]):
return u"{}".format(self.dataType)
if self.charMaxLen in [None, -1]:
return u"{}".format(self.dataType)
elif self.modifier in [None, -1, 0]:
return u"{}({})".format(self.dataType, self.charMaxLen)
return u"{}({},{})".format(self.dataType, self.charMaxLen,
self.modifier)
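    # Editor's note: a worked example of the branches above -- a NUMBER
    # column with charMaxLen=10 and modifier=2 renders as u"NUMBER(10,2)",
    # and as u"NUMBER(10)" when the modifier is empty, while DATE,
    # SDO_GEOMETRY and TIMESTAMP types fall through with their bare name.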
def update(self, new_name, new_type_str=None, new_not_null=None,
new_default_str=None):
self.table().aboutToChange()
if self.name == new_name:
new_name = None
if self.type2String() == new_type_str:
new_type_str = None
if self.notNull == new_not_null:
new_not_null = None
if self.default2String() == new_default_str:
new_default_str = None
ret = self.table().database().connector.updateTableColumn(
(self.table().schemaName(), self.table().name),
self.name, new_name, new_type_str,
new_not_null, new_default_str)
# When changing a field, refresh also constraints and
# indexes.
if ret is not False:
self.table().refreshFields()
self.table().refreshConstraints()
self.table().refreshIndexes()
return ret
class ORTableConstraint(TableConstraint):
TypeCheck, TypeForeignKey, TypePrimaryKey, \
TypeUnique, TypeUnknown = range(5)
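    # Editor's note: Oracle reports constraint types as single letters,
    # e.g. a primary key arrives as 'P' and, after lower() in __init__
    # below, maps through the dict that follows to TypePrimaryKey.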
types = {"c": TypeCheck, "r": TypeForeignKey,
"p": TypePrimaryKey, "u": TypeUnique}
def __init__(self, row, table):
""" build constraints info from query """
TableConstraint.__init__(self, table)
self.name, constr_type_str, self.column, self.validated, \
self.generated, self.status = row[0:6]
constr_type_str = constr_type_str.lower()
if constr_type_str in ORTableConstraint.types:
self.type = ORTableConstraint.types[constr_type_str]
else:
self.type = ORTableConstraint.TypeUnknown
if isinstance(row[6], QPyNullVariant):
self.checkSource = u""
else:
self.checkSource = row[6]
if isinstance(row[8], QPyNullVariant):
self.foreignTable = u""
else:
self.foreignTable = row[8]
if isinstance(row[7], QPyNullVariant):
self.foreignOnDelete = u""
else:
self.foreignOnDelete = row[7]
if isinstance(row[9], QPyNullVariant):
self.foreignKey = u""
else:
self.foreignKey = row[9]
def type2String(self):
if self.type == ORTableConstraint.TypeCheck:
return QApplication.translate("DBManagerPlugin", "Check")
if self.type == ORTableConstraint.TypePrimaryKey:
return QApplication.translate("DBManagerPlugin", "Primary key")
if self.type == ORTableConstraint.TypeForeignKey:
return QApplication.translate("DBManagerPlugin", "Foreign key")
if self.type == ORTableConstraint.TypeUnique:
return QApplication.translate("DBManagerPlugin", "Unique")
return QApplication.translate("DBManagerPlugin", 'Unknown')
def fields(self):
""" Hack to make edit dialog box work """
fields = self.table().fields()
field = None
for fld in fields:
if fld.name == self.column:
field = fld
cols = {}
cols[0] = field
return cols
class ORTableIndex(TableIndex):
def __init__(self, row, table):
TableIndex.__init__(self, table)
self.name, self.column, self.indexType, self.status, \
self.analyzed, self.compression, self.isUnique = row
def fields(self):
""" Hack to make edit dialog box work """
self.table().refreshFields()
fields = self.table().fields()
field = None
for fld in fields:
if fld.name == self.column:
field = fld
cols = {}
cols[0] = field
return cols
class ORTableTrigger(TableTrigger):
def __init__(self, row, table):
TableTrigger.__init__(self, table)
self.name, self.event, self.type, self.enabled = row
| SebDieBln/QGIS | python/plugins/db_manager/db_plugins/oracle/plugin.py | Python | gpl-2.0 | 22,853 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
# Module API
def ensure_dir(path):
"""Ensure directory exists.
Args:
path (str): file path inside the directory to ensure
"""
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
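# Editor's sketch (hypothetical paths): ensure_dir() creates the *parent*
# directory of the given path, never the file itself, e.g.
#
#     ensure_dir('/tmp/demo/out/data.csv')   # creates /tmp/demo/out
#     ensure_dir('data.csv')                 # empty dirname, so a no-op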
| sirex/datapackage-py | datapackage/helpers.py | Python | mit | 448 |
import agents as ag
import envgui as gui
# change this line ONLY to refer to your project
import submissions.Porter.vacuum2 as v2
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt,
# ReflexVacuumAgent, RandomVacuumAgent,
# TableDrivenVacuumAgent, ModelBasedVacuumAgent
]
    def percept(self, agent):
        """The percept is a tuple of ('Bump' or 'None', 'Dirty' or 'Clean').
        Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
        status = ('Dirty' if self.some_things_at(
            agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
        return (bump, status)
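    # Editor's note: e.g. standing on dirt with no wall hit yields
    # ('None', 'Dirty'); bumping a wall on a clean square yields
    # ('Bump', 'Clean').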
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
def testVacuum(label, w=4, h=3,
dloc=[(1,1),(2,1)],
vloc=(1,1),
limit=6):
print(label)
v = VacuumEnvironment(w, h)
for loc in dloc:
v.add_thing(Dirt(), loc)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
v.add_thing(a, vloc)
t = gui.EnvTUI(v)
t.mapImageNames({
ag.Wall: '#',
Dirt: '@',
ag.Agent: 'V',
})
t.step(0)
t.list_things(Dirt)
t.step(limit)
if len(t.env.get_things(Dirt)) > 0:
t.list_things(Dirt)
else:
print('All clean!')
# Check to continue
if input('Do you want to continue [Y/n]? ') == 'n':
exit(0)
else:
print('----------------------------------------')
testVacuum('Two Cells, Agent on Left:')
testVacuum('Two Cells, Agent on Right:', vloc=(2,1))
testVacuum('Two Cells, Agent on Top:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,1) )
testVacuum('Two Cells, Agent on Bottom:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,2) )
testVacuum('Five Cells, Agent on Left:', w=7, h=3,
dloc=[(2,1), (4,1)], vloc=(1,1), limit=12)
testVacuum('Five Cells, Agent near Right:', w=7, h=3,
dloc=[(2,1), (3,1)], vloc=(4,1), limit=12)
testVacuum('Five Cells, Agent on Top:', w=3, h=7,
dloc=[(1,2), (1,4)], vloc=(1,1), limit=12 )
testVacuum('Five Cells, Agent Near Bottom:', w=3, h=7,
dloc=[(1,2), (1,3)], vloc=(1,4), limit=12 )
testVacuum('5x4 Grid, Agent in Top Left:', w=7, h=6,
dloc=[(1,4), (2,2), (3, 3), (4,1), (5,2)],
vloc=(1,1), limit=46 )
testVacuum('5x4 Grid, Agent near Bottom Right:', w=7, h=6,
dloc=[(1,3), (2,2), (3, 4), (4,1), (5,2)],
vloc=(4, 3), limit=46 )
v = VacuumEnvironment(6, 3)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vaccuum')
c = g.getCanvas()
c.mapImageNames({
ag.Wall: 'images/wall.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop()
 | austinban/aima-python | submissions/Porter/vacuum2Runner.py | Python | mit | 6,343 |
import itertools
import pyglet
from pyglet.gl import *
import freezegame.resources
from freezegame.image_loader import ImageLoader
from freezegame.abstract_state import AbstractState
from freezegame.sprite import Sprite
from freezegame.tile_map import TileMap
from freezegame.broad_phase_collision import RDC
import math
pyglet.resource.path = ["./sample_graphics"]
pyglet.resource.reindex()
freezegame.resources.images = ImageLoader("sample_graphics")
platform = pyglet.window.get_platform()
debug_log = open('debug.txt', 'w')
print(platform)
debug_log.write(str(platform))
display = platform.get_default_display()
print(display)
debug_log.write(str(display))
screen = display.get_default_screen()
debug_log.write(str(screen))
print(str(screen))
debug_log.close()
template = pyglet.gl.Config(double_buffer=True)
config = screen.get_best_config(template=template)
window = pyglet.window.Window(1024, 768, resizable=False, config=config, vsync=True)
icon16 = pyglet.image.load('sample_graphics/pybaconIcon16.png')
icon32 = pyglet.image.load('sample_graphics/pybaconIcon32.png')
window.set_icon(icon16, icon32)
window.set_caption("Freezegame Sample")
keys = pyglet.window.key.KeyStateHandler()
window.push_handlers(keys)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glShadeModel(GL_SMOOTH)
fps = pyglet.clock.ClockDisplay()
level = AbstractState()
class Player(Sprite):
def __init__(self, x, y, state):
Sprite.__init__(self, x, y, [8, 0, 16, 32], state, 'vermouth', [0, 0, 32, 32], state.batch, state.player_group)
        self.start_jump = False
self.jump_timeout = 0.2
self.jump_timer = 0.0
        self.right = True
self.create_animations()
def create_animations(self):
self.add_animation('walkRight', [[0, 0, 32, 32], [32, 0, 32, 32]], fps=20.0)
self.add_animation('walkLeft', [[0, 32, 32, 32], [32, 32, 32, 32]], fps=20.0)
self.add_animation('standRight', [[0, 0, 32, 32]])
self.add_animation('standLeft', [[0, 32, 32, 32]])
self.add_animation('jumpRight', [[64, 0, 32, 32]])
self.add_animation('jumpLeft', [[64, 32, 32, 32]])
def update(self, dt, keys, state):
acc = 1000.0
jump = 2000.0
        if self.start_jump:
if self.on_ground:
self.start_jump = False
self.jump_timer -= dt
if self.jump_timer <= 0:
self.start_jump = False
self.jump_timer = 0
if not (keys[pyglet.window.key.SPACE]):
self.start_jump = False
else:
self.vy += jump * dt
if self.on_ground and keys[pyglet.window.key.SPACE]:
self.start_jump = True
self.vy += jump * dt
self.jump_timer = self.jump_timeout
if keys[pyglet.window.key.LEFT]:
self.right = False
self.vx += -acc * dt
if keys[pyglet.window.key.RIGHT]:
self.right = True
self.vx += acc * dt
Sprite.update(self, dt, keys, state)
if self.y < 0:
self.dead = True
def update_animations(self, dt, keys, state):
if keys[pyglet.window.key.RIGHT] and self.on_ground:
self.play_animation('walkRight')
elif keys[pyglet.window.key.LEFT] and self.on_ground:
self.play_animation('walkLeft')
elif self.vx > 0 and self.on_ground:
self.play_animation('standRight')
elif self.vx < 0 and self.on_ground:
self.play_animation('standLeft')
elif (not self.on_ground) and self.vx > 0:
self.play_animation('jumpRight')
elif (not self.on_ground) and self.vx < 0:
self.play_animation('jumpLeft')
def collision_callback(self, other_sprite):
if other_sprite.deadly_to_player:
self.dead = True
class SampleScene(AbstractState):
def __init__(self):
AbstractState.__init__(self)
self.batch = pyglet.graphics.Batch()
self.player_group = pyglet.graphics.OrderedGroup(3)
self.sprite_group = pyglet.graphics.OrderedGroup(2)
self.map_group = pyglet.graphics.OrderedGroup(1)
self.background_group = pyglet.graphics.OrderedGroup(0)
self.sprites = []
self.player = None
self.width = 10
self.height = 10
self.map = TileMap(32, 32, self.width, self.height, self, 'tileSet', [0, 128, 32, 32], self.map_group)
self.map.build_surrounding_walls()
self.map.auto_tile()
self.camera = [0, 0]
self.player = Player(5*32, 5*32, self)
self.sprites.append(self.player)
def draw(self):
glLoadIdentity()
        glClear(GL_COLOR_BUFFER_BIT)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
self.batch.draw()
glPopMatrix()
def update(self, dt, keys):
if dt > 0.05:
return
for sprite in self.sprites:
if sprite.updatable:
sprite.update(dt, keys, self)
# Now we turn off all the sprites
for sprite in self.sprites:
sprite.on = False
for sprite in self.sprites:
if sprite.updatable:
sprite.resolve_tile_map_collisions(self.map)
# Broad phase collision
rdc = RDC()
rdc.recursive_clustering(self.sprites, 0, 1)
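        # (editor's note) the trailing 0, 1 arguments are presumably the
        # x/y axis indices the recursive clustering alternates between.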
groups = rdc.colliding_groups
#Now do narrow phase collision and resolution
for group in groups:
pairs = list(itertools.combinations(group, 2))
for pair in pairs:
pair[0].separate(pair[1])
# Double check that no one resolved into a wall
for sprite in self.sprites:
sprite.resolve_tile_map_collisions(self.map)
sprite.update_sprite_pos()
level = SampleScene()
@window.event
def on_resize(width, height):
print(width)
print(height)
    if height == 0:
        height = 1
def update(dt):
    level.update(dt, keys)
@window.event
def on_draw():
window.clear()
level.draw()
fps.draw()
#pyglet.gl.glFlush()
#pyglet.gl.glFinish()
@window.event
def on_mouse_press(x, y, button, modifiers):
level.handle_mouse_press(x, y, button, modifiers)
@window.event
def on_mouse_release(x, y, button, modifiers):
level.handle_mouse_release(x, y, button, modifiers)
@window.event
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
level.handle_mouse_drag(x, y, dx, dy, buttons, modifiers)
@window.event()
def on_mouse_scroll(x, y, scroll_x, scroll_y):
level.handle_mouse_scroll(x, y, scroll_x, scroll_y)
@window.event()
def on_mouse_motion(x, y, dx, dy):
level.handle_mouse_motion(x, y, dx, dy)
if __name__ == '__main__':
    pyglet.clock.schedule_interval(update, 1 / 60.0)
pyglet.app.run()
| mattfister/freezegame | freezegame/sample.py | Python | mit | 6,929 |
# coding: utf-8
import itertools
import json
import logging
from datetime import date
from django.contrib import messages
from django.contrib.admin.models import LogEntry, DELETION
from django.contrib.contenttypes.models import ContentType
from django.core.serializers.json import DjangoJSONEncoder
from django.http import Http404
from django.urls import reverse
from django.db.models import Count
from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView, FormView, DetailView
from accounts.models import UserAccount
from organizations.models import Facility, FacilityMembership
from organizations.templatetags.memberships import is_facility_member, \
is_membership_pending
from organizations.views import get_facility_details
from scheduler.models import Shift
from scheduler.models import ShiftHelper
from volunteer_planner.utils import LoginRequiredMixin
from .forms import RegisterForShiftForm
logger = logging.getLogger(__name__)
def get_open_shifts():
shifts = Shift.open_shifts.all()
shifts = shifts.select_related('facility',
'facility__place',
'facility__place__area',
'facility__place__area__region',
'facility__place__area__region__country',
)
shifts = shifts.order_by('facility__place__area__region__country',
'facility__place__area__region',
'facility__place__area',
'facility__place',
'facility',
'starting_time',
)
return shifts
class HelpDesk(LoginRequiredMixin, TemplateView):
"""
Facility overview. First view that a volunteer gets redirected to when they log in.
"""
template_name = "helpdesk.html"
@staticmethod
def serialize_news(news_entries):
return [dict(title=news_entry.title,
date=news_entry.creation_date,
text=news_entry.text) for news_entry in news_entries]
def get_context_data(self, **kwargs):
context = super(HelpDesk, self).get_context_data(**kwargs)
open_shifts = get_open_shifts()
shifts_by_facility = itertools.groupby(open_shifts,
lambda s: s.facility)
facility_list = []
used_places = set()
for facility, shifts_at_facility in shifts_by_facility:
used_places.add(facility.place.area)
facility_list.append(
get_facility_details(facility, shifts_at_facility))
context['areas_json'] = json.dumps(
[{'slug': area.slug, 'name': area.name} for area in
sorted(used_places, key=lambda p: p.name)])
context['facility_json'] = json.dumps(facility_list,
cls=DjangoJSONEncoder)
context['shifts'] = open_shifts
return context
class GeographicHelpdeskView(DetailView):
template_name = 'geographic_helpdesk.html'
context_object_name = 'geographical_unit'
@staticmethod
def make_breadcrumps_dict(country, region=None, area=None,
place=None):
result = dict(country=country, flattened=[country, ])
for k, v in zip(('region', 'area', 'place'), (region, area, place)):
if v:
result[k] = v
result['flattened'].append(v)
return result
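    # Editor's note: e.g. make_breadcrumps_dict(country, region) returns
    # {'country': country, 'region': region,
    #  'flattened': [country, region]}, skipping the None levels.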
def get_queryset(self):
return super(GeographicHelpdeskView,
self).get_queryset().select_related(
*self.model.get_select_related_list())
def get_context_data(self, **kwargs):
context = super(GeographicHelpdeskView, self).get_context_data(**kwargs)
place = self.object
context['breadcrumps'] = self.make_breadcrumps_dict(*place.breadcrumps)
context['shifts'] = get_open_shifts().by_geography(place)
return context
class ShiftDetailView(LoginRequiredMixin, FormView):
template_name = 'shift_details.html'
form_class = RegisterForShiftForm
def get_context_data(self, **kwargs):
context = super(ShiftDetailView, self).get_context_data(**kwargs)
schedule_date = date(int(self.kwargs['year']),
int(self.kwargs['month']),
int(self.kwargs['day']))
try:
shift = Shift.objects.on_shiftdate(schedule_date).annotate(
volunteer_count=Count('helpers')).get(
facility__slug=self.kwargs['facility_slug'],
id=self.kwargs['shift_id'])
except Shift.DoesNotExist:
raise Http404()
context['shift'] = shift
return context
def get_success_url(self):
"""
Redirect to the same page.
"""
return reverse('shift_details', kwargs=self.kwargs)
class PlannerView(LoginRequiredMixin, FormView):
"""
View that gets shown to volunteers when they browse a specific day.
It'll show all the available shifts, and they can add and remove
themselves from shifts.
"""
template_name = "helpdesk_single.html"
form_class = RegisterForShiftForm
def get_context_data(self, **kwargs):
context = super(PlannerView, self).get_context_data(**kwargs)
schedule_date = date(int(self.kwargs['year']),
int(self.kwargs['month']),
int(self.kwargs['day']))
facility = get_object_or_404(Facility,
slug=self.kwargs['facility_slug'])
shifts = Shift.objects.filter(facility=facility)
shifts = shifts.on_shiftdate(schedule_date)
shifts = shifts.annotate(volunteer_count=Count('helpers'))
shifts = shifts.order_by('task', 'workplace', 'ending_time')
shifts = shifts.select_related('task', 'workplace', 'facility')
shifts = shifts.prefetch_related('helpers', 'helpers__user')
context['shifts'] = shifts
context['facility'] = facility
context['schedule_date'] = schedule_date
return context
def form_invalid(self, form):
messages.warning(self.request, _(u'The submitted data was invalid.'))
return super(PlannerView, self).form_invalid(form)
def form_valid(self, form):
user = self.request.user
try:
user_account = UserAccount.objects.get(user=user)
except UserAccount.DoesNotExist:
messages.warning(self.request, _(u'User account does not exist.'))
return super(PlannerView, self).form_valid(form)
shift_to_join = form.cleaned_data.get("join_shift")
shift_to_leave = form.cleaned_data.get("leave_shift")
if shift_to_join:
if shift_to_join.members_only \
and not is_facility_member(self.request.user,
shift_to_join.facility):
if not is_membership_pending(user, shift_to_join.facility):
mbs, created = FacilityMembership.objects.get_or_create(
user_account=user_account,
facility=shift_to_join.facility, defaults=dict(
status=FacilityMembership.Status.PENDING,
role=FacilityMembership.Roles.MEMBER
)
)
if created:
messages.success(self.request, _(
u'A membership request has been sent.'))
return super(PlannerView, self).form_valid(form)
hard_conflicts, graced_conflicts = ShiftHelper.objects.conflicting(shift_to_join,
user_account=user_account)
hard_conflicted_shifts = [shift_helper.shift for shift_helper in
hard_conflicts]
soft_conflicted_shifts = [shift_helper.shift for shift_helper in
graced_conflicts]
if hard_conflicted_shifts:
error_message = _(
u'We can\'t add you to this shift because you\'ve already agreed to other shifts at the same time:')
message_list = u'<ul>{}</ul>'.format('\n'.join(
[u'<li>{}</li>'.format(conflict) for conflict in
hard_conflicted_shifts]))
messages.warning(self.request,
mark_safe(u'{}<br/>{}'.format(error_message,
message_list)))
elif shift_to_join.slots - shift_to_join.volunteer_count <= 0:
error_message = _(
u'We can\'t add you to this shift because there are no more slots left.')
messages.warning(self.request, error_message)
else:
shift_helper, created = ShiftHelper.objects.get_or_create(
user_account=user_account, shift=shift_to_join)
if created:
messages.success(self.request, _(
u'You were successfully added to this shift.'))
if soft_conflicted_shifts:
warning_message = _(
u'The shift you joined overlaps with other shifts you already joined. Please check for \
conflicts:')
message_list = u'<ul>{}</ul>'.format('\n'.join(
[u'<li>{}</li>'.format(conflict) for conflict in
soft_conflicted_shifts]))
messages.warning(self.request,
mark_safe(u'{}<br/>{}'.format(warning_message,
message_list)))
else:
messages.warning(self.request, _(
u'You already signed up for this shift at {date_time}.').format(
date_time=shift_helper.joined_shift_at))
elif shift_to_leave:
try:
sh = ShiftHelper.objects.get(user_account=user_account, shift=shift_to_leave)
LogEntry.objects.log_action(
user_id=user.id,
content_type_id=ContentType.objects.get_for_model(ShiftHelper).id,
object_id=sh.id,
object_repr='User "{user}" @ shift "{shift}"'.format(user=user, shift=shift_to_leave),
action_flag=DELETION,
change_message='Initially joined at {}'.format(sh.joined_shift_at.isoformat()),
)
sh.delete()
except ShiftHelper.DoesNotExist:
# just catch the exception,
# user seems not to have signed up for this shift
pass
messages.success(self.request, _(
u'You successfully left this shift.'))
return super(PlannerView, self).form_valid(form)
def get_success_url(self):
"""
Redirect to the same page.
"""
return reverse('planner_by_facility', kwargs=self.kwargs)
| pitpalme/volunteer_planner | scheduler/views.py | Python | agpl-3.0 | 11,582 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the tray starter configuration page.
"""
from __future__ import unicode_literals
from .ConfigurationPageBase import ConfigurationPageBase
from .Ui_TrayStarterPage import Ui_TrayStarterPage
import Preferences
import UI.PixmapCache
class TrayStarterPage(ConfigurationPageBase, Ui_TrayStarterPage):
"""
Class implementing the tray starter configuration page.
"""
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget (QWidget)
"""
super(TrayStarterPage, self).__init__()
self.setupUi(self)
self.setObjectName("TrayStarterPage")
self.standardButton.setIcon(UI.PixmapCache.getIcon("erict.png"))
self.highContrastButton.setIcon(UI.PixmapCache.getIcon("erict-hc.png"))
self.blackWhiteButton.setIcon(UI.PixmapCache.getIcon("erict-bw.png"))
self.blackWhiteInverseButton.setIcon(
UI.PixmapCache.getIcon("erict-bwi.png"))
# set initial values
iconName = Preferences.getTrayStarter("TrayStarterIcon")
if iconName == "erict.png":
self.standardButton.setChecked(True)
elif iconName == "erict-hc.png":
self.highContrastButton.setChecked(True)
elif iconName == "erict-bw.png":
self.blackWhiteButton.setChecked(True)
elif iconName == "erict-bwi.png":
self.blackWhiteInverseButton.setChecked(True)
def save(self):
"""
        Public slot to save the tray starter configuration.
"""
if self.standardButton.isChecked():
iconName = "erict.png"
elif self.highContrastButton.isChecked():
iconName = "erict-hc.png"
elif self.blackWhiteButton.isChecked():
iconName = "erict-bw.png"
elif self.blackWhiteInverseButton.isChecked():
iconName = "erict-bwi.png"
Preferences.setTrayStarter("TrayStarterIcon", iconName)
def create(dlg):
"""
Module function to create the configuration page.
@param dlg reference to the configuration dialog
@return reference to the instantiated page (ConfigurationPageBase)
"""
page = TrayStarterPage()
return page
| testmana2/test | Preferences/ConfigurationPages/TrayStarterPage.py | Python | gpl-3.0 | 2,369 |
from ESSArch_Core.exceptions import ESSArchException
class UnknownTransformer(ESSArchException):
pass
| ESSolutions/ESSArch_Core | ESSArch_Core/fixity/transformation/exceptions.py | Python | gpl-3.0 | 108 |
"""
Performs management commands for the scheduler app
"""
import hashlib
from flask.ext.script import Manager
from sqlalchemy import exc
# Importing routes causes our URL routes to be registered
from src import routes
from src import models
from src import scheduler
scheduler.app.config.from_object(scheduler.ConfigDevelopment)
manager = Manager(scheduler.app)
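# Editor's note: with Flask-Script, this module is typically invoked as
# e.g. `python manage.py runserver` or `python manage.py shell`.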
def add_coaches():
"""
Adds two coaches (for testing)
"""
user_1 = models.User(
id='ecf1bcae-9c8f-11e5-b5b4-d895b95699bb',
fullname='Pat Blargstone',
username='pat',
password=hashlib.md5('secret').hexdigest())
coach_1 = models.Coach(
id='ee8d1d30-9c8f-11e5-89d4-d895b95699bb',
user_id=user_1.id)
user_2 = models.User(
id='ef2a95b0-9c8f-11e5-bd27-d895b95699bb',
fullname='Sandy Blargwright',
username='sandy',
password=hashlib.md5('secret').hexdigest())
coach_2 = models.Coach(
id='efad3330-9c8f-11e5-9654-d895b95699bb',
user_id=user_2.id)
try:
scheduler.db.session.add(user_1)
scheduler.db.session.add(user_2)
scheduler.db.session.add(coach_1)
scheduler.db.session.add(coach_2)
scheduler.db.session.commit()
    except exc.SQLAlchemyError:
        # The coaches already exist; roll back so the session stays usable.
        scheduler.db.session.rollback()
if __name__ == '__main__':
scheduler.db.create_all()
add_coaches()
manager.run()
| ginstrom/scheduler | manage.py | Python | mit | 1,385 |
"""Test class for Environment Preparation after a fresh installation"""
from robottelo import ssh
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.org import Org
from robottelo.cli.product import Product
from robottelo.cli.repository import Repository
from robottelo.cli.repository_set import RepositorySet
from robottelo.cli.subscription import Subscription
from robottelo.config import settings
from robottelo.performance.constants import MANIFEST_FILE_NAME
from robottelo.test import TestCase
class StandardPrepTestCase(TestCase):
    """Standard preparation process after a fresh Satellite 6 install.
    Standard Preparation Process:
    1. upload manifest,
    2. change CDN address,
    3. enable repositories,
    4. make savepoint
    """
@classmethod
def setUpClass(cls):
super(StandardPrepTestCase, cls).setUpClass()
# parameters for standard process test
# note: may need to change savepoint name
        cls.savepoint = settings.performance.fresh_install_savepoint
# parameters for uploading manifests
cls.manifest_file = settings.fake_manifest.url
cls.org_id = ''
# parameters for changing cdn address
cls.target_url = settings.performance.cdn_address
# parameters for enabling repositories
cls.sub_id = ''
cls.sub_name = ''
cls.pid = ''
# [repo-id,$basearch,$releasever]
cls.repository_list = [
[168, 'x86_64', '6Server'],
[2456, 'x86_64', '7Server'],
[1952, 'x86_64', '6.6'],
[2455, 'x86_64', '7.1'],
[166, 'x86_64', '6Server'],
[2463, 'x86_64', '7Server'],
[167, 'x86_64', '6Server'],
[2464, 'x86_64', '7Server'],
[165, 'x86_64', '6Server'],
[2462, 'x86_64', '7Server']
]
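        # Editor's note: each row follows the [repo-id, $basearch,
        # $releasever] layout noted above, e.g. [168, 'x86_64', '6Server']
        # enables repository-set id 168 for x86_64 on release 6Server in
        # _enable_repositories() below.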
def setUp(self):
self.logger.debug('Running test %s/%s',
type(self).__name__, self._testMethodName)
# Restore database to clean state
self._restore_from_savepoint(self.savepoint)
# Get organization-id
self.org_id = self._get_organization_id()
def _restore_from_savepoint(self, savepoint):
"""Restore from a given savepoint"""
if savepoint == '':
self.logger.warning('No savepoint while continuing test!')
return
self.logger.info('Reset db from /home/backup/{0}'.format(savepoint))
ssh.command('./reset-db.sh /home/backup/{0}'.format(savepoint))
def _download_manifest(self):
"""Utility function to download manifest from given URL"""
self.logger.info(
'Start downloading manifest: {0}'.format(MANIFEST_FILE_NAME))
result = ssh.command(
'rm -f {0}; curl {1} -o /root/{0}'
.format(MANIFEST_FILE_NAME, self.manifest_file))
if result.return_code != 0:
self.logger.error('Fail to download manifest!')
raise RuntimeError('Unable to download manifest. Stop!')
self.logger.info('Downloading manifest complete.')
def _upload_manifest(self):
"""Utility function to upload manifest"""
self.logger.debug('org-id is {0}'.format(self.org_id))
try:
Subscription.upload({
'file': '/root/{0}'.format(MANIFEST_FILE_NAME),
'organization-id': self.org_id,
})
except CLIReturnCodeError:
self.logger.error('Fail to upload manifest!')
raise RuntimeError('Invalid manifest. Stop!')
self.logger.info('Upload successful!')
# after uploading manifest, get all default parameters
self.pid = self._get_production_id()
(self.sub_id, self.sub_name) = self._get_subscription_id()
self.logger.debug('product id is {0}'.format(self.pid))
def _get_organization_id(self):
"""Get organization id"""
try:
result = Org.list(per_page=False)
except CLIReturnCodeError:
self.logger.error('Fail to list default organization.')
raise RuntimeError('Invalid organization id. Stop!')
return result[0]['id']
def _get_production_id(self):
"""Get available product id after uploading manifest"""
try:
result = Product.list(
{'organization-id': self.org_id},
per_page=False
)
except CLIReturnCodeError:
self.logger.error('Fail to list default products.')
raise RuntimeError('Invalid product id. Stop!')
for item in result:
if item['name'] == u'Red Hat Enterprise Linux Server':
return item['id']
def _get_subscription_id(self):
"""Utility function to get subscription id after uploading manifest"""
try:
result = Subscription.list(
{'organization-id': self.org_id},
per_page=False
)
except CLIReturnCodeError:
self.logger.error('Fail to list subscriptions!')
raise RuntimeError('Invalid subscription id. Stop!')
if not result:
self.logger.error('Fail to get subscription id!')
raise RuntimeError('Manifest has no subscription!')
subscription_id = result[0]['id']
subscription_name = result[0]['name']
self.logger.info(
'Subscribe to {0} with subscription id: {1}'
.format(subscription_name, subscription_id)
)
return (subscription_id, subscription_name)
def _update_cdn_address(self):
"""Utility function to update CDN address from given URL"""
if self.target_url == '':
raise RuntimeError('Invalid CDN address. Stop!')
Org.update({
'id': self.org_id,
'redhat-repository-url': self.target_url,
})
try:
result = Org.info({'id': self.org_id})
except CLIReturnCodeError:
self.logger.error('Fail to update CDN address!')
return
self.logger.info(
'RH CDN URL: {0}'
.format(result['red-hat-repository-url']))
def _enable_repositories(self):
"""Utility function to retrieve enabled repositories"""
for i, repo in enumerate(self.repository_list):
repo_id = repo[0]
basearch = repo[1]
releasever = repo[2]
self.logger.info(
'Enabling product {0}: repository id {1} '
'with baserach {2} and release {3}'
.format(i, repo_id, basearch, releasever))
# Enable repos from Repository Set
RepositorySet.enable({
'basearch': basearch,
'id': repo_id,
'product-id': self.pid,
'releasever': releasever,
})
# verify enabled repository list
result = Repository.list(
{'organization-id': self.org_id},
per_page=False
)
# repo_list_ids would contain all repositories in the hammer repo list
repo_list_ids = [repo['id'] for repo in result]
self.logger.debug(repo_list_ids)
    def test_standard_prep(self):
        """Add manifest to the Satellite server
        @Steps:
        1. download manifest
        2. upload to subscription
        3. update Red Hat CDN URL
        4. enable repositories
        5. take db snapshot backup
        @Assert: all steps succeed on a database restored to a clean state
        """
self._download_manifest()
self._upload_manifest()
self._update_cdn_address()
self._enable_repositories()
| danuzclaudes/robottelo | tests/foreman/performance/test_standard_prep.py | Python | gpl-3.0 | 7,716 |
"""
homeassistant.components.media_player.snapcast
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides functionality to interact with Snapcast clients.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.snapcast/
"""
import logging
import socket
from homeassistant.components.media_player import (
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, MediaPlayerDevice)
from homeassistant.const import STATE_OFF, STATE_ON
SUPPORT_SNAPCAST = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE
DOMAIN = 'snapcast'
REQUIREMENTS = ['snapcast==1.1.1']
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the Snapcast platform. """
import snapcast.control
host = config.get('host')
port = config.get('port', snapcast.control.CONTROL_PORT)
if not host:
_LOGGER.error('No snapserver host specified')
return
try:
server = snapcast.control.Snapserver(host, port)
except socket.gaierror:
_LOGGER.error('Could not connect to Snapcast server at %s:%d',
host, port)
return
add_devices([SnapcastDevice(client) for client in server.clients])
class SnapcastDevice(MediaPlayerDevice):
""" Represents a Snapcast client device. """
# pylint: disable=abstract-method
def __init__(self, client):
self._client = client
@property
def name(self):
""" Device name. """
return self._client.identifier
@property
def volume_level(self):
""" Volume level. """
return self._client.volume / 100
@property
def is_volume_muted(self):
""" Volume muted. """
return self._client.muted
@property
def supported_media_commands(self):
""" Flags of media commands that are supported. """
return SUPPORT_SNAPCAST
@property
def state(self):
""" State of the player. """
if self._client.connected:
return STATE_ON
return STATE_OFF
    def mute_volume(self, mute):
        """ Mute or unmute the client. """
        self._client.muted = mute
    def set_volume_level(self, volume):
        """ Set the volume level (0..1). """
        self._client.volume = round(volume * 100)
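    # Editor's note: Home Assistant hands set_volume_level() a float in
    # 0..1, so e.g. set_volume_level(0.5) sets the Snapcast client volume
    # to 50 on its native 0-100 scale.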
| nnic/home-assistant | homeassistant/components/media_player/snapcast.py | Python | mit | 2,333 |
from direct.directnotify import DirectNotifyGlobal
import DistributedBrutalFactoryAI
import DistributedFactoryAI
from toontown.toonbase import ToontownGlobals
from direct.showbase import DirectObject
class FactoryManagerAI(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('FactoryManagerAI')
factoryId = None
def __init__(self, air):
DirectObject.DirectObject.__init__(self)
self.air = air
def getDoId(self):
return 0
def createFactory(self, factoryId, entranceId, players):
factoryZone = self.air.allocateZone()
if FactoryManagerAI.factoryId is not None:
factoryId = FactoryManagerAI.factoryId
if entranceId == 2:
factory = DistributedBrutalFactoryAI.DistributedBrutalFactoryAI(self.air, factoryId, factoryZone, entranceId, players)
else:
factory = DistributedFactoryAI.DistributedFactoryAI(self.air, factoryId, factoryZone, entranceId, players)
factory.generateWithRequired(factoryZone)
return factoryZone
| Spiderlover/Toontown | toontown/coghq/FactoryManagerAI.py | Python | mit | 1,076 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Web',
'category': 'Hidden',
'version': '1.0',
'description':
"""
Odoo Web core module.
========================
This module provides the core of the Odoo Web Client.
""",
'depends': ['base'],
'auto_install': True,
'data': [
'views/webclient_templates.xml',
'views/report_templates.xml',
],
'qweb': [
"static/src/xml/base.xml",
"static/src/xml/kanban.xml",
"static/src/xml/rainbow_man.xml",
"static/src/xml/report.xml",
"static/src/xml/web_calendar.xml",
],
'bootstrap': True, # load translations for login screen
}
| maxive/erp | addons/web/__manifest__.py | Python | agpl-3.0 | 744 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../matchzoo'))
sys.path.insert(0, os.path.abspath('../../matchzoo/auto'))
sys.path.insert(0, os.path.abspath('../../matchzoo/data_generator'))
sys.path.insert(0, os.path.abspath('../../matchzoo/data_pack'))
sys.path.insert(0, os.path.abspath('../../matchzoo/datasets'))
sys.path.insert(0, os.path.abspath('../../matchzoo/embedding'))
sys.path.insert(0, os.path.abspath('../../matchzoo/engine'))
sys.path.insert(0, os.path.abspath('../../matchzoo/layers'))
sys.path.insert(0, os.path.abspath('../../matchzoo/losses'))
sys.path.insert(0, os.path.abspath('../../matchzoo/metrics'))
sys.path.insert(0, os.path.abspath('../../matchzoo/models'))
sys.path.insert(0, os.path.abspath('../../matchzoo/preprocessors'))
sys.path.insert(0, os.path.abspath('../../matchzoo/tasks'))
sys.path.insert(0, os.path.abspath('../../matchzoo/utils'))
# -- Project information -----------------------------------------------------
project = 'MatchZoo'
copyright = '2018, MatchZoo'
author = 'MatchZoo'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '2.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx_autodoc_typehints',
'nbsphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# from recommonmark.parser import CommonMarkParser
# source_parsers = {
# '.md':CommonMarkParser
# }
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MatchZoodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MatchZoo.tex', 'MatchZoo Documentation',
'MatchZoo', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'matchzoo', 'MatchZoo Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MatchZoo', 'MatchZoo Documentation',
author, 'MatchZoo', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| faneshion/MatchZoo | docs/source/conf.py | Python | apache-2.0 | 5,927 |
# runs integration tests against the osmo-smsc inserter
# Copyright (C) 2016 Henning Heinold <henning@itconsulting-heinold.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import sys
import smpplib.gsm
import smpplib.client
import smpplib.consts
@pytest.fixture
def smpp_client(request, smsc_inserter_image):
client = smpplib.client.Client("127.0.0.1", 9000)
def disconnect_client():
client.unbind()
client.disconnect()
request.addfinalizer(disconnect_client)
# Print when obtain message_id
client.set_message_received_handler(
lambda pdu: sys.stdout.write('delivered {}\n'.format(pdu.receipted_message_id)))
client.connect()
client.bind_transceiver(system_id="inserter-test", password="pass")
return client
def send_message(source, destination, message, smpp_client):
    # use a mutable container so the inner callback can update the count
    sent = {'sent': 0}
parts, encoding_flag, msg_type_flag = smpplib.gsm.make_parts(message)
for part in parts:
smpp_client.send_message(
source_addr_ton=smpplib.consts.SMPP_TON_INTL,
source_addr_npi=smpplib.consts.SMPP_NPI_ISDN,
source_addr=source,
dest_addr_ton=smpplib.consts.SMPP_TON_INTL,
dest_addr_npi=smpplib.consts.SMPP_NPI_ISDN,
destination_addr=destination,
short_message=part,
data_coding=encoding_flag,
#esm_class=msg_type_flag,
esm_class=smpplib.consts.SMPP_MSGMODE_FORWARD,
registered_delivery=False,
)
    def sent_message(pdu):
        """
        Once we have received the expected number of callbacks, break
        out of the listen loop entered below. With more time we could
        use a better approach.
        """
sent['sent'] = sent['sent'] + 1
if len(parts) == sent['sent']:
raise Exception("Break the loop")
smpp_client.set_message_sent_handler(sent_message)
try:
smpp_client.listen()
except:
pass
@pytest.fixture
def sms_parts(smpp_client):
source = '3009'
destination = '3110'
message = 'Hello'
send_message(source, destination, message, smpp_client)
return source, destination, message
@pytest.mark.usefixtures("smsc_inserter_image")
class TestInserter:
def test_inserter_server(self, sms_parts, mongo_client, smsc_database):
source, destination, message = sms_parts
# database will be create when send_message is called
db = mongo_client[smsc_database]
assert db.smsQueue.count() == 1
cursor = db.smsQueue.find()
for sms in cursor:
assert sms['sourceMSISDN'] == source
assert sms['destMSISDN'] == destination
assert sms['encodedMessageType'] == 'SMPPSubmitSM'
| zecke/osmo-smsc | integration-tests/inserter_test.py | Python | agpl-3.0 | 3,504 |
from django import http
from django.http import HttpResponseRedirect
from django.views.generic.simple import direct_to_template
from openid.consumer import consumer
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import pape, sreg
from openid.yadis.constants import YADIS_HEADER_NAME, YADIS_CONTENT_TYPE
from openid.server.trustroot import RP_RETURN_TO_URL_TYPE
from djopenid import util
PAPE_POLICIES = [
'AUTH_PHISHING_RESISTANT',
'AUTH_MULTI_FACTOR',
'AUTH_MULTI_FACTOR_PHYSICAL',
]
# List of (name, uri) for use in generating the request form.
POLICY_PAIRS = [(p, getattr(pape, p))
for p in PAPE_POLICIES]
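# Editor's note: each pair couples a short name with its PAPE policy URI,
# e.g. ('AUTH_PHISHING_RESISTANT', pape.AUTH_PHISHING_RESISTANT); the
# names label the request form while the URIs go into the PAPE request.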
def getOpenIDStore():
"""
Return an OpenID store object fit for the currently-chosen
database backend, if any.
"""
return util.getOpenIDStore('/tmp/djopenid_c_store', 'c_')
def getConsumer(request):
"""
Get a Consumer object to perform OpenID authentication.
"""
return consumer.Consumer(request.session, getOpenIDStore())
def renderIndexPage(request, **template_args):
template_args['consumer_url'] = util.getViewURL(request, startOpenID)
template_args['pape_policies'] = POLICY_PAIRS
response = direct_to_template(
request, 'consumer/index.html', template_args)
response[YADIS_HEADER_NAME] = util.getViewURL(request, rpXRDS)
return response
def startOpenID(request):
"""
Start the OpenID authentication process. Renders an
authentication form and accepts its POST.
* Renders an error message if OpenID cannot be initiated
* Requests some Simple Registration data using the OpenID
library's Simple Registration machinery
* Generates the appropriate trust root and return URL values for
this application (tweak where appropriate)
* Generates the appropriate redirect based on the OpenID protocol
version.
"""
if request.POST:
# Start OpenID authentication.
openid_url = request.POST['openid_identifier']
c = getConsumer(request)
error = None
try:
auth_request = c.begin(openid_url)
except DiscoveryFailure, e:
# Some other protocol-level failure occurred.
error = "OpenID discovery error: %s" % (str(e),)
if error:
# Render the page with an error.
return renderIndexPage(request, error=error)
# Add Simple Registration request information. Some fields
# are optional, some are required. It's possible that the
# server doesn't support sreg or won't return any of the
# fields.
sreg_request = sreg.SRegRequest(optional=['email', 'nickname'],
required=['dob'])
auth_request.addExtension(sreg_request)
# Add PAPE request information. We'll ask for
# phishing-resistant auth and display any policies we get in
# the response.
requested_policies = []
policy_prefix = 'policy_'
for k, v in request.POST.iteritems():
if k.startswith(policy_prefix):
policy_attr = k[len(policy_prefix):]
if policy_attr in PAPE_POLICIES:
requested_policies.append(getattr(pape, policy_attr))
if requested_policies:
pape_request = pape.Request(requested_policies)
auth_request.addExtension(pape_request)
# Compute the trust root and return URL values to build the
# redirect information.
trust_root = util.getViewURL(request, startOpenID)
return_to = util.getViewURL(request, finishOpenID)
# Send the browser to the server either by sending a redirect
# URL or by generating a POST form.
if auth_request.shouldSendRedirect():
url = auth_request.redirectURL(trust_root, return_to)
return HttpResponseRedirect(url)
else:
# Beware: this renders a template whose content is a form
# and some javascript to submit it upon page load. Non-JS
# users will have to click the form submit button to
# initiate OpenID authentication.
form_id = 'openid_message'
form_html = auth_request.formMarkup(trust_root, return_to,
False, {'id': form_id})
return direct_to_template(
request, 'consumer/request_form.html', {'html': form_html})
return renderIndexPage(request)
def finishOpenID(request):
"""
Finish the OpenID authentication process. Invoke the OpenID
library with the response from the OpenID server and render a page
detailing the result.
"""
result = {}
# Because the object containing the query parameters is a
# MultiValueDict and the OpenID library doesn't allow that, we'll
# convert it to a normal dict.
# OpenID 2 can send arguments as either POST body or GET query
# parameters.
request_args = util.normalDict(request.GET)
if request.method == 'POST':
request_args.update(util.normalDict(request.POST))
if request_args:
c = getConsumer(request)
# Get a response object indicating the result of the OpenID
# protocol.
return_to = util.getViewURL(request, finishOpenID)
response = c.complete(request_args, return_to)
# Get a Simple Registration response object if response
# information was included in the OpenID response.
sreg_response = {}
if response.status == consumer.SUCCESS:
sreg_response = sreg.SRegResponse.fromSuccessResponse(response)
# Get a PAPE response object if response information was
# included in the OpenID response.
pape_response = None
if response.status == consumer.SUCCESS:
pape_response = pape.Response.fromSuccessResponse(response)
# Map different consumer status codes to template contexts.
results = {
consumer.CANCEL:
{'message': 'OpenID authentication cancelled.'},
consumer.FAILURE:
{'error': 'OpenID authentication failed.'},
consumer.SUCCESS:
{'url': response.getDisplayIdentifier(),
'sreg': sreg_response.items(),
'pape': pape_response},
}
result = results[response.status]
if isinstance(response, consumer.FailureResponse):
# In a real application, this information should be
# written to a log for debugging/tracking OpenID
# authentication failures. In general, the messages are
# not user-friendly, but intended for developers.
result['failure_reason'] = response.message
return renderIndexPage(request, **result)
def rpXRDS(request):
"""
Return a relying party verification XRDS document
"""
return util.renderXRDS(
request,
[RP_RETURN_TO_URL_TYPE],
[util.getViewURL(request, finishOpenID)])
| alon/polinax | libs/external_libs/python-openid-2.1.1/examples/djopenid/consumer/views.py | Python | gpl-2.0 | 7,062 |
"""
edx-organizations management commands tests package initialization module
"""
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/organizations/management/commands/tests/__init__.py | Python | agpl-3.0 | 82 |
from abc import abstractmethod
from treehopper.libraries.sensors import Pollable
class Magnetometer(Pollable):
"""Three-axis magnetic sensor"""
def __init__(self):
super().__init__()
self._magnetometer = [0.0, 0.0, 0.0]
@property
def magnetometer(self):
if self.auto_update_when_property_read:
self.update()
return self._magnetometer
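    # Editor's sketch (hypothetical subclass): a concrete driver only has
    # to fill self._magnetometer inside update(); reading `magnetometer`
    # then auto-polls when auto_update_when_property_read is set.
    #
    #     class FakeMagnetometer(Magnetometer):
    #         def update(self):
    #             self._magnetometer = [1.0, 2.0, 3.0]  # made-up values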
@abstractmethod
def update(self):
        pass
 | treehopper-electronics/treehopper-sdk | Python/treehopper/libraries/sensors/magnetic/magnetometer.py | Python | mit | 453 |
from opal.TestProblemCollections import CUTEr
from opal.TestProblemCollections.cuterfactory import CUTErQuery
'''
Some notes on special problems:
- JUNKTURN is solved (ecode = 0) with a surrogate whose tolerance is less
  strict. With the original blackbox, JUNKTURN always returns exit code 2.
  However, the surrogate's number of function evaluations and computing
  time are much higher, so the surrogate behaves inversely to what we
  desire.
- DENCONVNE is solved with the surrogate, but with the blackbox it returns
  ECODE = 1 or -1 and needs about 1000 times more function evaluations
  than the surrogate.
- CRESC132 sometimes returns code 2 and sometimes code -1. Its behavior on
  the blackbox and the surrogate is similar.
- A5NSSSSL: for p0, ECODE = 0; for almost all other parameter values
  (30 of 32), ECODE = -1, with huge computing time and FEVAL (10-30
  minutes compared to 17 seconds, and 36521 evaluations compared to 71).
  The two parameter values with ECODE = 0 correspond to two special
  parameter sets: the initial parameter set and a better one. This means
  A5NSSSSL has a huge impact on the similarity between the original model
  (730 test problems) and the reduced model (40 test problems). It also
  affects the clustering: A5NSSSSL is not counted in the reduced model,
  so the reduced model can be improved easily without the same behavior
  appearing on the original model.
'''
query = CUTErQuery(constraintType='BNLQO')
# Select the constrained CUTEr problems matching the query.
CUTEr_constrained_problems = [prb for prb in CUTEr if query.match(prb)]
# Too few degrees of freedom
too_few_freedom_problems = ['ARGAUSS', 'ARGLALE', 'ARGLBLE', 'ARGLCLE',
'ARWHDNE', 'CHNRSBNE','GROUPING','GROWTH',
'LEWISPOL','NYSTROM5','QR3DBD','SPMSQRT',
'SYNPOP24','YFITNE']
# Fixed variables, exit code = -3
fixed_variables_problems = ['RAYBENDS', 'RAYBENDL']
# Has difficulty with linear solver and CUTEr, CATENA can be solved by MUMPS
technical_difficulty_problems = ['CATENA', 'COSHFUN']
# Exit code = -1: number of iterations exceeds 3000
hard_problems = ['A2NNDNSL', 'BLOCKQP1', 'BLOCKQP3', 'BLOCKQP5', 'BRAINPC0',
'BRAINPC2', 'BRAINPC4', 'BRAINPC6', 'BRITGAS', 'CATENARY',
'CRESC100', 'CRESC50', 'FLOSP2HH', 'GLIDER', 'HATFLDF',
'HS87', 'HVYCRASH', 'KTMODEL', 'LIPPERT2', 'LUKVLE11',
'LUKVLE15', 'LUKVLI1', 'MANNE', 'MSS3', 'NUFFIELD', 'ORTHREGE',
'PALMER5A', 'PALMER5E', 'PALMER7A', 'PALMER7E', 'POLAK3',
'SARO', 'SAROMM']
# Exit code = -2. Restoration failed
restoration_failed_problems = ['BRAINPC1', 'BRAINPC3', 'BRAINPC5', 'BRAINPC7',
'BRAINPC8', 'BRAINPC9', 'C-RELOAD', 'EQC',
'HIMMELBJ', 'LUKVLE17', 'LUKVLE2', 'LUKVLE4',
'LUKVLI2', 'LUKVLI4', 'NET2', 'NET3',
'ORTHRDS2', 'PFIT2', 'QCNEW', 'S365', 'S365MOD',
'SSEBNLN', 'VANDERM4']
# Runtime > 100 sec
long_problems = ['NCVXQP1', 'NCVXQP2', 'NCVXQP3', 'NCVXQP4', 'NCVXQP5',
'NCVXQP6', 'NCVXQP7', 'NCVXQP8', 'NCVXQP9', 'ODNAMUR']
# Runtime approx 10h
extreme_long_problems = ['LUKVLE15']
# Exit code = 1: Solved to acceptable level
Solved_acceptable_level_problems = ['BLEACHNG', 'BRATU2DT', 'CATMIX', 'CSFI2',
'DECONVNE', 'LCH', 'LUKVLE18', 'READING9',
'ROBOT', 'SINROSNB', 'SOSQP1']
# Exit code = 2: Converged to a point of local infeasibility. Problem may be
# infeasible.
infeasible_problems = ['A2NNDNIL', 'A2NSDSIL', 'A5NNDNIL', 'A5NSDSIL', 'ARTIF',
'CONT6-QQ', 'CRESC132', 'CRESC4', 'DISCS', 'DRCAVTY2',
'DRCAVTY3', 'EG3', 'FLOSP2HL', 'FLOSP2HM', 'HIMMELBD',
'JUNKTURN', 'LINCONT', 'LIPPERT1', 'MODEL', 'NASH',
'OSCIPANE', 'PFIT3', 'PFIT4', 'POWELLSQ', 'WOODSNE']
# Exit code = 4: Iterates diverging; problem might be unbounded.
unbounded_problems = ['ELATTAR', 'MESH', 'STATIC3']
# Exit code = 6: Feasible point for square problem found.
feasible_square_problems = ['VANDERM1', 'VANDERM2', 'VANDERM3']
# Unstable problems: sometimes return code 0, sometimes do not
unstable_problems = ['POWELLSQ', 'VANDERM3']
ipopt_solvable_problems = [prob for prob in CUTEr_constrained_problems if
(prob.name not in too_few_freedom_problems) and
(prob.name not in fixed_variables_problems) and
(prob.name not in technical_difficulty_problems) and
(prob.name not in hard_problems) and
(prob.name not in restoration_failed_problems)]
ipopt_opal_test_problems = [prob for prob in ipopt_solvable_problems if
prob.name not in long_problems]
ipopt_hard_problems = [prob for prob in CUTEr_constrained_problems if
(prob.name in hard_problems) and
(prob.name not in extreme_long_problems)]
# Exit code = 0, and solving time <= 0.001 for parameter pardefault
ipopt_opal_surrogate_problems = [prob for prob in CUTEr_constrained_problems
if prob.name in ['AIRCRFTA', 'BOOTH', 'BT12', 'BT3',
'CHACONN1', 'CHACONN2', 'CUBENE',
'DIXCHLNG', 'EG1', 'EXTRASIM',
'GENHS28', 'GOTTFR', 'HATFLDA',
'HIMMELBA', 'HIMMELBC', 'HIMMELBE',
'HS14', 'HS22', 'HS28', 'HS3', 'HS31',
'HS35I', 'HS3MOD', 'HS4', 'HS40',
'HS48', 'HS50', 'HS51', 'HS52',
'HS55', 'HS60', 'HS78', 'HS79', 'HS8',
'HS80', 'HS81', 'HS9', 'HS99',
'HYPCIR', 'KIWCRESC', 'LSQFIT',
'MAKELA2', 'MARATOS', 'MIFFLIN1',
'MWRIGHT', 'ORTHREGB', 'PALMER5D',
'SIMBQP', 'SINVALNE', 'SUPERSIM',
'TAME', 'ZANGWIL3', 'ZECEVIC4']]
# Randomly selected representatives
ipopt_opal_p0_clustered_problems_0 = [prob for prob in CUTEr_constrained_problems
if prob.name in ['PALMER1E', 'HS28',
'CUBENE', 'HS63', 'BT4',
'A0ESDNDL', 'EIGMINB',
'HS79', 'BROWNALE',
'HS77', 'BT2', 'DTOC1ND',
'LUKVLE7', 'BT7',
'LUKVLE10', 'BT6',
'DUAL4', 'QR3D', 'GASOIL',
'SPANHYD', 'GRIDNETF',
'HS100LNP', 'JANNSON3',
'OPTCNTRL', 'TRAINF',
'HELSBY', 'QPCBLEND',
'ZAMB2', 'LCH',
'FLETCHER', 'HS109',
'FLOSP2HL', 'DISC2',
'CORE1', 'QPCBOEI2',
'HYDROELL', 'ORBIT2',
'OET2', 'QPNBOEI2']]
# Most representative
ipopt_opal_p0_clustered_problems_1 = [prob
for prob in CUTEr_constrained_problems
if prob.name in['ROBOTARM', 'GENHS28',
'ORTHREGB', 'ORTHRDM2',
'HS41', 'PORTFL3',
'LUKVLE3', 'HS9', 'HS62',
'OPTCTRL6', 'HS69',
'HAGER4', 'HATFLDG',
'TRIGGER', 'GRIDNETG',
'PINENE', 'BDVALUES',
'HEART8', 'GASOIL',
'ALLINITC', 'ORTHRGDS',
'SREADIN3', 'SAWPATH',
'DEGENLPB', 'TRAINF',
'HELSBY', 'QPCBLEND',
'A5NSDSDM', 'LCH',
'PRODPL1', 'OPTMASS',
'A0NNSNSL', 'ERRINBAR',
'CORE1', 'HADAMARD',
'QPNSTAIR', 'ORBIT2',
'AGG', 'CORKSCRW']]
# Most representative, clustered by measure map (multiple soms)
ipopt_opal_p0_clustered_problems_2 = [prob
for prob in CUTEr_constrained_problems
if prob.name in ['POWELL20', 'BLOWEYB',
'BLEACHNG', 'CSFI2',
'CLNLBEAM', 'PALMER1',
'CORKSCRW', 'DRUGDISE',
'ORTHREGA', 'SSNLBEAM',
'A5NNDNSL', 'STEENBRG',
'SIPOW1', 'SIPOW2M',
'MODEL', 'PFIT4',
'VANDERM3', 'YFIT',
'DISCS', 'HIMMELP5',
'PALMER3A', 'DUALC8',
'HYDROELS', 'OPTMASS',
'FLETCHER', 'FLOSP2HL',
'CRESC4', 'QR3DLS',
'ELATTAR', 'MESH',
'A2NNDNDL', 'CHEBYQAD',
'QPCSTAIR', 'GAUSSELM',
'READING9', 'LUKVLI12',
'SOSQP1', 'LCH',
'VANDERM1', 'ROBOT',
'A0NNSNSL', 'HS99EXP',
'A5ENSNDL', 'SMMPSF',
'CORE2', 'A4X12',
'STATIC3', 'MAXLIKA',
'HS38', 'HS13',
'PALMER4', 'EG3',
'PALMER1B', 'DEMBO7',
'POWELLSQ', 'PALMER6A',
'PALMER3', 'OET7',
'TWIRISM1', 'NET1',
'ROTDISC', 'CONT6-QQ',
'BIGGSC4', 'ERRINBAR']]
test_problems = [prob for prob in CUTEr_constrained_problems if
prob.name in ['3PK','A0ENDNDL','A0ENINDL']]
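# Illustrative sketch: when run as a script with the OPAL CUTEr collection
# installed, report how many problems each filter above keeps. The printed
# counts depend on the local CUTEr installation.
if __name__ == '__main__':
    print len(CUTEr_constrained_problems), 'constrained problems'
    print len(ipopt_solvable_problems), 'solvable problems'
    print len(ipopt_opal_test_problems), 'problems kept for OPAL tests'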
| dpo/opal | examples/ipopt/ipopt_test_problems.py | Python | lgpl-3.0 | 12,281 |
#!/usr/bin/env python
"""Tests for the puppies package."""
| LifeBuddy/puppies | puppies/test/__init__.py | Python | agpl-3.0 | 60 |
import random
import matplotlib.pyplot as plt
# necessary packages
def throwdice(dicetype, thrownumber=1):
""" Throws dice """
value = 0
    for _ in range(thrownumber):
value = value + random.randint(1, dicetype)
return value
# begin diagnostics of dicethrows in game
# known
class combination(object):
"""
Combination is a type of throw.
    arguments: whattothrow -> the throw as a string, e.g. "2d6+3"
"""
    def __init__(self, whattothrow):
        self.whattothrow = whattothrow
        self.content = {}
        # parse segments separated by '+'; each segment is either a dice
        # group 'NdM' (N dice with M sides) or a plain constant
        for segment in self.whattothrow.split("+"):
            if "d" in segment:
                count, dicetype = segment.split("d")
                # 'd6' is shorthand for '1d6'
                self.content[dicetype] = int(count) if count else 1
            else:
                self.content['constant'] = int(segment)
def do(self):
summa = 0
for dice in self.content.keys():
if dice == "constant":
summa += self.content["constant"]
else:
summa += throwdice(int(dice), self.content[dice])
return summa
def statistics(self, plot=False, average=False, sort=False):
        # the exact distribution is built up one die at a time
        dump = [0]  # this is the easiest way to generate a histogram
for dices in self.content.keys():
if dices == "constant":
i = 0
for element in dump:
dump[i] += self.content["constant"]
i = i + 1
else:
top = 1 + int(self.content[dices])
for throws in range(1, top):
                    # extend the running distribution with this die's outcomes
i = 0
toadd = []
for element in dump:
dump[i] += 1
i = i + 1 # one is added to the existing dump
i = 0
for value in range(2, (int(dices) + 1)):
for element in dump:
toadd.append(dump[i] + value - 1)
i = i + 1
i = 0
dump = dump + toadd
if plot is True:
oszlops = max(dump) - min(dump) + 1
plt.hist(sorted(dump), bins=oszlops, normed=1)
# plt.show()
if sort is True:
            # count occurrences of each outcome
            paired = {}
            for element in sorted(dump):
                paired[str(element)] = paired.get(str(element), 0) + 1
            return paired
def manythrows(self, plot=False, throws=1000):
"""Throws the dice combination many times, capable of plotting it"""
# results=[]
dump = []
for i in range(1, throws):
throw = self.do()
# results[self.do()]+=1
dump.append(throw)
if plot is False:
# return results
pass
if plot is True:
# return results
oszlops = max(dump) - min(dump) + 1
plt.hist(dump, bins=oszlops, normed=1)
# plt.show()
# print dump
# class definition
# created class of combination, with many functions
plt.show()
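# Illustrative usage sketch: roll the combination "2d6+3" once, then compare
# a sampled histogram against the exact distribution computed above.
if __name__ == "__main__":
    combo = combination("2d6+3")
    print "single roll:", combo.do()
    combo.manythrows(plot=True, throws=5000)
    combo.statistics(plot=True)
    plt.show()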
| kergely/Character-creator-szerep | src/dices.py | Python | mit | 3,896 |
#!/usr/bin/python
#Copyright (C) 2009 Gabes Jean, naparuba@gmail.com
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#File for a Thrift class which can be used by the status-dat-broker
import re
import copy
import os
import time
from shinken.external_command import ExternalCommand
from shinken.util import from_bool_to_int, from_float_to_int, to_int, to_split, get_customs_keys, get_customs_values
from shinken.modules.livestatus_broker.hooker import Hooker
from shinken.modules.livestatus_broker.mapping import out_map
from shinken.modules.livestatus_broker.livestatus_counters import LiveStatusCounters
from shinken.modules.livestatus_broker.log_line import Logline
def join_with_separators(prop, ref, request, *args):
if request.response.outputformat == 'csv':
return request.response.separators[3].join([str(arg) for arg in args])
elif request.response.outputformat == 'json' or request.response.outputformat == 'python':
return args
    else:
        return None
def worst_host_state(state_1, state_2):
"""Return the worst of two host states."""
#lambda x: reduce(lambda g, c: c if g == 0 else (c if c == 1 else g), (y.state_id for y in x), 0),
if state_2 == 0:
return state_1
if state_1 == 1:
return state_1
return state_2
def worst_service_state(state_1, state_2):
"""Return the worst of two service states."""
#reduce(lambda g, c: c if g == 0 else (c if c == 2 else (c if (c == 3 and g != 2) else g)), (z.state_id for y in x for z in y.services if z.state_type_id == 1), 0),
if state_2 == 0:
return state_1
if state_1 == 2:
return state_1
if state_1 == 3 and state_2 != 2:
return state_1
return state_2
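# Illustrative examples of the precedence rules above (standard state ids:
# host state 1 = DOWN and service state 2 = CRITICAL take precedence):
#   worst_host_state(0, 1)    -> 1
#   worst_host_state(1, 2)    -> 1
#   worst_service_state(3, 2) -> 2
#   worst_service_state(0, 3) -> 3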
class Thrift_status(object, Hooker):
"""A class that represents the status of all objects in the broker
"""
# Use out_map from the mapping.py file
out_map = out_map
def __init__(self, configs, hosts, services, contacts, hostgroups, servicegroups, contactgroups, timeperiods, commands, schedulers, pollers, reactionners, brokers, dbconn, pnp_path, return_queue):
self.configs = configs
self.hosts = hosts
self.services = services
self.contacts = contacts
self.hostgroups = hostgroups
self.servicegroups = servicegroups
self.contactgroups = contactgroups
self.timeperiods = timeperiods
self.commands = commands
self.schedulers = schedulers
self.pollers = pollers
self.reactionners = reactionners
self.brokers = brokers
self.dbconn = dbconn
Thrift_status.pnp_path = pnp_path
self.debuglevel = 2
self.dbconn.row_factory = self.row_factory
self.return_queue = return_queue
self.create_out_map_delegates()
self.create_out_map_hooks()
# add Host attributes to Hostsbygroup etc.
for attribute in Thrift_status.out_map['Host']:
Thrift_status.out_map['Hostsbygroup'][attribute] = Thrift_status.out_map['Host'][attribute]
for attribute in self.out_map['Service']:
Thrift_status.out_map['Servicesbygroup'][attribute] = Thrift_status.out_map['Service'][attribute]
for attribute in self.out_map['Service']:
Thrift_status.out_map['Servicesbyhostgroup'][attribute] = Thrift_status.out_map['Service'][attribute]
self.counters = LiveStatusCounters()
def row_factory(self, cursor, row):
"""Handler for the sqlite fetch method."""
return Logline(cursor, row)
def handle_request(self, data):
"""Execute the thrift request.
        This function creates a ThriftRequest object, calls the parser,
handles the execution of the request and formatting of the result.
"""
request = ThriftRequest(data, self.configs, self.hosts, self.services,
self.contacts, self.hostgroups, self.servicegroups, self.contactgroups, self.timeperiods, self.commands,
self.schedulers, self.pollers, self.reactionners, self.brokers, self.dbconn, self.pnp_path, self.return_queue, self.counters)
request.parse_input(data)
#print "REQUEST\n%s\n" % data
to_del = []
if sorted([q.my_type for q in request.queries]) == ['command', 'query', 'wait']:
# The Multisite way
for query in [q for q in request.queries if q.my_type == 'command']:
result = query.launch_query()
response = query.response
response.format_live_data(result, query.columns, query.aliases)
output, keepalive = response.respond()
output = [q for q in request.queries if q.my_type == 'wait'] + [q for q in request.queries if q.my_type == 'query']
elif sorted([q.my_type for q in request.queries]) == ['query', 'wait']:
# The Thruk way
output = [q for q in request.queries if q.my_type == 'wait'] + [q for q in request.queries if q.my_type == 'query']
keepalive = True
elif sorted([q.my_type for q in request.queries]) == ['command', 'query']:
for query in [q for q in request.queries if q.my_type == 'command']:
result = query.launch_query()
response = query.response
response.format_live_data(result, query.columns, query.aliases)
output, keepalive = response.respond()
for query in [q for q in request.queries if q.my_type == 'query']:
# This was a simple query, respond immediately
result = query.launch_query()
# Now bring the retrieved information to a form which can be sent back to the client
response = query.response
response.format_live_data(result, query.columns, query.aliases)
output, keepalive = response.respond()
elif sorted([q.my_type for q in request.queries]) == ['query']:
for query in [q for q in request.queries if q.my_type == 'query']:
# This was a simple query, respond immediately
result = query.launch_query()
# Now bring the retrieved information to a form which can be sent back to the client
response = query.response
response.format_live_data(result, query.columns, query.aliases)
output, keepalive = response.respond()
elif sorted([q.my_type for q in request.queries]) == ['command']:
for query in [q for q in request.queries if q.my_type == 'command']:
result = query.launch_query()
response = query.response
response.format_live_data(result, query.columns, query.aliases)
output, keepalive = response.respond()
elif [q.my_type for q in request.queries if q.my_type != 'command'] == []:
# Only external commands. Thruk uses it when it sends multiple
# objects into a downtime.
for query in [q for q in request.queries if q.my_type == 'command']:
result = query.launch_query()
response = query.response
response.format_live_data(result, query.columns, query.aliases)
output, keepalive = response.respond()
else:
# We currently do not handle this kind of composed request
output = ""
print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
print "We currently do not handle this kind of composed request"
print sorted([q.my_type for q in request.queries])
#print "RESPONSE\n%s\n" % output
print "DURATION %.4fs" % (time.time() - request.tic)
return output, keepalive
def create_out_map_delegates(self):
"""Add delegate keys for certain attributes.
Some attributes are not directly reachable via prop or
need a complicated depythonize function.
Example: Logline (the objects created for a "GET log" request
have the column current_host_state. The Logline object does
not have an attribute of this name, but a log_host attribute.
The Host object represented by log_host has an attribute state
which is the desired current_host_state. Because it's the same
for all columns starting with current_host, a rule can
be applied that automatically redirects the resolving to the
corresponding object. Instead of creating a complicated
depythonize handler which gets log_host and then state, two new
keys for Logline/current_host_state are added:
delegate = log_host
as = state
This instructs the hook function to first get attribute state of
the object represented by log_host.
"""
delegate_map = {
'Logline' : {
'current_service_' : 'log_service',
'current_host_' : 'log_host',
},
'Service' : {
'host_' : 'host',
},
'Comment' : {
'service_' : 'ref',
'host_' : 'ref',
},
'Downtime' : {
'service_' : 'ref',
'host_' : 'ref',
}
}
for objtype in Thrift_status.out_map:
for attribute in Thrift_status.out_map[objtype]:
entry = Thrift_status.out_map[objtype][attribute]
if objtype in delegate_map:
for prefix in delegate_map[objtype]:
if attribute.startswith(prefix):
if 'delegate' not in entry:
entry['delegate'] = delegate_map[objtype][prefix]
entry['as'] = attribute.replace(prefix, '')
def count_event(self, counter):
self.counters.increment(counter)
| baloo/shinken | shinken/modules/thrift_broker/thrift_status.py | Python | agpl-3.0 | 10,583 |
#!/usr/bin/python
size=77050
arr = [None] * size
max_value=-1
max_position=-1
for i in range(1, size):
    if i % 1000 == 0:
        print "got to", i
    count = 0
    series = []
    n = i
    while n != 1:
        # '>=' (not '>') so that arr[n] below is always a valid index
        if n >= len(arr):
            diff = n - len(arr) + 1
            arr.extend([None] * diff)
            print "extended to", len(arr)
        if arr[n] is not None:
            # reuse the memoized chain length of n
            count = arr[n] + len(series)
            break
        series.append(n)
        if n % 2 == 0:
            n = n / 2
        else:
            n = n * 3 + 1
        count += 1
    # write the chain length back for every value seen on the way down,
    # so later starting points can reuse it (the original only did this
    # on the memoized branch, and clobbered 'count' while doing so)
    fill = count
    for k in series:
        arr[k] = fill
        fill -= 1
    if count > max_value:
        max_position = i
        max_value = count
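# Sanity check sketch (well-known value): the chain starting at 27 takes
# 111 steps to reach 1, so after the loop arr[27] should equal 111.
# assert arr[27] == 111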
print "max_position", max_position
print "max_value", max_value
| veltzer/riddling | instances/series/solution_better.py | Python | gpl-3.0 | 627 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import codecs
import errno
import fnmatch
import locale
import logging
import os
import pytz
import re
import shutil
import sys
import traceback
import pickle
import datetime
from collections import Hashable
from contextlib import contextmanager
import dateutil.parser
from functools import partial
from itertools import groupby
from jinja2 import Markup
from operator import attrgetter
from posixpath import join as posix_join
logger = logging.getLogger(__name__)
def strftime(date, date_format):
'''
Replacement for built-in strftime
This is necessary because of the way Py2 handles date format strings.
Specifically, Py2 strftime takes a bytestring. In the case of text output
(e.g. %b, %a, etc), the output is encoded with an encoding defined by
locale.LC_TIME. Things get messy if the formatting string has chars that
are not valid in LC_TIME defined encoding.
This works by 'grabbing' possible format strings (those starting with %),
formatting them with the date, (if necessary) decoding the output and
replacing formatted output back.
'''
c89_directives = 'aAbBcdfHIjmMpSUwWxXyYzZ%'
strip_zeros = lambda x: x.lstrip('0') or '0'
# grab candidate format options
format_options = '%[-]?.'
candidates = re.findall(format_options, date_format)
# replace candidates with placeholders for later % formatting
template = re.sub(format_options, '%s', date_format)
# we need to convert formatted dates back to unicode in Py2
# LC_TIME determines the encoding for built-in strftime outputs
lang_code, enc = locale.getlocale(locale.LC_TIME)
formatted_candidates = []
for candidate in candidates:
# test for valid C89 directives only
if candidate[-1] in c89_directives:
# check for '-' prefix
if len(candidate) == 3:
# '-' prefix
candidate = '%{}'.format(candidate[-1])
conversion = strip_zeros
else:
conversion = None
# format date
if isinstance(date, SafeDatetime):
formatted = date.strftime(candidate, safe=False)
else:
formatted = date.strftime(candidate)
# convert Py2 result to unicode
if not six.PY3 and enc is not None:
formatted = formatted.decode(enc)
# strip zeros if '-' prefix is used
if conversion:
formatted = conversion(formatted)
else:
formatted = candidate
formatted_candidates.append(formatted)
# put formatted candidates back and return
return template % tuple(formatted_candidates)
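# Illustrative usage sketch (hypothetical dates):
#   strftime(SafeDatetime(2014, 7, 3), '%d/%m/%Y')  -> '03/07/2014'
#   strftime(SafeDatetime(2014, 7, 3), '%-d %B %Y') -> '3 July 2014'
# (the second example assumes an English LC_TIME locale)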
class SafeDatetime(datetime.datetime):
'''Subclass of datetime that works with utf-8 format strings on PY2'''
def strftime(self, fmt, safe=True):
'''Uses our custom strftime if supposed to be *safe*'''
if safe:
return strftime(self, fmt)
else:
return super(SafeDatetime, self).strftime(fmt)
class DateFormatter(object):
'''A date formatter object used as a jinja filter
Uses the `strftime` implementation and makes sure jinja uses the locale
defined in LOCALE setting
'''
def __init__(self):
self.locale = locale.setlocale(locale.LC_TIME)
def __call__(self, date, date_format):
old_lc_time = locale.setlocale(locale.LC_TIME)
old_lc_ctype = locale.setlocale(locale.LC_CTYPE)
locale.setlocale(locale.LC_TIME, self.locale)
# on OSX, encoding from LC_CTYPE determines the unicode output in PY3
# make sure it's same as LC_TIME
locale.setlocale(locale.LC_CTYPE, self.locale)
formatted = strftime(date, date_format)
locale.setlocale(locale.LC_TIME, old_lc_time)
locale.setlocale(locale.LC_CTYPE, old_lc_ctype)
return formatted
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
From django.utils.encoding.
"""
if not six.PY3:
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
class memoized(object):
"""Function decorator to cache return values.
If called later with the same arguments, the cached value is returned
(not reevaluated).
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return partial(self.__call__, obj)
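# Illustrative usage sketch of the decorator above (hypothetical function):
#   @memoized
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
# fib(30) is then computed in linear rather than exponential time.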
def deprecated_attribute(old, new, since=None, remove=None, doc=None):
"""Attribute deprecation decorator for gentle upgrades
For example:
class MyClass (object):
@deprecated_attribute(
old='abc', new='xyz', since=(3, 2, 0), remove=(4, 1, 3))
def abc(): return None
def __init__(self):
xyz = 5
Note that the decorator needs a dummy method to attach to, but the
content of the dummy method is ignored.
"""
def _warn():
version = '.'.join(six.text_type(x) for x in since)
message = ['{} has been deprecated since {}'.format(old, version)]
if remove:
version = '.'.join(six.text_type(x) for x in remove)
message.append(
' and will be removed by version {}'.format(version))
message.append('. Use {} instead.'.format(new))
logger.warning(''.join(message))
logger.debug(''.join(
six.text_type(x) for x in traceback.format_stack()))
def fget(self):
_warn()
return getattr(self, new)
def fset(self, value):
_warn()
setattr(self, new, value)
def decorator(dummy):
return property(fget=fget, fset=fset, doc=doc)
return decorator
def get_date(string):
"""Return a datetime object from a string.
If no format matches the given date, raise a ValueError.
"""
string = re.sub(' +', ' ', string)
default = SafeDatetime.now().replace(hour=0, minute=0,
second=0, microsecond=0)
try:
return dateutil.parser.parse(string, default=default)
except (TypeError, ValueError):
raise ValueError('{0!r} is not a valid date'.format(string))
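# Illustrative examples (hypothetical inputs):
#   get_date('2012-11-30 12:05')  -> datetime for that moment
#   get_date('not a date')        -> raises ValueError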
@contextmanager
def pelican_open(filename, mode='rb', strip_crs=(sys.platform == 'win32')):
"""Open a file and return its content"""
with codecs.open(filename, mode, encoding='utf-8') as infile:
content = infile.read()
        if content[:1] == codecs.BOM_UTF8.decode('utf8'):
content = content[1:]
if strip_crs:
content = content.replace('\r\n', '\n')
yield content
def slugify(value, substitutions=()):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
    Taken from Django sources.
"""
# TODO Maybe steal again from current Django 1.5dev
value = Markup(value).striptags()
# value must be unicode per se
import unicodedata
from unidecode import unidecode
# unidecode returns str in Py2 and 3, so in Py2 we have to make
# it unicode again
value = unidecode(value)
if isinstance(value, six.binary_type):
value = value.decode('ascii')
# still unicode
value = unicodedata.normalize('NFKD', value).lower()
for src, dst in substitutions:
value = value.replace(src.lower(), dst.lower())
value = re.sub('[^\w\s-]', '', value).strip()
value = re.sub('[-\s]+', '-', value)
# we want only ASCII chars
value = value.encode('ascii', 'ignore')
# but Pelican should generally use only unicode
return value.decode('ascii')
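# Illustrative examples (hypothetical inputs):
#   slugify('My Çrazy Title!')                            -> 'my-crazy-title'
#   slugify('C++ notes', substitutions=(('C++', 'cpp'),)) -> 'cpp-notes'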
def copy(source, destination, ignores=None):
"""Recursively copy source into destination.
If source is a file, destination has to be a file as well.
The function is able to copy either files or directories.
:param source: the source file or directory
:param destination: the destination file or directory
:param ignores: either None, or a list of glob patterns;
files matching those patterns will _not_ be copied.
"""
def walk_error(err):
logger.warning("While copying %s: %s: %s",
source_, err.filename, err.strerror)
source_ = os.path.abspath(os.path.expanduser(source))
destination_ = os.path.abspath(os.path.expanduser(destination))
if ignores is None:
ignores = []
if any(fnmatch.fnmatch(os.path.basename(source), ignore)
for ignore in ignores):
logger.info('Not copying %s due to ignores', source_)
return
if os.path.isfile(source_):
dst_dir = os.path.dirname(destination_)
if not os.path.exists(dst_dir):
logger.info('Creating directory %s', dst_dir)
os.makedirs(dst_dir)
logger.info('Copying %s to %s', source_, destination_)
shutil.copy2(source_, destination_)
elif os.path.isdir(source_):
if not os.path.exists(destination_):
logger.info('Creating directory %s', destination_)
os.makedirs(destination_)
if not os.path.isdir(destination_):
logger.warning('Cannot copy %s (a directory) to %s (a file)',
source_, destination_)
return
for src_dir, subdirs, others in os.walk(source_):
dst_dir = os.path.join(destination_,
os.path.relpath(src_dir, source_))
subdirs[:] = (s for s in subdirs if not any(fnmatch.fnmatch(s, i)
for i in ignores))
others[:] = (o for o in others if not any(fnmatch.fnmatch(o, i)
for i in ignores))
if not os.path.isdir(dst_dir):
logger.info('Creating directory %s', dst_dir)
# Parent directories are known to exist, so 'mkdir' suffices.
os.mkdir(dst_dir)
for o in others:
src_path = os.path.join(src_dir, o)
dst_path = os.path.join(dst_dir, o)
if os.path.isfile(src_path):
logger.info('Copying %s to %s', src_path, dst_path)
shutil.copy2(src_path, dst_path)
else:
logger.warning('Skipped copy %s (not a file or directory) to %s',
src_path, dst_path)
def clean_output_dir(path, retention):
"""Remove all files from output directory except those in retention list"""
if not os.path.exists(path):
logger.debug("Directory already removed: %s", path)
return
if not os.path.isdir(path):
try:
os.remove(path)
except Exception as e:
logger.error("Unable to delete file %s; %s", path, e)
return
# remove existing content from output folder unless in retention list
for filename in os.listdir(path):
file = os.path.join(path, filename)
if any(filename == retain for retain in retention):
logger.debug("Skipping deletion; %s is on retention list: %s",
filename, file)
elif os.path.isdir(file):
try:
shutil.rmtree(file)
logger.debug("Deleted directory %s", file)
except Exception as e:
logger.error("Unable to delete directory %s; %s",
file, e)
elif os.path.isfile(file) or os.path.islink(file):
try:
os.remove(file)
logger.debug("Deleted file/link %s", file)
except Exception as e:
logger.error("Unable to delete file %s; %s", file, e)
else:
logger.error("Unable to delete %s, file type unknown", file)
def get_relative_path(path):
"""Return the relative path from the given path to the root path."""
components = split_all(path)
if len(components) <= 1:
return os.curdir
else:
parents = [os.pardir] * (len(components) - 1)
return os.path.join(*parents)
def path_to_url(path):
"""Return the URL corresponding to a given path."""
if os.sep == '/':
return path
else:
return '/'.join(split_all(path))
def posixize_path(rel_path):
"""Use '/' as path separator, so that source references,
like '{filename}/foo/bar.jpg' or 'extras/favicon.ico',
will work on Windows as well as on Mac and Linux."""
return rel_path.replace(os.sep, '/')
def truncate_html_words(s, num, end_text='...'):
"""Truncates HTML to a certain number of words.
(not counting tags and comments). Closes opened tags if they were correctly
closed in the given html. Takes an optional argument of what should be used
to notify that the string has been truncated, defaulting to ellipsis (...).
Newlines in the HTML are preserved. (From the django framework).
"""
length = int(num)
if length <= 0:
return ''
html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area',
'hr', 'input')
# Set up regular expressions
re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U)
re_tag = re.compile(r'<(/)?([^ ]+?)(?: (/)| .*?)?>')
# Count non-HTML words and keep note of open tags
pos = 0
end_text_pos = 0
words = 0
open_tags = []
while words <= length:
m = re_words.search(s, pos)
if not m:
# Checked through whole string
break
pos = m.end(0)
if m.group(1):
# It's an actual non-HTML word
words += 1
if words == length:
end_text_pos = pos
continue
# Check for tag
tag = re_tag.match(m.group(0))
if not tag or end_text_pos:
# Don't worry about non tags or tags after our truncate point
continue
closing_tag, tagname, self_closing = tag.groups()
tagname = tagname.lower() # Element names are always case-insensitive
if self_closing or tagname in html4_singlets:
pass
elif closing_tag:
# Check for match in open tags list
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
open_tags = open_tags[i + 1:]
else:
# Add it to the start of the open tags list
open_tags.insert(0, tagname)
if words <= length:
# Don't try to close tags if we don't need to truncate
return s
out = s[:end_text_pos]
if end_text:
out += ' ' + end_text
# Close any tags still open
for tag in open_tags:
out += '</%s>' % tag
# Return string
return out
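# Illustrative example (hypothetical input):
#   truncate_html_words('<p>one two three four</p>', 2)
#   -> '<p>one two ...</p>'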
def process_translations(content_list, order_by=None):
""" Finds translation and returns them.
Returns a tuple with two lists (index, translations). Index list includes
items in default language or items which have no variant in default
language. Items with the `translation` metadata set to something else than
`False` or `false` will be used as translations, unless all the items with
the same slug have that metadata.
For each content_list item, sets the 'translations' attribute.
order_by can be a string of an attribute or sorting function. If order_by
is defined, content will be ordered by that attribute or sorting function.
By default, content is ordered by slug.
Different content types can have default order_by attributes defined
in settings, e.g. PAGES_ORDER_BY='sort-order', in which case `sort-order`
should be a defined metadata attribute in each page.
"""
content_list.sort(key=attrgetter('slug'))
grouped_by_slugs = groupby(content_list, attrgetter('slug'))
index = []
translations = []
for slug, items in grouped_by_slugs:
items = list(items)
# items with `translation` metadata will be used as translations…
default_lang_items = list(filter(
lambda i: i.metadata.get('translation', 'false').lower()
== 'false',
items))
# …unless all items with that slug are translations
if not default_lang_items:
default_lang_items = items
# display warnings if several items have the same lang
for lang, lang_items in groupby(items, attrgetter('lang')):
lang_items = list(lang_items)
len_ = len(lang_items)
if len_ > 1:
logger.warning('There are %s variants of "%s" with lang %s',
len_, slug, lang)
for x in lang_items:
logger.warning('\t%s', x.source_path)
# find items with default language
default_lang_items = list(filter(attrgetter('in_default_lang'),
default_lang_items))
# if there is no article with default language, take an other one
if not default_lang_items:
default_lang_items = items[:1]
if not slug:
logger.warning(
'empty slug for %s. '
'You can fix this by adding a title or a slug to your '
'content',
default_lang_items[0].source_path)
index.extend(default_lang_items)
translations.extend([x for x in items if x not in default_lang_items])
for a in items:
a.translations = [x for x in items if x != a]
if order_by:
if callable(order_by):
try:
index.sort(key=order_by)
except Exception:
logger.error('Error sorting with function %s', order_by)
elif isinstance(order_by, six.string_types):
if order_by.startswith('reversed-'):
order_reversed = True
order_by = order_by.replace('reversed-', '', 1)
else:
order_reversed = False
if order_by == 'basename':
index.sort(key=lambda x: os.path.basename(x.source_path or ''),
reverse=order_reversed)
# already sorted by slug, no need to sort again
elif not (order_by == 'slug' and not order_reversed):
try:
index.sort(key=attrgetter(order_by),
reverse=order_reversed)
except AttributeError:
logger.warning('There is no "%s" attribute in the item '
'metadata. Defaulting to slug order.', order_by)
else:
logger.warning('Invalid *_ORDER_BY setting (%s).'
'Valid options are strings and functions.', order_by)
return index, translations
def folder_watcher(path, extensions, ignores=[]):
'''Generator for monitoring a folder for modifications.
Returns a boolean indicating if files are changed since last check.
Returns None if there are no matching files in the folder'''
def file_times(path):
'''Return `mtime` for each file in path'''
for root, dirs, files in os.walk(path, followlinks=True):
dirs[:] = [x for x in dirs if not x.startswith(os.curdir)]
for f in files:
if (f.endswith(tuple(extensions)) and
not any(fnmatch.fnmatch(f, ignore) for ignore in ignores)):
try:
yield os.stat(os.path.join(root, f)).st_mtime
except OSError as e:
logger.warning('Caught Exception: %s', e)
LAST_MTIME = 0
while True:
try:
mtime = max(file_times(path))
if mtime > LAST_MTIME:
LAST_MTIME = mtime
yield True
except ValueError:
yield None
else:
yield False
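# Illustrative usage sketch (hypothetical paths): poll a content folder.
#   watcher = folder_watcher('content', ['.md', '.rst'])
#   changed = next(watcher)  # True, False, or None if no files match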
def file_watcher(path):
'''Generator for monitoring a file for modifications'''
LAST_MTIME = 0
while True:
if path:
try:
mtime = os.stat(path).st_mtime
except OSError as e:
logger.warning('Caught Exception: %s', e)
continue
if mtime > LAST_MTIME:
LAST_MTIME = mtime
yield True
else:
yield False
else:
yield None
def set_date_tzinfo(d, tz_name=None):
"""Set the timezone for dates that don't have tzinfo"""
if tz_name and not d.tzinfo:
tz = pytz.timezone(tz_name)
d = tz.localize(d)
return SafeDatetime(d.year, d.month, d.day, d.hour, d.minute, d.second,
d.microsecond, d.tzinfo)
return d
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
def split_all(path):
"""Split a path into a list of components
While os.path.split() splits a single component off the back of
`path`, this function splits all components:
>>> split_all(os.path.join('a', 'b', 'c'))
['a', 'b', 'c']
"""
components = []
path = path.lstrip('/')
while path:
head, tail = os.path.split(path)
if tail:
components.insert(0, tail)
elif head == path:
components.insert(0, head)
break
path = head
return components
def is_selected_for_writing(settings, path):
'''Check whether path is selected for writing
according to the WRITE_SELECTED list
If WRITE_SELECTED is an empty list (default),
any path is selected for writing.
'''
if settings['WRITE_SELECTED']:
return path in settings['WRITE_SELECTED']
else:
return True
def path_to_file_url(path):
'''Convert file-system path to file:// URL'''
return six.moves.urllib_parse.urljoin(
"file://", six.moves.urllib.request.pathname2url(path))
def maybe_pluralize(count, singular, plural):
'''
Returns a formatted string containing count and plural if count is not 1
Returns count and singular if count is 1
maybe_pluralize(0, 'Article', 'Articles') -> '0 Articles'
maybe_pluralize(1, 'Article', 'Articles') -> '1 Article'
maybe_pluralize(2, 'Article', 'Articles') -> '2 Articles'
'''
selection = plural
if count == 1:
selection = singular
return '{} {}'.format(count, selection)
| levanhien8/pelican | pelican/utils.py | Python | agpl-3.0 | 23,526 |
# -*- coding: utf-8 -*-
#
import base64
import crcmod
import datetime
import json
import hashlib
import hmac
import urlparse
import time
import os
import oss2
from django.conf import settings
from django.utils import timezone
from chisch.common import dependency
# from django.conf import settings
# The code below demonstrates the usage of PostObject. PostObject does not
# depend on the OSS Python SDK.
# For details on POST form fields see RFC2388 https://tools.ietf.org/html/rfc2388
# PostObject reference https://help.aliyun.com/document_detail/31988.html
# PostObject errors and troubleshooting https://yq.aliyun.com/articles/58524
def calculate_crc64(data):
"""计算文件的MD5
:param data: 数据
:return 数据的MD5值
"""
_POLY = 0x142F0E1EBA9EA3693
_XOROUT = 0XFFFFFFFFFFFFFFFF
crc64 = crcmod.Crc(_POLY, initCrc=0, xorOut=_XOROUT)
crc64.update(data)
return crc64.crcValue
def build_gmt_expired_time(expire_time):
"""生成GMT格式的请求超时时间
:param int expire_time: 超时时间,单位秒
:return str GMT格式的超时时间
"""
now = int(time.time())
expire_syncpoint = now + expire_time
expire_gmt = datetime.datetime.fromtimestamp(expire_syncpoint).isoformat()
expire_gmt += 'Z'
return expire_gmt
def build_encode_policy(expired_time, condition_list):
"""生成policy
:param int expired_time: 超时时间,单位秒
:param list condition_list: 限制条件列表
"""
policy_dict = {
'expiration': build_gmt_expired_time(expired_time),
'conditions': condition_list
}
policy = json.dumps(policy_dict).strip()
policy_encode = base64.b64encode(policy)
return policy_encode
def build_signature(access_key_secret, encode_policy):
"""生成签名
:param str access_key_secret: access key secret
:param str encode_policy: 编码后的Policy
:return str 请求签名
"""
h = hmac.new(access_key_secret, encode_policy, hashlib.sha1)
signature = base64.encodestring(h.digest()).strip()
return signature
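# Illustrative flow (hypothetical credentials and limits): build a policy
# restricting uploads to 1 MB, then sign it.
#   policy = build_encode_policy(3600, [['content-length-range', 0, 1048576]])
#   signature = build_signature('<your-access-key-secret>', policy)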
def bulid_callback(cb_url, cb_body, cb_body_type=None, cb_host=None):
"""生成callback字符串
:param str cb_url: 回调服务器地址,文件上传成功后OSS向此url发送回调请求
:param str cb_body: 发起回调请求的Content-Type,默认application/x-www-form-urlencoded
:param str cb_body_type: 发起回调时请求body
:param str cb_host: 发起回调请求时Host头的值
:return str 编码后的Callback
"""
callback_dict = {
'callbackUrl': cb_url,
'callbackBody': cb_body,
}
if cb_body_type is None:
callback_dict['callbackBodyType'] = 'application/x-www-form-urlencoded'
else:
callback_dict['callbackBodyType'] = cb_body_type
if cb_host is not None:
callback_dict['callbackHost'] = cb_host
callback_param = json.dumps(callback_dict).strip()
    base64_callback = base64.b64encode(callback_param)
return base64_callback
def build_post_url(endpoint, bucket_name):
"""生成POST请求URL
:param str endpoint: endpoint
:param str bucket_name: bucket name
:return str POST请求URL
"""
if endpoint.startswith('http://'):
return endpoint.replace('http://', 'http://{0}.'.format(bucket_name))
elif endpoint.startswith('https://'):
return endpoint.replace('https://', 'https://{0}.'.format(bucket_name))
else:
return 'http://{0}.{1}'.format(bucket_name, endpoint)
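# Illustrative example (hypothetical bucket and endpoint):
#   build_post_url('http://oss-cn-hangzhou.aliyuncs.com', 'mybucket')
#   -> 'http://mybucket.oss-cn-hangzhou.aliyuncs.com'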
def build_post_body(field_dict, boundary):
"""生成POST请求Body
:param dict field_dict: POST请求表单域
:param str boundary: 表单域的边界字符串
:return str POST请求Body
"""
post_body = b''
    # encode the form fields
for k, v in field_dict.iteritems():
if k != 'content' and k != 'content-type':
post_body += '''--{0}\r\nContent-Disposition: form-data; name=\"{1}\"\r\n\r\n{2}\r\n'''.format(
boundary, k, v)
    # content of the uploaded file; it must be the last form field
post_body += '''--{0}\r\nContent-Disposition: form-data; name=\"file\"; filename=\"{1}\"\r\nContent-Type: {2}\r\n\r\n{3}'''.format(
boundary, field_dict['key'], field_dict['content-type'],
field_dict['content'])
    # append the closing boundary of the form
post_body += '\r\n--{0}--\r\n'.format(boundary)
return post_body
def build_post_headers(body_len, boundary, headers=None):
"""生气POST请求Header
:param str body_len: POST请求Body长度
:param str boundary: 表单域的边界字符串
:param dict 请求Header
"""
headers = headers if headers else {}
headers['Content-Length'] = str(body_len)
headers['Content-Type'] = 'multipart/form-data; boundary={0}'.format(
boundary)
return headers
def get_object_key(action, attachment_id, file_suffix):
aliyun_oss = settings.ALIYUN_OSS
key_sub_elements = aliyun_oss['OBJECT_KEYS_SUB_ELEMENTS'][action]
if key_sub_elements.get('only_one', False):
key = key_sub_elements['path'] + str(attachment_id) + file_suffix
return key
else:
pass
def get_time_from_sign_url(url):
query = urlparse.urlparse(url).query
kwargs = dict([(k, v[0]) for k, v in urlparse.parse_qs(query).items()])
return timezone.datetime.fromtimestamp(int(kwargs['Expires']))
@dependency.provider('oss_manager')
class OssManager(object):
def __init__(self):
        # First initialize AccessKeyId, AccessKeySecret, Endpoint, etc.
        # Read them from environment variables, or replace placeholders such
        # as "<your AccessKeyId>" with the real values.
ALIYUN_OSS = settings.ALIYUN_OSS
access_key_id = ALIYUN_OSS.get('ACCESS_KEY_ID')
access_key_secret = ALIYUN_OSS.get('ACCESS_KEY_SECRET')
auth = oss2.Auth(access_key_id, access_key_secret)
self.bucket_name = ALIYUN_OSS.get('BUCKET_NAME')
self.endpoint = ALIYUN_OSS.get('ENDPOINT')
self.bucket = oss2.Bucket(auth, self.endpoint, self.bucket_name)
def get_object_url(self):
pass
def single_object_upload(self, key, f, permission):
from oss2 import SizedFileAdapter, determine_part_size
from oss2.models import PartInfo
preferred_size = settings.OBJECT_PREFERRED_SIZE
total_size = os.path.getsize(f['path'])
part_size = determine_part_size(total_size,
preferred_size=preferred_size)
        # initialize the multipart upload
upload_id = self.bucket.init_multipart_upload(key).upload_id
parts = []
        # upload the parts one by one
with open(f['path'], 'rb') as fileobj:
part_number = 1
offset = 0
try:
while offset < total_size:
num_to_upload = min(part_size, total_size - offset)
result = self.bucket.upload_part(key,
upload_id,
part_number,
SizedFileAdapter(
fileobj,
num_to_upload
))
parts.append(PartInfo(part_number, result.etag))
offset += num_to_upload
part_number += 1
except Exception, e:
raise e
try:
resp = self.bucket.complete_multipart_upload(key, upload_id, parts)
self.bucket.put_object_acl(key, permission)
except Exception, e:
raise e
if str(resp.status).startswith('20'):
if permission == oss2.OBJECT_ACL_PUBLIC_READ:
file_host = self.endpoint.replace('://',
'://' + self.bucket_name + '.')
file_url = file_host + key
invalid_at = None
else:
expires = settings.ALIYUN_OSS['VIDEO_SIGN_URL_EXPIRES']
file_url = self.bucket.sign_url('GET',
key,
expires)
invalid_at = get_time_from_sign_url(file_url)
return file_url, invalid_at
else:
raise Exception
| zhaowenxiang/chisch | oss/cores.py | Python | mit | 8,433 |
'''
libpcap.py contains the functions that exist in libpcap but not in winpcap; not all functions are implemented yet
'''
from .pcap import _pcap, PcapError, PCAP_ERRBUF_SIZE, pcap_t_p
from ctypes import *
from libpcapy.pcap import PcapWarning
# Error codes for the pcap API.
# These will all be negative, so you can check for the success or
# failure of a call that returns these codes by checking for a
# negative value.
PCAP_ERROR = -1 # generic error code
PCAP_ERROR_BREAK = -2 # loop terminated by pcap_breakloop
PCAP_ERROR_NOT_ACTIVATED = -3 # the capture needs to be activated
PCAP_ERROR_ACTIVATED = -4 # the operation can't be performed on already activated captures
PCAP_ERROR_NO_SUCH_DEVICE = -5 # no such device exists
PCAP_ERROR_RFMON_NOTSUP = -6 # this device doesn't support rfmon (monitor) mode
PCAP_ERROR_NOT_RFMON = -7 # operation supported only in monitor mode
PCAP_ERROR_PERM_DENIED = -8 # no permission to open the device
PCAP_ERROR_IFACE_NOT_UP = -9 # interface isn't up
PCAP_ERROR_CANTSET_TSTAMP_TYPE = -10 # this device doesn't support setting the time stamp type
PCAP_ERROR_PROMISC_PERM_DENIED = -11 # you don't have permission to capture in promiscuous mode
# Warning codes for the pcap API.
# These will all be positive and non-zero, so they won't look like
# errors.
PCAP_WARNING = 1 # generic warning code
PCAP_WARNING_PROMISC_NOTSUP = 2 # this device doesn't support promiscuous mode
PCAP_WARNING_TSTAMP_TYPE_NOTSUP = 3 # the requested time stamp type is not supported
def pcap_create(device):
'''create a live capture handle
pcap_create() is used to create a packet capture handle to look at
packets on the network. source is a string that specifies the network
device to open; on Linux systems with 2.2 or later kernels, a source
argument of "any" or NULL can be used to capture packets from all
interfaces.
    The returned handle must be activated with pcap_activate() before
    packets can be captured with it; options for the capture, such as
    promiscuous mode, can be set on the handle before activating it.
'''
errbuf = create_string_buffer(PCAP_ERRBUF_SIZE)
_pcap.pcap_create.restype = pcap_t_p
hpcap = _pcap.pcap_create(device.encode(), errbuf)
if hpcap:
return hpcap
else:
raise PcapError(errbuf.raw.decode())
__pcap_errno_str={}
__pcap_errno_str[PCAP_ERROR_BREAK]="loop terminated by pcap_breakloop"
__pcap_errno_str[PCAP_ERROR_NOT_ACTIVATED]="the capture needs to be activated"
__pcap_errno_str[PCAP_ERROR_ACTIVATED]="the operation can't be performed on already activated captures"
__pcap_errno_str[PCAP_ERROR_NO_SUCH_DEVICE]="no such device exists"
__pcap_errno_str[PCAP_ERROR_RFMON_NOTSUP]="this device doesn't support rfmon (monitor) mode"
__pcap_errno_str[PCAP_ERROR_NOT_RFMON]="operation supported only in monitor mode"
__pcap_errno_str[PCAP_ERROR_PERM_DENIED]="no permission to open the device"
__pcap_errno_str[PCAP_ERROR_IFACE_NOT_UP]="interface isn't up"
__pcap_errno_str[PCAP_ERROR_CANTSET_TSTAMP_TYPE]="this device doesn't support setting the time stamp type"
__pcap_errno_str[PCAP_ERROR_PROMISC_PERM_DENIED]="you don't have permission to capture in promiscuous mode"
__pcap_warnno_str={}
__pcap_warnno_str[PCAP_WARNING_PROMISC_NOTSUP]="this device doesn't support promiscuous mode"
__pcap_warnno_str[PCAP_WARNING_TSTAMP_TYPE_NOTSUP]="the requested time stamp type is not supported"
def __pcap_check_retcode(hpcap, retcode):
if retcode == 0:
return
elif retcode == PCAP_ERROR:
_pcap.pcap_geterr.restype = c_char_p
err = _pcap.pcap_geterr(hpcap)
raise PcapError(err.decode())
elif retcode == PCAP_WARNING:
_pcap.pcap_geterr.restype = c_char_p
err = _pcap.pcap_geterr(hpcap)
raise PcapWarning(err.decode())
elif retcode < 0:
raise PcapError(__pcap_errno_str[retcode], retcode)
else:
raise PcapWarning(__pcap_warnno_str[retcode], retcode)
def pcap_set_snaplen(hpcap, len):
    '''set the snapshot length for a not-yet-activated capture handle
sets the snapshot length to be used on a capture
handle when the handle is activated to snaplen
'''
retcode = _pcap.pcap_set_snaplen(hpcap, len)
__pcap_check_retcode(hpcap, retcode)
def pcap_set_promisc(hpcap, is_promisc):
'''set promiscuous mode for a not-yet-activated capture handle
sets whether promiscuous mode should be set on a
capture handle when the handle is activated. If promisc is non-zero,
promiscuous mode will be set, otherwise it will not be set.
'''
retcode = _pcap.pcap_set_promisc(hpcap, int(is_promisc))
__pcap_check_retcode(hpcap, retcode)
def pcap_can_set_rfmon(hpcap):
'''check whether monitor mode can be set for a not-yet-activated capture handle
checks whether monitor mode could be set on a capture handle when the handle is activated
'''
retcode = _pcap.pcap_can_set_rfmon(hpcap)
if retcode == 0:
return False
elif retcode == 1:
return True
else:
__pcap_check_retcode(hpcap, retcode)
def pcap_set_rfmon(hpcap, is_rfmon):
'''set monitor mode for a not-yet-activated capture handle
sets whether monitor mode should be set on a capture
handle when the handle is activated. If rfmon is non-zero, monitor
mode will be set, otherwise it will not be set.
'''
retcode = _pcap.pcap_set_rfmon(hpcap, int(is_rfmon))
__pcap_check_retcode(hpcap, retcode)
def pcap_set_timeout(hpcap, timeout):
'''set the read timeout for a not-yet-activated capture handle
sets the read timeout that will be used on a capture
handle when the handle is activated to to_ms, which is in units of milliseconds.
'''
__pcap_check_retcode(hpcap, _pcap.pcap_set_timeout(hpcap, timeout))
#def pcap_set_tstamp_type(hpcap, tstamp_type):
#'''set the time stamp type to be used by a capture device
#sets the the type of time stamp desired for
#packets captured on the pcap descriptor to the type specified by
#tstamp_type. It must be called on a pcap descriptor created by
#pcap_create() that has not yet been activated by pcap_activate().
    #pcap_list_tstamp_types() will give a list of the time stamp types
    #supported by a given capture device. See pcap-tstamp(7) for a list of all
#the time stamp types.
#'''
#__pcap_check_retcode(hpcap, _pcap.pcap_set_tstamp_type(hpcap, tstamp_type))
def pcap_set_buffer_size(hpcap, size):
'''set the buffer size for a not-yet-activated capture handle
sets the buffer size that will be used on a capture
handle when the handle is activated to buffer_size, which is in
units of bytes.
'''
__pcap_check_retcode(hpcap, _pcap.pcap_set_buffer_size(hpcap, size))
def pcap_activate(hpcap):
    '''activate a capture handle
    used to activate a packet capture handle to look at
    packets on the network, with the options that were set on the handle
    being in effect.
    '''
    __pcap_check_retcode(hpcap, _pcap.pcap_activate(hpcap))
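# Illustrative usage sketch (assumes a capture device such as 'eth0' exists
# and the process has sufficient privileges):
#   handle = pcap_create('eth0')
#   pcap_set_snaplen(handle, 65535)
#   pcap_set_promisc(handle, True)
#   pcap_set_timeout(handle, 1000)
#   pcap_activate(handle)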
| public0821/libpcapy | libpcapy/libpcap.py | Python | apache-2.0 | 7,180 |
"""Handle communication with any Deep or IcePAP device"""
# Standard modules
import string
import time
import numpy
import sys
import pdb
from threading import Lock #import gevent
# DEEP modules
from . import log
# Get python modules to communicate with an DEEP device
from sockdeep import SockDeep
from sldeep import SLDeep
# End of Command character and other special ones
COMM_EOL = "\n"
COMM_ACK = "#"
COMM_REQ = "?"
COMM_ADR = ":"
COMM_BIN = "*"
COMM_MLI = "$"
# Device long answer timeout in seconds
COMM_LONG_TIMEOUT = 20
# Device generic command
COMM_ALIVE_CMD = "?PING"
COMM_ALIVE_ICECMD = "?_SOCKPING"
COMM_ALIVE_ANS = "OK"
# Binary protocol
BIN_HEAD_SIGNATURE = 0xa5a50000
BIN_HEAD_ICESIGNATURE = 0xa5aa555a
BIN_HEAD_SIGNMASK = 0xffff0000
BIN_HEAD_NOCHECKSUM = 0x00000010
BIN_HEAD_BIG_ENDIAN = 0x00000020
BIN_HEAD_UNITMASK = 0x0000000f
# Binary value type given in bytes per single value
BIN_8 = 1
BIN_16 = 2
BIN_32 = 4
BIN_64 = 8
#
#
#
class DeviceError(Exception):
#
#
def __init__(self, device, message):
Exception.__init__(self, device.hostname() + ': ' + message)
#
#
#
class DeepDevice(object):
_icepapmode = False
#
#
def __init__(self, dev, argin_str="", timeout=None):
# parse options given at object creation
argins = string.split(argin_str)
for argin in argins:
try:
opt,val = string.split(argin,"=")
except:
self._syntax_error("invalid option: \"%s\""%argin)
opt=string.lower(opt)
if opt.startswith("mode"):
if val.lower() == "icepap":
self._icepapmode = True
else:
self._syntax_error("invalid communication mode: \"%s\""%val)
elif opt.startswith("verb"):
self.set_verbose(int(val))
else:
self._syntax_error("unknown option: \"%s\""%argin)
log.trace("object created, device: \"%s\""%dev)
if self._icepapmode:
log.trace("using IcePAP compatibility mode")
self.set_debug_mode(False)
# TODO: try to guess if it is an IP or an SL to avoid
# to create socket connection and getting socket timeout
        try:
            comm_dev = SockDeep(dev, log.level(), timeout)
        except:
            # not a socket device, fall back to a serial line
            try:
                comm_dev = SLDeep(dev, log.level(), timeout)
            except:
                msg = "unsupported communication device (SL or socket): \"%s\""%dev
                log.error(msg, exception=IOError)
self.comm_dev = comm_dev
self._hostname = dev
self._to_be_flushed = False
try:
self.commands = self._getcommandlist()
        except:
            msg = "Not active device: \"%s\""%dev
            log.error(msg, exception=IOError)
#
#
def close(self):
self.comm_dev.close()
#
#
def set_debug_mode(self, dbgmode):
self.debug_mode = bool(dbgmode)
#
#
def set_verbose(self, val):
log.level(val)
#
#
def get_verbose(self):
return(log.level())
#
#
def log(self, msg, verb):
log.log(verb, msg)
#
#
def hostname(self):
return(self._hostname)
#
#
def _getcommandlist(self):
if self._icepapmode:
answ = self.command("?HELP").splitlines()
answ = [s for line in answ for s in line.split()]
else:
answ = self.command("?HELP ALL").splitlines()
answ = [s.split(":")[0].strip() for s in answ if s.rfind(":") >= 0]
return answ
#
#
def getcommandlist(self):
return self.commands
#
#
def isvalidcommand(self, comm):
if comm.split()[0].upper() in self.commands:
return True
else:
return False
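    # Illustrative usage sketch (hypothetical device address; the command()
    # method used here is assumed from its use in _getcommandlist above):
    #   dev = DeepDevice("iceid241:5000", "mode=icepap")
    #   if dev.isvalidcommand("?VER"):
    #       print dev.command("?VER")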
#
# Log the error message and raises an exception
#
def _syntax_error(self, msg):
log.error(msg, exception=SyntaxError)
#
# Command Syntax: [#][<address>:]<keyword> [param1 [param2] etc][\r]\n
# Keyword Syntax: [?|*|?*]<string>
#
def __cmd_type(self, str_cmd, chkanswer):
# will return a list
cmd_type = []
# consider only the command not its params
cmd = str_cmd.split()[0].upper()
# check if acknowledge is requested
if cmd[0] == COMM_ACK:
ack = True
cmd = cmd.lstrip(COMM_ACK)
else:
ack = False
# check for an address field
spcmd = cmd.split(COMM_ADR)
n = len(spcmd)
if n == 1:
cmd_addr = None
prefix = spcmd[0]
elif n == 2:
cmd_addr = spcmd[0]
prefix = spcmd[1]
else:
self._syntax_error("too many \"%s\" chars"%(COMM_ADR))
if not prefix: # missing keyword
self._syntax_error("missing command keyword")
elif prefix[0] == COMM_REQ: # this is a query
cmd_type.append("req")
cmd_key = prefix[1:]
if cmd_addr == "": # if broadcast, error
self._syntax_error("queries cannot be broadcasted")
elif ack: # if acknowledge, cancel answer check
chkanswer = False
else:
chkanswer = True
else:
cmd_key = prefix
if ack:
cmd_type.append("ack")
# check if binary
if cmd_key[0] == COMM_BIN: # binary data
cmd_type.append("bin")
cmd_key = cmd_key[1:]
# minimum check on characters for address and command fields
if not cmd_key.replace("_", "").isalnum():
self._syntax_error("invalid character in command %s"%(prefix))
if cmd_addr and not cmd_addr.replace("_", "").isalnum():
self._syntax_error("invalid character in address %s"%(cmd_addr))
if cmd_addr and self._icepapmode:
prefix = cmd_addr + COMM_ADR + prefix
# normal end
msg = "command: %s type: %s"%(prefix, cmd_type)
log.data(msg)
return prefix, cmd_type, chkanswer
#
#
def __dump_bin(self, data):
"""Dump in hexa the values of the NumPy array of type from 8 to 64bit"""
# give up if nothing to do
if(log.level() < log.DBG_DATA):
return
# guess information on data to dump
bufsize = data.nbytes # specfic to NumPy arrays
datasize = len(data) # number of values
datatype = bufsize/datasize # number of bytes per individual value
# minium check
if datatype not in [BIN_8, BIN_16, BIN_32, BIN_64]:
self._syntax_error("unsupported data type: BIN_%dbits"%(datatype*8))
#
print " binary data: %d bytes"%bufsize
lnv = 80 / (datatype*2 + 4)
for j in range(1,datasize+1):
if datatype == BIN_8:
hexstr = "%02x"%(data[j-1] & 0xff)
            # NOTE MP 14Jun2013: problem with string formatting in Python <=2.6.5
# with integers larger than 32bit. Therefore handle them by hand
if datatype >= BIN_16:
hexstr = "%04x"%(data[j-1] & 0xffff)
if datatype >= BIN_32:
hexstr = "%04x"%((data[j-1]>>16) & 0xffff) + hexstr
if datatype >= BIN_64:
hexstr = "%04x"%((data[j-1]>>32) & 0xffff) + hexstr
hexstr = "%04x"%((data[j-1]>>48) & 0xffff) + hexstr
hexstr = " 0x" + hexstr
sys.stdout.write(hexstr)
if not j%lnv:
sys.stdout.write("\n")
#
sys.stdout.write("\n")
sys.stdout.flush()
#
#
def __wr(self, str_cmd, has_reply):
if self._to_be_flushed:
self.flush()
cmd = str_cmd + "\n"
# send the command passed as string
log.trace("===> [%s]"%str_cmd)
if has_reply:
return self.comm_dev.request(cmd)
else:
self.comm_dev.puts(cmd)
#
#
def __wr_bin(self, str_cmd, in_data):
        self.__wr(str_cmd, False)  # send the command line; the reply comes after the binary block
# prepare binary protocol header
header = numpy.array([0,0,0],numpy.uint32)
if not self._icepapmode:
header[0] = BIN_HEAD_SIGNATURE | (in_data.itemsize & BIN_HEAD_UNITMASK)
header[1] = len(in_data)
header[2] = numpy.uint32(numpy.sum(in_data))
# for IcePAP the protocol is different
else:
header[0] = BIN_HEAD_ICESIGNATURE
header[1] = in_data.nbytes/2 # data length given in 16bit words
org_dtype = in_data.dtype
in_data.dtype = numpy.uint16 # checksum calculate over 16bits words
header[2] = numpy.uint32(numpy.sum(in_data))
in_data.dtype = org_dtype # avoid client panic
        # header to be sent (the float() is needed to handle the unsigned long)
log.data("header field: 0x%08x"%float(header[0]))
log.data("data len field: 0x%08x"%float(header[1]))
log.data("checksum field: 0x%08x"%float(header[2]))
        # the header must always be sent as little-endian (mandatory);
        # converting the binary data too is convenient (avoids using the BIG_ENDIAN flag)
if sys.byteorder == "big":
#header[0] |= BIN_HEAD_BIG_ENDIAN
header.byteswap(True) # convert header in little endian
bin_block = in_data.byteswap(False) # put data in little endian too
else:
bin_block = in_data
# send the header and the binary block
self.__dump_bin(bin_block)
self.comm_dev.puts(header)
self.comm_dev.puts(bin_block)
#
#
def __rd_bin(self):
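        """Read a binary block preceded by its 3-word protocol header.

        Returns the payload as a NumPy array in native byte order, after
        verifying the signature and (when enabled) the 32-bit checksum.
        """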
# load binary protocol header
header = numpy.fromstring(self.comm_dev.getchar(3 * 4), numpy.uint32)
if sys.byteorder == "big":
header.byteswap(True) # convert header from little endian
# received header (the float() is needed to handle the unsigned long)
log.data("header field: 0x%08x"%float(header[0]))
log.data("data len field: 0x%08x"%float(header[1]))
log.data("checksum field: 0x%08x"%float(header[2]))
# retrieve information from binary protocol header
if not self._icepapmode:
if (header[0] & BIN_HEAD_SIGNMASK) != BIN_HEAD_SIGNATURE:
raise IOError, "bad binary data header"
itemsize = header[0] & BIN_HEAD_UNITMASK
usechksum = not (header[0] & BIN_HEAD_NOCHECKSUM)
bigendian = header[0] & BIN_HEAD_BIG_ENDIAN
# for IcePAP the protocol is different
else:
if header[0] != BIN_HEAD_ICESIGNATURE:
raise IOError, "bad IcePAP binary data header"
            itemsize = 2 # only 16bit words can be transferred
usechksum = True # checksum is mandatory
bigendian = False
size = header[1]
checksum = header[2]
bin_block = numpy.fromstring(self.comm_dev.getchar(size * itemsize), \
{1:numpy.uint8, 2:numpy.uint16, 4:numpy.uint32, 8:numpy.uint64}[itemsize])
if (bigendian and sys.byteorder == "little") or \
(not bigendian and sys.byteorder == "big"):
bin_block.byteswap(True) # convert data into native ordering
#
calc_checksum = long(bin_block.sum()) & 0xffffffff # checksum to 32 bits
if usechksum and calc_checksum != checksum:
raise IOError, "Bad binary checksum"
# for IcePAP force return data type to 8bits rather than 16bits which
# is meaningless for the client
if self._icepapmode:
bin_block.dtype=numpy.uint8
# normal end
self.__dump_bin(bin_block)
return bin_block
#
#
#
#
def __command(self, str_cmd, in_data = None, chkanswer = False): #, lock=Lock()):
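        """Send a command, handling query/acknowledge/binary variants.

        Returns the ASCII answer, an (answer, data) pair for binary
        queries, or None when no answer is expected.
        """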
# remove any useless ending white spaces and eols
cmd = str_cmd.strip(" \n\r")
# some parsing to guess what to do
prefix, cmd_type, chkanswer = self.__cmd_type(cmd, chkanswer)
# by default no binary data returned
ans_data = None
        # minimum check if binary download is requested
        if in_data is None:
            if "bin" in cmd_type and not "req" in cmd_type:
                self._syntax_error("binary data is missing")
            else:
                reply = self.__wr(cmd, "req" in cmd_type or "ack" in cmd_type)
        else:
            if not "bin" in cmd_type:
                self._syntax_error("downloading binary with a non binary command")
            elif "req" in cmd_type:
                self._syntax_error("downloading binary with a query binary command")
            else:
                self.__wr_bin(cmd, in_data)
        if "ack" in cmd_type:
            self.comm_dev.set_timeout(COMM_LONG_TIMEOUT)
        if "req" in cmd_type or "ack" in cmd_type:
            try:
                ans = reply.get() #self.__rd_ascii(prefix)
            except RuntimeError, msg:
                if chkanswer:
                    raise DeviceError(self, msg)
            else:
                if "bin" in cmd_type:
                    if "req" in cmd_type:
                        ans_data = self.__rd_bin()
                        return ans, ans_data
                else:
                    self.comm_dev.set_timeout()
                    return ans
        else:
            # no answer expected, nothing to return
            return None
#
#
def command(self, str_cmd, in_data = None):
try:
return self.__command(str_cmd, in_data, False)
except IOError:
self._to_be_flushed = True
raise
except KeyboardInterrupt:
self._to_be_flushed = True
if self.debug_mode:
print "Keyboard interrupt"
raise
#
#
def ackcommand(self, str_cmd, in_data = None):
str_cmd = str_cmd.strip()
if str_cmd[0] != "#" and str_cmd[0] != "?":
str_cmd = "#" + str_cmd
try:
return self.__command(str_cmd, in_data, True)
except IOError:
self._to_be_flushed = True
raise
except KeyboardInterrupt:
self._to_be_flushed = True
if self.debug_mode:
print "Keyboard interrupt"
raise
#
#
def flush(self):
if self.debug_mode:
print "Flushing ..."
self.comm_dev.flush()
self._to_be_flushed = False
#
#
def isalive(self):
        # by default, assume nobody is there
alive = False
# try a generic command
if self._icepapmode:
cmd = COMM_ALIVE_ICECMD
else:
cmd = COMM_ALIVE_CMD
try:
ans = self.command(cmd)
except:
self._syntax_error("isalive command \"%s\" failed"%cmd)
        # at this point we could consider the device usable, but be paranoid
if (string.find(ans,"OK") != -1):
alive = True
# normal end
return alive
| esrf-emotion/emotion | emotion/controllers/libicepap/deep/device.py | Python | gpl-2.0 | 13,738 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('scores', '0004_player_last10avg'),
]
operations = [
migrations.RenameField(
model_name='player',
old_name='last10avg',
new_name='last10minutes',
),
migrations.AddField(
model_name='player',
name='last10total',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='player',
name='totalminutes',
field=models.IntegerField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='player',
name='totalpoints',
field=models.IntegerField(default=0),
preserve_default=False,
),
]
| takumiharada/fantasypl | scores/migrations/0005_auto_20150624_0418.py | Python | mit | 968 |
# -*- coding: utf-8 -*-
"""Fallback controller."""
from depotexample.lib.base import BaseController
from tg import abort
__all__ = ['TemplateController']
class TemplateController(BaseController):
"""
The fallback controller for depotexample.
By default, the final controller tried to fulfill the request
when no other routes match. It may be used to display a template
when all else fails, e.g.::
def view(self, url):
return render('/%s' % url)
Or if you're using Mako and want to explicitly send a 404 (Not
Found) response code when the requested template doesn't exist::
import mako.exceptions
def view(self, url):
try:
return render('/%s' % url)
except mako.exceptions.TopLevelLookupException:
abort(404)
"""
def view(self, url):
"""Abort the request with a 404 HTTP status code."""
abort(404)
| rlam3/depot | examples/turbogears/depotexample/controllers/template.py | Python | mit | 984 |
#!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import json
import os
import pdb
import pickle
import sys
import h5py
import numpy as np
import pandas as pd
import pysam
import pyBigWig
import tensorflow as tf
if tf.__version__[0] == '1':
tf.compat.v1.enable_eager_execution()
from basenji import bed
from basenji import dna_io
from basenji import seqnn
from basenji import stream
'''
basenji_predict_bed.py
Predict sequences from a BED file.
'''
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <model_file> <bed_file>'
parser = OptionParser(usage)
parser.add_option('-b', dest='bigwig_indexes',
default=None, help='Comma-separated list of target indexes to write BigWigs')
parser.add_option('-e', dest='embed_layer',
default=None, type='int',
help='Embed sequences using the specified layer index.')
parser.add_option('-f', dest='genome_fasta',
default=None,
help='Genome FASTA for sequences [Default: %default]')
parser.add_option('-g', dest='genome_file',
default=None,
help='Chromosome length information [Default: %default]')
parser.add_option('-l', dest='site_length',
default=None, type='int',
help='Prediction site length. [Default: model seq_length]')
parser.add_option('-o', dest='out_dir',
default='pred_out',
help='Output directory [Default: %default]')
# parser.add_option('--plots', dest='plots',
# default=False, action='store_true',
# help='Make heatmap plots [Default: %default]')
parser.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script')
parser.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Ensemble forward and reverse complement predictions [Default: %default]')
parser.add_option('-s', dest='sum',
default=False, action='store_true',
help='Sum site predictions [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option('--species', dest='species',
default='human')
parser.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
(options, args) = parser.parse_args()
if len(args) == 2:
model_file = args[0]
bed_file = args[1]
elif len(args) == 4:
# multi worker
options_pkl_file = args[0]
model_file = args[1]
bed_file = args[2]
worker_index = int(args[3])
# load options
options_pkl = open(options_pkl_file, 'rb')
options = pickle.load(options_pkl)
options_pkl.close()
# update output directory
options.out_dir = '%s/job%d' % (options.out_dir, worker_index)
else:
parser.error('Must provide parameter and model files and BED file')
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
options.shifts = [int(shift) for shift in options.shifts.split(',')]
if options.bigwig_indexes is not None:
options.bigwig_indexes = [int(bi) for bi in options.bigwig_indexes.split(',')]
else:
options.bigwig_indexes = []
if len(options.bigwig_indexes) > 0:
bigwig_dir = '%s/bigwig' % options.out_dir
if not os.path.isdir(bigwig_dir):
os.mkdir(bigwig_dir)
#################################################################
  # read parameters and collect target information
if options.targets_file is None:
target_slice = None
else:
targets_df = pd.read_table(options.targets_file, index_col=0)
target_slice = targets_df.index
#################################################################
# setup model
seqnn_model = tf.saved_model.load(model_file).model
# query num model targets
seq_length = seqnn_model.predict_on_batch.input_signature[0].shape[1]
null_1hot = np.zeros((1,seq_length,4))
null_preds = seqnn_model.predict_on_batch(null_1hot)
null_preds = null_preds[options.species].numpy()
_, preds_length, preds_depth = null_preds.shape
  # hack: assume 128 bp prediction bins and derive the cropped flanks
preds_window = 128
seq_crop = (seq_length - preds_length*preds_window) // 2
#################################################################
# sequence dataset
if options.site_length is None:
options.site_length = preds_window*preds_length
print('site_length: %d' % options.site_length)
# construct model sequences
model_seqs_dna, model_seqs_coords = bed.make_bed_seqs(
bed_file, options.genome_fasta,
seq_length, stranded=False)
# construct site coordinates
site_seqs_coords = bed.read_bed_coords(bed_file, options.site_length)
# filter for worker SNPs
if options.processes is not None:
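    # worker_index is only defined in the multi-worker invocation parsed above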
worker_bounds = np.linspace(0, len(model_seqs_dna), options.processes+1, dtype='int')
model_seqs_dna = model_seqs_dna[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
model_seqs_coords = model_seqs_coords[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
site_seqs_coords = site_seqs_coords[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
num_seqs = len(model_seqs_dna)
#################################################################
# setup output
assert(preds_length % 2 == 0)
preds_mid = preds_length // 2
assert(options.site_length % preds_window == 0)
site_preds_length = options.site_length // preds_window
assert(site_preds_length % 2 == 0)
site_preds_start = preds_mid - site_preds_length//2
site_preds_end = site_preds_start + site_preds_length
# initialize HDF5
out_h5_file = '%s/predict.h5' % options.out_dir
if os.path.isfile(out_h5_file):
os.remove(out_h5_file)
out_h5 = h5py.File(out_h5_file, 'w')
# create predictions
if options.sum:
out_h5.create_dataset('preds', shape=(num_seqs, preds_depth), dtype='float16')
else:
out_h5.create_dataset('preds', shape=(num_seqs, site_preds_length, preds_depth), dtype='float16')
# store site coordinates
site_seqs_chr, site_seqs_start, site_seqs_end = zip(*site_seqs_coords)
site_seqs_chr = np.array(site_seqs_chr, dtype='S')
site_seqs_start = np.array(site_seqs_start)
site_seqs_end = np.array(site_seqs_end)
out_h5.create_dataset('chrom', data=site_seqs_chr)
out_h5.create_dataset('start', data=site_seqs_start)
out_h5.create_dataset('end', data=site_seqs_end)
#################################################################
# predict scores, write output
# define sequence generator
def seqs_gen():
for seq_dna in model_seqs_dna:
yield dna_io.dna_1hot(seq_dna)
# initialize predictions stream
preds_stream = stream.PredStreamSonnet(seqnn_model, seqs_gen(),
rc=options.rc, shifts=options.shifts, species=options.species)
for si in range(num_seqs):
preds_seq = preds_stream[si]
# slice site
preds_site = preds_seq[site_preds_start:site_preds_end,:]
# write
if options.sum:
out_h5['preds'][si] = preds_site.sum(axis=0)
else:
out_h5['preds'][si] = preds_site
# write bigwig
for ti in options.bigwig_indexes:
bw_file = '%s/s%d_t%d.bw' % (bigwig_dir, si, ti)
bigwig_write(preds_seq[:,ti], model_seqs_coords[si], bw_file,
options.genome_file, seq_crop)
# close output HDF5
out_h5.close()
def bigwig_open(bw_file, genome_file):
""" Open the bigwig file for writing and write the header. """
bw_out = pyBigWig.open(bw_file, 'w')
chrom_sizes = []
for line in open(genome_file):
a = line.split()
chrom_sizes.append((a[0], int(a[1])))
bw_out.addHeader(chrom_sizes)
return bw_out
def bigwig_write(signal, seq_coords, bw_file, genome_file, seq_crop=0):
""" Write a signal track to a BigWig file over the region
specified by seqs_coords.
Args
signal: Sequences x Length signal array
seq_coords: (chr,start,end)
bw_file: BigWig filename
genome_file: Chromosome lengths file
seq_crop: Sequence length cropped from each side of the sequence.
"""
target_length = len(signal)
# open bigwig
bw_out = bigwig_open(bw_file, genome_file)
# initialize entry arrays
entry_starts = []
entry_ends = []
# set entries
chrm, start, end = seq_coords
preds_pool = (end - start - 2 * seq_crop) // target_length
bw_start = start + seq_crop
for li in range(target_length):
bw_end = bw_start + preds_pool
entry_starts.append(bw_start)
entry_ends.append(bw_end)
bw_start = bw_end
# add
bw_out.addEntries(
[chrm]*target_length,
entry_starts,
ends=entry_ends,
values=[float(s) for s in signal])
bw_out.close()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| calico/basenji | bin/sonnet_predict_bed.py | Python | apache-2.0 | 9,710 |
#!/usr/bin/python3.3
import sys
print(sys.path)
| eroicaleo/LearningPython | ch20/script2.py | Python | mit | 48 |
from charm.core.math.pairing import hashPair as sha2
from charm.schemes.abenc.dabe_aw11 import Dabe
from charm.toolbox.ABEncMultiAuth import ABEncMultiAuth
from charm.toolbox.pairinggroup import PairingGroup,GT
from charm.toolbox.symcrypto import AuthenticatedCryptoAbstraction
debug = False
class HybridABEncMA(ABEncMultiAuth):
"""
>>> from charm.toolbox.pairinggroup import PairingGroup,GT
>>> group = PairingGroup('SS512')
>>> dabe = Dabe(group)
Setup master authority.
>>> hyb_abema = HybridABEncMA(dabe, group)
>>> global_parameters = hyb_abema.setup()
Generate attributes for two different sub-authorities:
Johns Hopkins University, and Johns Hopkins Medical Institutions.
>>> jhu_attributes = ['jhu.professor', 'jhu.staff', 'jhu.student']
>>> jhmi_attributes = ['jhmi.doctor', 'jhmi.nurse', 'jhmi.staff', 'jhmi.researcher']
Johns Hopkins sub-authorities master key.
>>> (jhu_secret_key, jhu_public_key) = hyb_abema.authsetup(global_parameters, jhu_attributes)
JHMI sub-authorities master key
>>> (jhmi_secret_key, jhmi_public_key) = hyb_abema.authsetup(global_parameters, jhmi_attributes)
To encrypt messages we need all of the authorities' public keys.
>>> allAuth_public_key = {};
>>> allAuth_public_key.update(jhu_public_key);
>>> allAuth_public_key.update(jhmi_public_key)
An example user, Bob, who is both a professor at JHU and a researcher at JHMI.
>>> ID = "20110615 bob@gmail.com cryptokey"
>>> secrets_keys = {}
>>> hyb_abema.keygen(global_parameters, jhu_secret_key,'jhu.professor', ID, secrets_keys)
>>> hyb_abema.keygen(global_parameters, jhmi_secret_key,'jhmi.researcher', ID, secrets_keys)
Encrypt a message to anyone who is both a profesor at JHU and a researcher at JHMI.
>>> msg = b'Hello World, I am a sensitive record!'
>>> policy_str = "(jhmi.doctor or (jhmi.researcher and jhu.professor))"
>>> cipher_text = hyb_abema.encrypt(global_parameters, allAuth_public_key, msg, policy_str)
>>> hyb_abema.decrypt(global_parameters, secrets_keys, cipher_text)
b'Hello World, I am a sensitive record!'
"""
def __init__(self, scheme, groupObj):
global abencma, group
# check properties (TODO)
abencma = scheme
group = groupObj
def setup(self):
return abencma.setup()
def authsetup(self, gp, attributes):
return abencma.authsetup(gp, attributes)
def keygen(self, gp, sk, i, gid, pkey):
return abencma.keygen(gp, sk, i, gid, pkey)
def encrypt(self, gp, pk, M, policy_str):
        if type(M) != bytes or type(policy_str) != str:
raise Exception("message and policy not right type!")
key = group.random(GT)
c1 = abencma.encrypt(gp, pk, key, policy_str)
# instantiate a symmetric enc scheme from this key
cipher = AuthenticatedCryptoAbstraction(sha2(key))
c2 = cipher.encrypt(M)
return { 'c1':c1, 'c2':c2 }
def decrypt(self, gp, sk, ct):
c1, c2 = ct['c1'], ct['c2']
key = abencma.decrypt(gp, sk, c1)
if key is False:
raise Exception("failed to decrypt!")
cipher = AuthenticatedCryptoAbstraction(sha2(key))
return cipher.decrypt(c2)
def main():
groupObj = PairingGroup('SS512')
dabe = Dabe(groupObj)
hyb_abema = HybridABEncMA(dabe, groupObj)
#Setup global parameters for all new authorities
gp = hyb_abema.setup()
#Instantiate a few authorities
    #Attribute names must be globally unique.
#Two authorities may not issue keys for the same attribute.
#Otherwise, the decryption algorithm will not know which private key to use
jhu_attributes = ['jhu.professor', 'jhu.staff', 'jhu.student']
jhmi_attributes = ['jhmi.doctor', 'jhmi.nurse', 'jhmi.staff', 'jhmi.researcher']
(jhuSK, jhuPK) = hyb_abema.authsetup(gp, jhu_attributes)
(jhmiSK, jhmiPK) = hyb_abema.authsetup(gp, jhmi_attributes)
allAuthPK = {}; allAuthPK.update(jhuPK); allAuthPK.update(jhmiPK)
#Setup a user with a few keys
bobs_gid = "20110615 bob@gmail.com cryptokey"
K = {}
hyb_abema.keygen(gp, jhuSK,'jhu.professor', bobs_gid, K)
hyb_abema.keygen(gp, jhmiSK,'jhmi.researcher', bobs_gid, K)
msg = b'Hello World, I am a sensitive record!'
size = len(msg)
policy_str = "(jhmi.doctor OR (jhmi.researcher AND jhu.professor))"
    ct = hyb_abema.encrypt(gp, allAuthPK, msg, policy_str)
if debug:
print("Ciphertext")
print("c1 =>", ct['c1'])
print("c2 =>", ct['c2'])
orig_msg = hyb_abema.decrypt(gp, K, ct)
if debug: print("Result =>", orig_msg)
assert orig_msg == msg, "Failed Decryption!!!"
if debug: print("Successful Decryption!!!")
if __name__ == "__main__":
debug = True
main()
| JHUISI/charm | charm/adapters/dabenc_adapt_hybrid.py | Python | lgpl-3.0 | 4,900 |
"""
Make graphs (lifecycles, ...)
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import json
from collections import OrderedDict
try:
import pygraphviz as pgv
except ImportError:
print('(optional) install pygraphviz to generate graphs')
sys.exit(0)
from iceprod.server import get_pkgdata_filename
def main():
table_filename = get_pkgdata_filename('iceprod.server','data/etc/db_config.json')
db_tables = json.load(open(table_filename),object_pairs_hook=OrderedDict)
for k in db_tables['status_graphs']:
outfile_name = os.path.join('static','lifecycle_'+k+'.png')
if os.path.exists(outfile_name) and os.path.getmtime(outfile_name) > os.path.getmtime(table_filename):
print('graph',outfile_name,'already exists. skipping')
continue
G = pgv.AGraph(strict=False,directed=True)
G.add_nodes_from(db_tables['status_options'][k])
for row in db_tables['status_graphs'][k]:
if row[-1] == 'std':
c = 'cornflowerblue'
elif row[-1] == 'auto':
c = 'cyan2'
elif row[-1] == 'debug':
c = 'chartreuse2'
elif row[-1] == 'manual':
c = 'firebrick2'
G.add_edge(row[0],row[1],color=c)
G.draw(outfile_name, prog='dot')
if __name__ == '__main__':
main()
| WIPACrepo/iceprod | docs/make_graphs.py | Python | mit | 1,446 |
import aravis
import cv2
import time
if __name__ == "__main__":
ar = aravis.Aravis()
cam = ar.get_camera("Prosilica-02-2110A-06145")
#cam = ar.get_camera("AT-Automation Technology GmbH-20805103")
width, height = cam.get_sensor_size()
cam.set_region(0, 0, width, height)
x, y, width, height = cam.get_region()
print("Camera model: ", cam.get_model_name())
print("Vendor Name: ", cam.get_vendor_name())
print("Device id: ", cam.get_device_id())
print("Image size: ", width, ",", height)
print("Sensor size: ", cam.get_sensor_size())
print("Exposure: ", cam.get_exposure_time())
print("Frame rate: ", cam.get_frame_rate())
print("Payload: ", cam.get_payload())
print("AcquisitionMode: ", cam.get_string_feature("AcquisitionMode"))
print("Acquisition vals: ", cam.get_enum_vals("AcquisitionMode"))
print("TriggerSource: ", cam.get_string_feature("TriggerSource"))
print("TriggerSource vals: ", cam.get_enum_vals("TriggerSource"))
print("TriggerMode: ", cam.get_string_feature("TriggerMode"))
print("Bandwidth: ", cam.get_integer_feature("StreamBytesPerSecond"))
print("PixelFormat: ", cam.get_string_feature("PixelFormat"))
print("PacketSize: ", cam.get_integer_feature("GevSCPSPacketSize"))
cam.set_integer_feature("GevSCPSPacketSize", 1500)
cam.setup_stream()
cam.start_acquisition_continuous()
cv2.namedWindow('capture')
try:
while True:
frame = None
            while frame is None:
frame = cam.try_get_frame()
time.sleep(0.001)
print(time.time())
cv2.imshow("capture", frame)
cv2.waitKey(1)
except:
cam.stop_acquisition()
cam.cleanup()
raise
| oroulet/python-aravis | ctypes-based/prosilica-streaming.py | Python | gpl-3.0 | 1,778 |
import os
import unittest
from vsg.rules import assert_statement
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_002_test_input.vhd'))
dIndentMap = utils.read_indent_file()
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_002_test_input.fixed.vhd'), lExpected)
class test_assert_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
        self.oFile.set_indent_map(dIndentMap)
def test_rule_002(self):
oRule = assert_statement.rule_002()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'assert')
self.assertEqual(oRule.identifier, '002')
self.assertEqual(oRule.groups, ['structure'])
lExpected = [6, 11, 18, 23, 34]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_002(self):
oRule = assert_statement.rule_002()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
| jeremiah-c-leary/vhdl-style-guide | vsg/tests/assert_statement/test_rule_002.py | Python | gpl-3.0 | 1,409 |
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
# $Id: xmlrpc_curl.py,v 1.13 2007/03/04 19:26:59 kjetilja Exp $
# We should ignore SIGPIPE when using pycurl.NOSIGNAL - see
# the libcurl tutorial for more info.
try:
import signal
from signal import SIGPIPE, SIG_IGN
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except ImportError:
pass
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import xmlrpclib, pycurl
class CURLTransport(xmlrpclib.Transport):
"""Handles a HTTP transaction to an XML-RPC server."""
xmlrpc_h = [ "Content-Type: text/xml" ]
def __init__(self, username=None, password=None):
self.c = pycurl.Curl()
self.c.setopt(pycurl.POST, 1)
self.c.setopt(pycurl.NOSIGNAL, 1)
self.c.setopt(pycurl.CONNECTTIMEOUT, 30)
self.c.setopt(pycurl.HTTPHEADER, self.xmlrpc_h)
        if username is not None and password is not None:
self.c.setopt(pycurl.USERPWD, '%s:%s' % (username, password))
self._use_datetime = False
def request(self, host, handler, request_body, verbose=0):
b = StringIO()
self.c.setopt(pycurl.URL, 'http://%s%s' % (host, handler))
self.c.setopt(pycurl.POSTFIELDS, request_body)
self.c.setopt(pycurl.WRITEFUNCTION, b.write)
self.c.setopt(pycurl.VERBOSE, verbose)
self.verbose = verbose
try:
self.c.perform()
except pycurl.error, v:
raise xmlrpclib.ProtocolError(
host + handler,
v[0], v[1], None
)
b.seek(0)
return self.parse_response(b)
if __name__ == "__main__":
## Test
server = xmlrpclib.ServerProxy("http://betty.userland.com",
transport=CURLTransport())
print server
try:
print server.examples.getStateName(41)
except xmlrpclib.Error, v:
print "ERROR", v
| kerneltravel/pycurl | examples/xmlrpc_curl.py | Python | lgpl-2.1 | 1,958 |
# -*- encoding: utf-8 -*-
from osv import fields, osv
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'fullnum': fields.char('num', size=40),
'sales_ids': fields.many2many('res.users', 'rel_partner_user','partner_id','user_id', '负责业务员', help='内部负责业务员. 设置邮件地址,以备通知使用.'),
}
class res_user(osv.osv):
_inherit = 'res.users'
_columns = {
'jid': fields.char('num', size=40),
'partner_ids':fields.many2many('res.partner', 'rel_partner_user', 'user_id', 'partner_id', '负责客户', help='负责客户'),
}
class product_uom(osv.osv):
_inherit = 'product.uom'
_columns = {
'fullnum': fields.char('num', size=40),
}
class product_product(osv.osv):
_inherit = 'product.product'
_columns = {
'fullnum': fields.char('num', size=40),
'source':fields.selection([(u'塑胶事业部',u'塑胶事业部'), (u'安全帽事业部',u'安全帽事业部'),
(u'玻璃事业部',u'玻璃事业部'), (u'茶叶事业部',u'茶叶事业部'),(u'真空事业部',u'真空事业部'),(u'塑胶制品',u'塑胶制品'), (u'财务部',u'财务部'),
(u'其他',u'其他')],'事业部', required=True),
}
class product_category(osv.osv):
_inherit = 'product.category'
_columns = {
'fullnum': fields.char('num', size=40),
}
class fg_report_horizontal(osv.osv_memory):
_name = "fg_data.report.horizontal"
_columns = {
'name':fields.char('项目', size=40),
'desc':fields.char('说明', size=40),
'value': fields.float('数据', digits=(12, 2)),
    }
| Johnzero/OE7 | OE-debug文件/PyWapFetion-master/extra.py | Python | agpl-3.0 | 1,703 |
# -*- coding: utf-8 -*-
#
# allocations/entries.py is part of MetaDoc (Client).
#
# All of MetaDoc is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# MetaDoc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MetaDoc. If not, see <http://www.gnu.org/licenses/>.
#
import metaelement
class AllocationEntry(metaelement.MetaElement):
"""AllocationEntry - Allocation for specific projects. """
xml_tag_name = "all_entry"
def __init__(self, account_nmb, volume, metric, all_class, period):
""" Defines attributes for all_entry XML elements.
@param account_nmb: Account number for allocation.
@type account_nmb: String
@param volume: The amount of parameter metric.
@type volume: String
@param metric: Measurement of parameter volume.
@type metric: String
@param all_class: Allocations class.
@type all_class: String, either "pri" or "nonpri", for prioritized and
non-prioritized allocation.
@param period: Period of allocation.
        @type period: String of the form "YYYY.P" where P is the year's period.
"""
attributes = {
'account_nmb': account_nmb,
'volume': volume,
'metric': metric,
'all_class': all_class,
'period': period,
}
self.legal_metric = ('hours', 'mb',)
self.legal_all_class = ('pri', 'nonpri',)
super(AllocationEntry, self).__init__(AllocationEntry.xml_tag_name, attributes)
def clean_metric(self, metric):
"""Checks for legal values of metric.
Raises L{IllegalAttributeValueError} on illegal metric value.
@param metric: Metric for allocation
@type metric: String
@return: String
"""
self._clean_allowed_values(metric, self.legal_metric, 'metric', self.xml_tag_name, False)
return metric
def clean_all_class(self, all_class):
"""Checks for legal values of all_class.
Raises L{IllegalAttributeValueError} on illegal all_class value.
@param all_class: Allocation class of allocation
@type all_class: String
@return: String
"""
self._clean_allowed_values(all_class, self.legal_all_class, 'all_class', self.xml_tag_name, False)
return all_class
| henrikau/metadoc | client/allocations/entries.py | Python | gpl-3.0 | 2,825 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# This file is part of qmljs, the QML/JS language support plugin for KDevelop
# Copyright (c) 2014 Denis Steckelmacher <steckdenis@yahoo.fr>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License or (at your option) version 3 or any later version
# accepted by the membership of KDE e.V. (or its successor approved
# by the membership of KDE e.V.), which shall act as a proxy
# defined in Section 14 of version 3 of the license.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from jsgenerator import *
from common import *
# Print the license of the generated file (the same as the one of this file)
license()
basicTypes(globals())
require('stream')
require('event')
_object = 'new Object()'
_function = 'function(){}'
_stream = 'new stream.Writable()'
Module().members(
F('new Interface()', 'createInterface', ('options', _object)),
Class('Interface').prototype('event.EventEmitter').members(
F(_void, 'setPrompt', ('prompt', _string), ('length', _int)),
F(_void, 'prompt', ('preserveCursor', _bool)),
F(_void, 'question', ('query', _string), ('callback', _function)),
F(_void, 'pause'),
F(_void, 'resume'),
F(_void, 'close'),
F(_void, 'write', ('data', _string), ('key', _object))
),
F(_void, 'cursorTo', ('stream', _stream), ('x', _int), ('y', _int)),
F(_void, 'moveCursor', ('stream', _stream), ('dx', _int), ('dy', _int)),
F(_void, 'clearLine', ('stream', _stream), ('dir', _int)),
F(_void, 'clearScreenDown', ('stream', _stream))
).print()
| mali/kdevelop | languages/qmljs/nodejsmodules/readline.py | Python | gpl-2.0 | 2,074 |
# pylint: disable = blacklisted-name, missing-docstring, useless-return, misplaced-comparison-constant, invalid-name, no-self-use, line-too-long, useless-object-inheritance
def foo():
return None
def goo():
return None
if foo == 786: # [comparison-with-callable]
pass
if 666 == goo: # [comparison-with-callable]
pass
if foo == goo:
pass
if foo() == goo():
pass
class FakeClass(object):
def __init__(self):
self._fake_prop = 'fake it till you make it!!'
def fake_method(self):
return '666 - The Number of the Beast'
@property
def fake_property(self):
return self._fake_prop
@fake_property.setter
def fake_property(self, prop):
self._fake_prop = prop
obj1 = FakeClass()
obj2 = FakeClass()
if obj1.fake_method == obj2.fake_method:
pass
if obj1.fake_property != obj2.fake_property: # property although is function but is called without parenthesis
pass
if obj1.fake_method != foo:
pass
if obj1.fake_method != 786: # [comparison-with-callable]
pass
if obj1.fake_method != obj2.fake_property: # [comparison-with-callable]
pass
if 666 == 786:
pass
a = 666
b = 786
if a == b:
pass
| kczapla/pylint | pylint/test/functional/comparison_with_callable.py | Python | gpl-2.0 | 1,204 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (c) 2014, Kersten Doering <kersten.doering@gmail.com>, Bjoern Gruening <bjoern.gruening@gmail.com>
"""
#Kersten Doering 04.06.2014
#check https://github.com/miracle2k/xappy/blob/master/docs/introduction.rst for nice examples
import xappy
searchConn = xappy.SearchConnection("xapian/xapian2015")
searchConn.reopen()
#########################
querystring = "pancreatic"
q = searchConn.query_field('title',querystring)
print "search query: ", q
#save all machting documents in "results" (starting with rank 0 - check help documentation of function "search")
results = searchConn.search(q, 0, searchConn.get_doccount())
print "number of matches: ", results.matches_estimated
### debug: ###
#print first 5 titles with highlight function and save first 1000 titles in an HTML file
#print "### first 5 hits: ###"
#print "Rank\tPubMed-ID\tTitle (query term highlighted)"
#for index,result in enumerate(results):
# if "<b>" in results.get_hit(index).highlight('title')[0]:
# print index, "\t", result.id, "\t", results.get_hit(index).highlight('title')[0]
# else:
#        print result.id, "does not contain a highlighted term"
# if index > 5:
# break
#open HTML file
outfile = open("Xapian_query_results.html","w")
#document header
start_string = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head>
<meta http-equiv="content-type" content="text/html; charset=windows-1252">
<title>Xapian_query_results</title>
</head>
<body>
<table border="1" width="100%">
<tbody><tr>
<th>Rank</th>
<th>PubMed-ID</th>
<th>Title (query term highlighted)</th>
</tr>
"""
#string for finishing HTML document
end_string = """
</tbody></table>
</body></html>
"""
#write header
outfile.write(start_string)
print "### save first 1000 hits in Xapian_query_results.html ###"
#write the first 1000 PubMed-IDs and titles with term "pancreatic" or stem "pancreat"
for index,result in enumerate(results):
outfile.write("<tr><td>" + str(index) + "</td><td>" + result.id + "</td><td>" + results.get_hit(index).highlight('title')[0] +"</td></tr>")
if index == 999:
break
#write string for finishing HTML document
outfile.write(end_string)
#close file connection
outfile.close()
#close connection to Xapian database
#searchConn.close()
| telukir/PubMed2Go | full_text_index/search_title.py | Python | isc | 2,405 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import calcoohija
def modo_operacion(lista2, modelo):
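    """Apply the binary operation modelo cumulatively over the integer operands in lista2[1:] and print the result."""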
primero = int(lista2[1])
for i in lista2[2:]:
primero = modelo(primero, int(i))
print(primero)
def operando_valido(operador, lista2):
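    """Run the operator keyword through the calculator dispatch table (libreria); exit on an unknown operator."""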
calcular = calcoohija.CalculadoraHija()
try:
modo_operacion(lista2, calcular.libreria[operador])
except KeyError:
sys.exit('Error: Non operation parameter')
if __name__ == '__main__':
fichero = sys.argv[1]
fichero2 = open(fichero, 'r')
texto = fichero2.read()
    fichero2.close()
lista = texto.split()
for i in lista:
operacion = str(i)
lista2 = operacion.split(',')
operador = lista2[0]
operando_valido(operador, lista2)
| arealg/ptavi-p2 | calcplus.py | Python | gpl-2.0 | 774 |
# -*- coding: utf-8 -*-
'''
Tests neo_utils.core.
@author: Pierre Thibault (pierre.thibault1 -at- gmail.com)
@license: MIT
@since: 2010-11-10
'''
__docformat__ = "epytext en"
import unittest
from neo_utils.core import count
from neo_utils.core import every
from neo_utils.core import inverse_linked_list
from neo_utils.core import Prototype
from neo_utils.core import negate
from neo_utils.core import some
from neo_utils.core import transform
class TestNeoUtils(unittest.TestCase):
"""TestNeoUtils the methods of the module """
EMPTY_LIST = []
ALL_FALSE = [False, 0, []]
ALL_TRUE = [True, 1, -45, (1)]
SOME_TRUE = (0, False, [1], [])
@staticmethod
def indentity(p):
return p
def assert_linked_list_order(self, linked_list, sequence_order):
current_node = linked_list
index = 0
while current_node:
self.assertEqual(current_node, sequence_order[index])
current_node = current_node.next
index += 1
self.assertEqual(index, len(sequence_order))
def test_every(self):
self.assertTrue(every(TestNeoUtils.indentity,
TestNeoUtils.EMPTY_LIST))
self.assertFalse(every(TestNeoUtils.indentity,
TestNeoUtils.ALL_FALSE))
self.assertTrue(every(TestNeoUtils.indentity,
TestNeoUtils.ALL_TRUE))
self.assertFalse(every(TestNeoUtils.indentity,
TestNeoUtils.SOME_TRUE))
def test_count(self):
self.assertEqual(0, count(TestNeoUtils.indentity,
TestNeoUtils.EMPTY_LIST))
self.assertEqual(0, count(TestNeoUtils.indentity,
TestNeoUtils.ALL_FALSE))
self.assertEqual(4, count(TestNeoUtils.indentity,
TestNeoUtils.ALL_TRUE))
self.assertEqual(1, count(TestNeoUtils.indentity,
TestNeoUtils.SOME_TRUE))
def test_inverse_linked_list(self):
o1 = Prototype()
o2 = Prototype()
o3 = Prototype()
o1.next = o2
o2.next = o3
o3.next = None
self.assert_linked_list_order(inverse_linked_list(o1), (o3, o2, o1))
self.assert_linked_list_order(inverse_linked_list(None), tuple())
o1 = Prototype()
o2 = Prototype()
o1.next = o2
o2.next = None
self.assert_linked_list_order(inverse_linked_list(o1), (o2, o1))
def test_negate(self):
negation = negate(TestNeoUtils.indentity)
result = []
for i in TestNeoUtils.SOME_TRUE:
result.append(negation(i))
self.assertEqual(result, [True, True, False, True])
def test_some(self):
self.assertFalse(some(TestNeoUtils.indentity,
TestNeoUtils.EMPTY_LIST))
self.assertFalse(some(TestNeoUtils.indentity, TestNeoUtils.ALL_FALSE))
self.assertTrue(some(TestNeoUtils.indentity, TestNeoUtils.ALL_TRUE))
self.assertTrue(some(TestNeoUtils.indentity, TestNeoUtils.SOME_TRUE))
def test_transform(self):
l = [4, 5, 7]
transform(lambda x: x + 1, l)
self.assertEqual(l, [5, 6, 8])
l = []
transform(lambda x: x * x, l)
self.assertEqual(l, [])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'TestNeoUtils.testName']
    unittest.main()
| Pierre-Thibault/neo-utils | neo_utils/test_core.py | Python | mit | 3,484 |
from pprint import pprint
team_file_2016_Spr = ('/Users/coulter/Desktop/life_notes/2016_q1/' +
'scvl/ratings/teams_for_import_2016-Spr.csv-Sheet1.csv')
def import_teams():
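    """Parse the team CSV export into a list of divisions, each a list of teams, each a list of member names."""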
import csv
from copy import deepcopy
divisions = []
with open(team_file_2016_Spr, 'r') as team_file:
team_data = csv.reader(team_file)
team_nums_cols = []
for line in team_data:
# checking for a new division and saving the previous teams
if line[0] != '':
                if team_nums_cols:
divisions.append(deepcopy(division_teams))
current_div = int(line[0])
division_teams = []
# checking for new team names
if any(("team" in cell.lower()) for cell in line):
team_nums_cols = collect_team_columns(line)
# assumes that teams are read in order
division_teams += [[] for _ in range(len(team_nums_cols))]
else:
# collecting team-wise data
if team_nums_cols:
for team_idx, column in team_nums_cols:
if line[column].strip() != '':
division_teams[team_idx] += [line[column]]
return divisions
def collect_team_columns(line):
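    """Return (team_index, column_index) pairs for every 'Team N' header cell found in the row; teams are zero-indexed."""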
team_nums_cols = []
for idx, cell in enumerate(line):
if 'team' in cell.lower():
cell = cell.lower()
cell = cell.replace('team', '')
cell = cell.replace(' ', '')
team_num = int(cell) - 1 # team 1 is stored internally as 0, etc
team_nums_cols.append((team_num, idx))
return team_nums_cols
def check_if_person_in_division(member, members):
    for div_mem in members:
        for team_mem in div_mem:
            if member in team_mem:
                return True
    raise ValueError('member %s could not be found in any division' % (member,))
def translate_division(div_team_string):
pass
def import_ratees():
import csv
from copy import deepcopy
ratees_path = '/Users/coulter/Desktop/life_notes/2016_q1/scvl/'
ratees_file = 'Mid-Season-Rerating-List.csv'
ratees = []
with open(ratees_path + ratees_file, 'r') as ratee_file:
ratee_data = csv.DictReader(ratee_file)
ratee_nums_cols = []
for line in ratee_data:
line['name'] = line['PLAYER']
ratees += [line]
def member_debug_report(members):
print('There are {} divisions'.format(len(members)))
for div_idx, div_members in enumerate(members):
print('There are {} teams in division {}'.format(
len(div_members), div_idx))
for team_idx, team in enumerate(div_members):
print('There are {} members in team {} of division {}: {}'.format(
len(team), team_idx, div_idx, ','.join(team)))
if __name__ == '__main__':
divisions = import_teams()
#pprint(divisions)
    import_ratees()
| widgetOne/league_admin | scheduler/members.py | Python | mit | 3,094 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def add_course_to_tasks(apps, schema_editor):
Task = apps.get_model('course', 'Task')
AssignmentTask = apps.get_model('course', 'AssignmentTask')
for task in Task.objects.all():
for assignment_task in AssignmentTask.objects.filter(task=task).all():
            if task.course is None:
task.course = assignment_task.assignment.course
task.save()
elif task.course != assignment_task.assignment.course:
other_task = Task.objects.filter(
name=task.name,
course=assignment_task.assignment.course
).first()
                if other_task is None:
other_task = Task.objects.create(
course_id=assignment_task.assignment.course_id,
name=task.name, description=task.description
)
assignment_task.task = other_task
assignment_task.save()
Task.objects.filter(course=None).delete()
class Migration(migrations.Migration):
dependencies = [
('course', '0012_add_course_to_task_step_1'),
]
operations = [
migrations.RunPython(add_course_to_tasks),
]
| wallysalami/gamified-education | course/migrations/0013_add_course_to_task_step_2.py | Python | mit | 1,339 |
__author__ = 'reggie'
import ujson as json
import Queue
import zmq
import time
import pika
import zlib
import base64
import PmkSeed
import PmkBroadcast
import PmkShared
from PmkShared import *
from Queue import *
class rx(Queue):
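    """Receive queue for incoming packets; dig() is a look-ahead hook that is currently disabled."""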
def __init__(self, maxsize=0):
Queue.__init__(self, maxsize)
pass
def dig(self, pkt):
#print "DIG: "+json.dumps(pkt)
#if (pkt[0]["state"] == "TRANSIT") or (pkt[0]["state"] == "NEW"):
# iplugins = PmkSeed.iplugins
# keys = PmkSeed.iplugins.keys
# l = len(pkt)
# func = pkt[l-1]["func"]
# #data = pkt[l-2]["data"]
# if ":" in func:
# func = func.split(":")[1]
# if func in keys():
# klass = iplugins[func]
# klass.look_ahead(pkt)
pass
class InternalDispatch(SThread):
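    """Consume packets from the context receive queue and dispatch them to the matching seed plugins."""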
_packed_pkts = 0
def __init__(self, context):
SThread.__init__(self)
self.context = context
pass
def run(self):
rx = self.context.getRx()
#loads = json.loads
keys = PmkSeed.iplugins.keys
iplugins = PmkSeed.iplugins
speedy = self.context.is_speedy()
while 1:
#already in json format
pkt = rx.get(True)
aux = 0
if "aux" in pkt[0].keys():
aux = pkt[0]["aux"]
if aux & TRACER_BIT:
#stat = self.context.get_stat()
#print stat
#continue
pass
if "code" in pkt[0].keys():
seed = base64.decodestring(pkt[0]["code"])
#print "SEED: "+seed
func = self.context.load_seed_from_string(seed)
l = len(pkt)
pkt[l-1]["func"] = func
del pkt[0]["code"]
#logging.debug("Packet received: \n"+pkts)
#pkt = json.loads(pkts)
#pkt = loads(pkts)
if not speedy:
#Check for PACK
if pkt[0]["state"] == "PACK_OK":
#logging.debug("PACK packet: "+pkts)
seed = pkt[0]["last_func"]
if seed in PmkSeed.iplugins.keys():
klass = PmkSeed.iplugins[seed]
klass.pack_ok(pkt)
self._packed_pkts += 1
#logging.debug("PACKED pkts: "+str(self._packed_pkts))
continue
# if pkt[0]["state"] == "MERGE":
# seed = pkt[0]["last_func"]
#
# if seed in PmkSeed.iplugins.keys():
# klass = PmkSeed.iplugins[seed]
# klass.merge(pkt)
# #klass.pack_ok(pkt)
# #self._packed_pkts += 1
# #logging.debug("PACKED pkts: "+str(self._packed_pkts))
# continue
if pkt[0]["state"] == "ARP_OK":
logging.debug("Received ARP_OK: "+json.dumps(pkt))
self.context.put_pkt_in_shelve2(pkt)
continue
#print json.dumps(pkt)
l = len(pkt)
func = pkt[l-1]["func"]
data = pkt[l-2]["data"]
if ":" in func:
func = func.split(":")[1]
#if func in PmkSeed.iplugins.keys():
# klass = PmkSeed.iplugins[func]
# rt = klass._stage_run(pkt, data)
if func in keys():
klass = iplugins[func]
if speedy:
rt = klass._stage_run_express(pkt, data)
else:
rt = klass._stage_run(pkt, data)
class Injector(SThread):
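    """Kick off every loaded plugin that declares no inputs (source seeds)."""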
def __init__(self, context):
SThread.__init__(self)
self.context = context
def run(self):
for x in PmkSeed.iplugins.keys():
klass = PmkSeed.iplugins[x]
if not klass.hasInputs():
#klass.run(klass.__rawpacket())
klass.rawrun()
if self.stopped():
logging.debug("Exiting thread "+self.__class__.__name__)
break
else:
continue
class RabbitMQMonitor():
class MonitorThread(SThread):
def __init__(self, parent, context, connection, queue, exchange=''):
SThread.__init__ (self)
self.context = context
host, port, username, password, vhost = self.context.get_rabbitmq_cred()
credentials = pika.PlainCredentials(username, password)
if connection == None:
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=host, credentials=credentials, virtual_host=vhost))
else:
self.connection = connection
self.parent = parent
self.tag_map = self.parent.tag_map
self.channel = self.connection.channel()
self.queue = queue
self.exchange = exchange
self.cnt = 0
self.channel.basic_qos(prefetch_count=1000)
#self.channel.exchange_declare(exchange=str(exchange), type='fanout')
#self.channel.queue_declare(queue=str(queue))
#self.channel.queue_bind(exchange=str(exchange),
# queue=str(queue))
self.channel.queue_declare(queue=str(queue), durable=False, exclusive=True)
#self.channel.basic_consume(self.callback,
# queue=queue,
# no_ack=True)
def loop(self):
rx = self.context.getRx()
while self.connection.is_open:
try:
                    # FIXME: guard against basic_get returning nothing on an empty queue
method, properties, bodyz = self.channel.basic_get(queue=self.queue, no_ack=True)
if method:
if (method.NAME == 'Basic.GetEmpty'):
time.sleep(1)
else:
self.cnt += 1
body = zlib.decompress(bodyz)
logging.debug("RabbitMQ received from "+self.queue+": "+ str(body))
pkt = json.loads(body)
rx.dig(pkt)
rx.put(pkt)
# pkt = json.loads(body)
#
# l = len(pkt)
# func = None
# if method.routing_key in self.tag_map:
# func = self.tag_map[method.routing_key]
# if ":" in func:
# func = func.split(":")[1]
# data = pkt[l-1]["data"]
#
# if func in PmkSeed.iplugins.keys():
# klass = PmkSeed.iplugins[func]
# rt = klass._stage_run(pkt, data)
else:
time.sleep(1)
except pika.exceptions.ConnectionClosed as e:
logging.warning("Pika connection to "+self.queue+" closed.")
# def callback(self, ch, method, properties, body):
# self.cnt += 1
# logging.debug("RabbitMQ received: "+ str(self.cnt))
# pkt = json.loads(body)
# l = len(pkt)
# func = None
# if method.routing_key in self.tag_map:
# func = self.tag_map[method.routing_key]
# data = pkt[l-1]["data"]
#
# if func in PmkSeed.iplugins.keys():
# klass = PmkSeed.iplugins[func]
# rt = klass._stage_run(pkt, data)
def run(self):
#self.channel.start_consuming()
self.loop()
def __init__(self, context, connection):
self.connection = connection
self.channel = connection.channel()
self.context = context
self.tag_map = {}
def add_monitor_queue(self, queue, func=None):
self.tag_map[queue] = func
#fqueue = queue+":"+self.context.getUuid()+":"+func
fqueue = queue
qthread = RabbitMQMonitor.MonitorThread(self, self.context, None, fqueue, exchange='')
qthread.start()
#TODO:fix default queue
# aqueue = queue.split(":")
# if len(aqueue) > 2:
# queue2 = "T:"+aqueue[1]+":"+aqueue[2]
# self.tag_map[queue2] = func
#
# # qthread = RabbitMQMonitor.MonitorThread(self, self.context, None, queue)
# # qthread.start()
#
# try:
# self.channel = self.connection.channel()
# self.channel.queue_declare(queue=str(queue2), passive=True,durable=True)
# logging.info("Using default rabbitmq queue: "+queue2)
# qthread = RabbitMQMonitor.MonitorThread(self, self.context, None, queue2)
# qthread.start()
# except Exception as e:
# qthread = RabbitMQMonitor.MonitorThread(self, self.context, None, queue)
# qthread.start()
#self.channel.queue_declare(queue=queue)
#self.channel.basic_consume(self.callback,
# queue=queue,
# no_ack=True)
#self.channel.basic_qos(prefetch_count=10)
#threading.Thread(target=self.channel.start_consuming)
#self.channel.start_consuming()
class ZMQPacketMonitor(SThread):
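    """Pull zlib-compressed packets from a ZMQ endpoint, decode them and feed the receive queue."""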
def __init__(self, context, zmqcontext, bind_to):
SThread.__init__ (self)
self.context = context
self.bind_to = bind_to
if (zmqcontext == None):
self.zmq_cntx = zmq.Context()
pass
else:
self.zmq_cntx = zmqcontext
#self.zmq_cntx = zmq.Context()
self.rx = self.context.getRx()
def proccess_pkt(self, pkts):
pkt = json.loads(pkts)
logging.debug("PACKET RECEIVED: "+pkts)
#Check for PACK
if pkt[0]["state"] == "PACK_OK":
seed = pkt[0]["last_func"]
if seed in PmkSeed.iplugins.keys():
klass = PmkSeed.iplugins[seed]
klass.pack_ok(pkt)
self._packed_pkts += 1
#logging.debug("PACKED pkts: "+str(self._packed_pkts))
return True
# if pkt[0]["state"] == "MERGE":
# seed = pkt[0]["last_func"]
#
# if seed in PmkSeed.iplugins.keys():
# klass = PmkSeed.iplugins[seed]
# klass.merge(pkt)
# #klass.pack_ok(pkt)
# #self._packed_pkts += 1
# #logging.debug("PACKED pkts: "+str(self._packed_pkts))
# continue
if pkt[0]["state"] == "ARP_OK":
logging.debug("Received ARP_OK: "+json.dumps(pkt))
self.context.put_pkt_in_shelve2(pkt)
return True
l = len(pkt)
func = pkt[l-1]["func"]
data = pkt[l-2]["data"]
if ":" in func:
func = func.split(":")[1]
if func in PmkSeed.iplugins.keys():
klass = PmkSeed.iplugins[func]
rt = klass._stage_run(pkt, data)
def run(self):
#context = zmq.Context()
soc = self.zmq_cntx.socket(zmq.PULL)
soc.setsockopt(zmq.RCVBUF, 2000)
soc.setsockopt(zmq.RCVHWM, 2000)
try:
soc.bind(self.bind_to)
except zmq.ZMQError as e:
nip = PmkBroadcast.get_llan_ip()
self.bind_to = "tcp://"+str(nip)+":"+str(PmkShared.ZMQ_ENDPOINT_PORT)
logging.warning("Rebinding to: "+self.bind_to)
soc.bind(self.bind_to)
#soc.setsockopt(zmq.HWM, 1000)
#soc.setsockopt(zmq.SUBSCRIBE,self.topic)
#soc.setsockopt(zmq.RCVTIMEO, 10000)
queue_put = self.context.getRx().put
dig = self.context.getRx().dig
while True:
try:
msg = soc.recv()
#self.context.getRx().put(msg)
d_msg = zlib.decompress(msg)
pkt = json.loads(d_msg)
dig(pkt)
queue_put(pkt)
#self.proccess_pkt(msg)
#del msg
# if "REVERSE" in msg:
# logging.debug(msg)
# ep = msg.split("::")[1]
# logging.debug("Reverse connecting to: "+ep)
# rec = self.zmq_cntx.socket(zmq.PULL)
# rec.connect(ep)
# msg = rec.recv()
# logging.debug("Received msg: "+msg)
# #continue
#self.rx.put(msg)
#logging.debug("Message: "+str(msg))
except zmq.ZMQError as e:
if self.stopped():
logging.debug("Exiting thread "+ self.__class__.__name__)
soc.close()
#zmq_cntx.destroy()
#zmq_cntx.term()
break
else:
continue
# except Exception as e:
# logging.error(str(e))
#except MemoryError as e:
# logging.error(str(e))
# sys.exit(1)
pass
#class InternalDispatch2(Thread):
# def __init__(self, context):
# Thread.__init__(self)
# self.context = context
# pass
#
# def run(self):
#
# rx = self.context.getRx()
# tx = self.context.getTx()
# while 1:
# #fname = rx.get(True)
# #fh = open(fname, "r")
# #pkt = fh.read()
# pkt = rx.get(True)
# m = re.search('##START-CONF(.+?)##END-CONF(.*)', pkt, re.S)
# if m:
# pkt_header = m.group(1)
# pkt_data = m.group(2)
# d = json.loads(pkt_header)
# for fc in d["invoke"]:
# state = fc["state"]
# if not ((int(state) & DRPackets.READY_STATE) == 1):
# func = fc["func"]
# logging.debug("Trying invoking local function: "+str(func))
# if func in DRPlugin.hplugins:
# klass = DRPlugin.hplugins[func](self.context)
# #klass.on_load()
# rt = klass.run(pkt_data)
# pkt_data = rt
# #xf = klass()
# logging.debug("RESULT: "+str(rt))
# fc["state"] = DRPackets.READY_STATE
# opkt = "##START-CONF" + json.dumps(d) + "##END-CONF\n"+str(rt)
# logging.debug("Out PKT: "+ str(opkt))
# #tx.put(opkt)
#
# #foutname = "./tx/"+d["container-id"]+d["box-id"]+".pkt"
# #fout = open(foutname, "w")
# #fout.write(strg)
# #fout.flush()
# #fout.close()
# #logging.debug("HERE 1")
# #tx.put(d,True)
# #logging.debug("HERE 2")
# #break
#
# #logging.debug("Return result: "+str(strg))
# else:
# logging.debug("No local function "+func+" found")
#
#
# else:
# logging.debug("Ready moving on")
#
#
#
# #logging.debug("Packet dispatch: "+str(pkt_header))
| recap/pumpkin | pumpkin/PmkInternalDispatch.py | Python | mit | 15,836 |
"""
Countdown
=========
"""
from tryalgo.arithm_expr_target import arithm_expr_target
arithm_expr_target([25, 50, 75, 100, 3, 6], 952)
# %%
# Returns :code:`((((75*3)*(100+6))-50)/25)=952`.
#
# See on our blog the `original Countdown video <https://tryalgo.org/fr/2017/03/14/le-compte-est-bon/>`_ behind this example.
| jilljenn/tryalgo | examples/arithm_expr_target.py | Python | mit | 322 |
#!/usr/bin/env python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
from tspapi import Source
from tspapi import Sender
import tspapi.source
import json
class SourceTest(TestCase):
    def test_default_constructor(self):
source = Source()
self.assertIsNone(source.ref)
self.assertIsNone(source.type)
self.assertIsNone(source.name)
self.assertIsNone(source.properties)
def test_constructor_args(self):
ref = 'foo'
_type = 'host'
name = 'bar'
properties = {'red': 1, 'blue': 'foo', 'green': 1.0}
source = Source(ref=ref, _type=_type, name=name, properties=properties)
self.assertEqual(source.ref, ref)
self.assertEqual(source.type, _type)
self.assertEqual(source.name, name)
self.assertEqual(source.properties, properties)
def test_ref(self):
ref = 'bar'
source = Source(ref=ref)
self.assertEqual(source.ref, ref)
def test_type(self):
_type = 'blah'
source = Source(_type=_type)
self.assertEqual(source.type, _type)
def test_name(self):
name = 'hello'
source = Source(name=name)
self.assertEqual(source.name, name)
def test_properties(self):
properties = {'red': 1, 'blue': 'foo', 'green': 1.0}
source = Source(properties=properties)
self.assertEqual(1, properties['red'])
self.assertEqual('foo', properties['blue'])
self.assertEqual(1.0, properties['green'])
def test_to_json(self):
ref = 'device'
_type = 'blah'
name = 'hello'
properties = {'red': 1, 'blue': 'foo', 'green': 1.0}
source = Source(ref=ref, _type=_type, name=name, properties=properties)
output = json.dumps(source, sort_keys=True, default=tspapi.source.serialize_instance)
expected = '{"name": "hello", "properties": {"blue": "foo", "green": 1.0, "red": 1}, ' + \
'"ref": "device", "type": "blah"}'
self.assertEqual(expected, output)
class SenderTest(TestCase):
def test_default_constructor(self):
sender = Sender()
self.assertIsNone(sender.ref)
self.assertIsNone(sender.type)
self.assertIsNone(sender.name)
self.assertIsNone(sender.properties)
def test_constructor_args(self):
ref = 'foo'
_type = 'host'
name = 'bar'
properties = {'red': 1, 'blue': 'foo', 'green': 1.0}
sender = Sender(ref=ref, _type=_type, name=name, properties=properties)
self.assertEqual(sender.ref, ref)
self.assertEqual(sender.type, _type)
self.assertEqual(sender.name, name)
self.assertEqual(sender.properties, properties)
def test_ref(self):
ref = 'bar'
sender = Sender(ref=ref)
self.assertEqual(sender.ref, ref)
def test_type(self):
_type = 'blah'
sender = Sender(_type=_type)
self.assertEqual(sender.type, _type)
def test_name(self):
name = 'hello'
sender = Sender(name=name)
self.assertEqual(sender.name, name)
def test_properties(self):
properties = {'red': 1, 'blue': 'foo', 'green': 1.0}
sender = Sender(properties=properties)
self.assertEqual(1, properties['red'])
self.assertEqual('foo', properties['blue'])
self.assertEqual(1.0, properties['green'])
def test_to_json(self):
ref = 'device'
_type = 'blah'
name = 'hello'
properties = {'red': 1, 'blue': 'foo', 'green': 1.0}
sender = Sender(ref=ref, _type=_type, name=name, properties=properties)
output = json.dumps(sender, sort_keys=True, default=tspapi.source.serialize_instance)
expected = '{"name": "hello", "properties": {"blue": "foo", "green": 1.0, "red": 1}, ' + \
'"ref": "device", "type": "blah"}'
self.assertEqual(expected, output)
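# --- Added runner hook (illustrative; not part of the original tests) ---
# Allows the suite to be executed directly: python source_test.py
if __name__ == '__main__':
    import unittest
    unittest.main()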
| jdgwartney/pulse-api-python | tests/unit/tspapi/source_test.py | Python | apache-2.0 | 4,505 |
from webassets.filter import ExternalTool
class Babel(ExternalTool):
"""Processes ES6+ code into ES5 friendly code using `Babel <https://babeljs.io/>`_.
Requires the babel executable to be available externally.
To install it, you might be able to do::
$ npm install --global babel-cli
You probably also want some presets::
$ npm install --global babel-preset-es2015
Example python bundle:
.. code-block:: python
es2015 = get_filter('babel', presets='es2015')
bundle = Bundle('**/*.js', filters=es2015)
Example YAML bundle:
.. code-block:: yaml
es5-bundle:
output: dist/es5.js
config:
BABEL_PRESETS: es2015
filters: babel
contents:
- file1.js
- file2.js
Supported configuration options:
BABEL_BIN
The path to the babel binary. If not set the filter will try to run
``babel`` as if it's in the system path.
BABEL_PRESETS
Passed straight through to ``babel --presets`` to specify which babel
presets to use
BABEL_EXTRA_ARGS
A list of manual arguments to be specified to the babel command
BABEL_RUN_IN_DEBUG
May be set to False to make babel not run in debug
"""
name = 'babel'
max_debug_level = None
options = {
'binary': 'BABEL_BIN',
'presets': 'BABEL_PRESETS',
'extra_args': 'BABEL_EXTRA_ARGS',
'run_in_debug': 'BABEL_RUN_IN_DEBUG',
}
def setup(self):
super(Babel, self).setup()
if self.run_in_debug is False:
# Disable running in debug mode for this instance.
self.max_debug_level = False
def input(self, _in, out, **kw):
args = [self.binary or 'babel']
if self.presets:
args += ['--presets', self.presets]
if self.extra_args:
args.extend(self.extra_args)
return self.subprocess(args, out, _in)
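# --- Added usage sketch (illustrative; not part of the original module) ---
# Mirrors the docstring example above; assumes webassets is installed and
# the ``babel`` binary from babel-cli is on the PATH.
if __name__ == '__main__':
    from webassets.filter import get_filter

    es2015 = get_filter('babel', presets='es2015')
    print('configured filter: %s' % es2015.name)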
| morreene/tradenews | venv/Lib/site-packages/webassets/filter/babel.py | Python | bsd-3-clause | 2,007 |
#!/usr/bin/python
################################################################################
# twittertail.py
################################################################################
# Copyright 2010 Etienne Membrives <etienne@membrives.fr>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
################################################################################
import twitter
import threading, time, Queue, locale
from optparse import OptionParser
class SearchProducer(threading.Thread):
"""Manages the Twitter search by regularily querying the API and adding new results to a shared queue."""
def __init__(self,query,queue,delay,sync,lock):
"""query: string representing the query
queue: Queue.Queue holding not-yet-displayed messages
delay: int representing the delay between two queries
        sync: a consumer, used to sync the next update of the queue
lock: lock mechanism
"""
self.__query=query
self.__api=twitter.Api()
self.__since=None
self.__queue=queue
self.__delay=delay
self.__sync=sync
self.__lock=lock
self.__cont=True
threading.Thread.__init__(self)
def stop(self):
"""Stop the query"""
self.__cont=False
@property
def delay(self):
"""Returns the delay between queries"""
return self.__delay
@property
def query(self):
"""Returns the current query"""
return self.__query
def fetch_query(self):
"""Updates the queue by querying Twitter"""
        res=list(self.__api.Search(self.query,rpp=100,since_id=self.__since).results)
        if not res:
            return
        self.__since=res[0].id
        res.reverse()
for s in res:
self.__queue.put(s)
def run(self):
"""Run indefinitely"""
while self.__cont:
self.__lock.acquire()
self.fetch_query()
self.__lock.notify()
self.__lock.release()
self.__sync.set_nextrefresh(time.time()+self.__delay)
time.sleep(self.__delay-1)
class DisplayConsumer(threading.Thread):
"""Manages the display of Twitter messages"""
def __init__(self,queue,lock):
"""queue: Queue.Queue holding not-yet-displayed messages
lock: lock mechanism
"""
self.__queue=queue
self.__nextrefresh=time.time()
self.__cont=True
self.__lock=lock
threading.Thread.__init__(self)
def set_nextrefresh(self,t):
"""Sets the next refresht time of the queue"""
self.__nextrefresh=t
def stop(self):
"""Stop displaying any new message and quit"""
self.__cont=False
def run(self):
s="%a, %d %b %Y %H:%M:%S +0000"
while self.__cont:
self.__lock.acquire()
if self.__queue.empty():
self.__lock.wait()
elem=self.__queue.get()
self.__lock.release()
#t=time.strptime(elem.created_at,s)
#print elem.from_user+" ("+time.strftime("%c",t)+")"
print elem.from_user+" ("+elem.created_at+")"
print "> "+elem.text.replace('\n',' ').replace(' ',' ').replace(' ',' ')
self.__queue.task_done()
if self.__queue.qsize()!=0 and self.__nextrefresh > time.time():
time.sleep((self.__nextrefresh-time.time())/float(self.__queue.qsize()))
# Here we define the program usage (as returned by the --help flag)
# and we parse the command line to populate internal variables
usage="usage: %prog [options] query"
option_parser=OptionParser(usage,version="%prog 0.1")
option_parser.add_option("-d", "--delay", dest="delay",
help="Delay (in seconds) between each Twitter query",default=60,type=int)
(options, args) = option_parser.parse_args()
if not args:
    option_parser.error("missing required argument: query")
query=args[0]
delay=options.delay
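# Example invocation (illustrative): poll every 30 seconds for tweets
# matching "python":
#
#   ./twittertail.py -d 30 python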
# Set up structures and threads
queue=Queue.Queue()
lock=threading.Lock()
cond=threading.Condition(lock)
dc=DisplayConsumer(queue,cond)
qp=SearchProducer(query,queue,delay,dc,cond)
# We want to use the system locale
locale.setlocale(locale.LC_ALL, '')
# .setDaemon(True) makes that the whole program ends as soon as the
# main thread ends
dc.setDaemon(True)
qp.setDaemon(True)
# Start everything
qp.start()
dc.start()
# While both threads are alive, continue
try:
while qp.isAlive() and dc.isAlive():
time.sleep(1)
# Catch the keyboard interrupt (^C), then quit
except KeyboardInterrupt:
pass
print "Quitting ..."
qp.stop()
dc.stop()
| emembrives/TwitterTail | twittertail.py | Python | gpl-3.0 | 5,126 |
import inspect
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import six
from .reducers import tuple_reducer, path_reducer, dot_reducer, underscore_reducer
from .splitters import tuple_splitter, path_splitter, dot_splitter, underscore_splitter
REDUCER_DICT = {
"tuple": tuple_reducer,
"path": path_reducer,
"dot": dot_reducer,
"underscore": underscore_reducer,
}
SPLITTER_DICT = {
"tuple": tuple_splitter,
"path": path_splitter,
"dot": dot_splitter,
"underscore": underscore_splitter,
}
def flatten(
d,
reducer="tuple",
inverse=False,
max_flatten_depth=None,
enumerate_types=(),
keep_empty_types=(),
):
"""Flatten `Mapping` object.
Parameters
----------
d : dict-like object
The dict that will be flattened.
reducer : {'tuple', 'path', 'underscore', 'dot', Callable}
The key joining method. If a `Callable` is given, the `Callable` will be
used to reduce.
'tuple': The resulting key will be tuple of the original keys.
'path': Use `os.path.join` to join keys.
'underscore': Use underscores to join keys.
'dot': Use dots to join keys.
inverse : bool
        Whether you want to invert the resulting key and value.
max_flatten_depth : Optional[int]
        Maximum depth to flatten.
enumerate_types : Sequence[type]
Flatten these types using `enumerate`.
For example, if we set `enumerate_types` to ``(list,)``,
`list` indices become keys: ``{'a': ['b', 'c']}`` -> ``{('a', 0): 'b', ('a', 1): 'c'}``.
keep_empty_types : Sequence[type]
By default, ``flatten({1: 2, 3: {}})`` will give you ``{(1,): 2}``, that is, the key ``3``
will disappear.
This is also applied for the types in `enumerate_types`, that is,
``flatten({1: 2, 3: []}, enumerate_types=(list,))`` will give you ``{(1,): 2}``.
If you want to keep those empty values, you can specify the types in `keep_empty_types`:
>>> flatten({1: 2, 3: {}}, keep_empty_types=(dict,))
{(1,): 2, (3,): {}}
Returns
-------
flat_dict : dict
"""
enumerate_types = tuple(enumerate_types)
flattenable_types = (Mapping,) + enumerate_types
if not isinstance(d, flattenable_types):
raise ValueError(
"argument type %s is not in the flattenalbe types %s"
% (type(d), flattenable_types)
)
# check max_flatten_depth
if max_flatten_depth is not None and max_flatten_depth < 1:
raise ValueError("max_flatten_depth should not be less than 1.")
if isinstance(reducer, str):
reducer = REDUCER_DICT[reducer]
try:
# Python 3
reducer_accepts_parent_obj = len(inspect.signature(reducer).parameters) == 3
except AttributeError:
# Python 2
reducer_accepts_parent_obj = len(inspect.getargspec(reducer)[0]) == 3
flat_dict = {}
def _flatten(_d, depth, parent=None):
key_value_iterable = (
enumerate(_d) if isinstance(_d, enumerate_types) else six.viewitems(_d)
)
has_item = False
for key, value in key_value_iterable:
has_item = True
if reducer_accepts_parent_obj:
flat_key = reducer(parent, key, _d)
else:
flat_key = reducer(parent, key)
if isinstance(value, flattenable_types) and (
max_flatten_depth is None or depth < max_flatten_depth
):
# recursively build the result
has_child = _flatten(value, depth=depth + 1, parent=flat_key)
if has_child or not isinstance(value, keep_empty_types):
# ignore the key in this level because it already has child key
# or its value is empty
continue
# add an item to the result
if inverse:
flat_key, value = value, flat_key
if flat_key in flat_dict:
raise ValueError("duplicated key '{}'".format(flat_key))
flat_dict[flat_key] = value
return has_item
_flatten(d, depth=1)
return flat_dict
def nested_set_dict(d, keys, value):
"""Set a value to a sequence of nested keys.
Parameters
----------
d : Mapping
keys : Sequence[str]
value : Any
"""
assert keys
key = keys[0]
if len(keys) == 1:
if key in d:
raise ValueError("duplicated key '{}'".format(key))
d[key] = value
return
d = d.setdefault(key, {})
nested_set_dict(d, keys[1:], value)
def unflatten(d, splitter="tuple", inverse=False):
"""Unflatten dict-like object.
Parameters
----------
d : dict-like object
The dict that will be unflattened.
splitter : {'tuple', 'path', 'underscore', 'dot', Callable}
The key splitting method. If a Callable is given, the Callable will be
used to split `d`.
'tuple': Use each element in the tuple key as the key of the unflattened dict.
'path': Use `pathlib.Path.parts` to split keys.
'underscore': Use underscores to split keys.
        'dot': Use dots to split keys.
inverse : bool
        Whether you want to invert the key and value before unflattening.
Returns
-------
unflattened_dict : dict
"""
if isinstance(splitter, str):
splitter = SPLITTER_DICT[splitter]
unflattened_dict = {}
for flat_key, value in six.viewitems(d):
if inverse:
flat_key, value = value, flat_key
key_tuple = splitter(flat_key)
nested_set_dict(unflattened_dict, key_tuple, value)
return unflattened_dict
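# --- Added round-trip demo (illustrative; not part of the original module) ---
# Exercises the documented reducer/splitter pairing: flatten with the "dot"
# reducer, then invert the result with the matching "dot" splitter.
if __name__ == "__main__":
    nested = {"a": {"b": 1, "c": {"d": 2}}}
    flat = flatten(nested, reducer="dot")
    assert flat == {"a.b": 1, "a.c.d": 2}
    assert unflatten(flat, splitter="dot") == nested
    print("round trip OK: %s" % flat)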
| ianlini/flatten-dict | src/flatten_dict/flatten_dict.py | Python | mit | 5,769 |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Calculations with datetime values.
"""
__version__ = "$Id$"
#end_pymotw_header
import datetime
today = datetime.datetime.today()
print 'Today :', today
yesterday = today - datetime.timedelta(days=1)
print 'Yesterday:', yesterday
tomorrow = today + datetime.timedelta(days=1)
print 'Tomorrow :', tomorrow
print 'tomorrow - yesterday:', tomorrow - yesterday
print 'yesterday - tomorrow:', yesterday - tomorrow
print 'tomorrow > yesterday:', tomorrow > yesterday | qilicun/python | python2/PyMOTW-1.132/PyMOTW/datetime/datetime_datetime_math.py | Python | gpl-3.0 | 572 |
import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
BooleanField, CharField, Count, DateTimeField, ExpressionWrapper, F, Func,
IntegerField, NullBooleanField, Q, Sum, Value,
)
from django.db.models.functions import Length, Lower
from django.test import TestCase, skipUnlessDBFeature
from .models import (
Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket,
)
def cxOracle_py3_bug(func):
"""
There's a bug in Django/cx_Oracle with respect to string handling under
Python 3 (essentially, they treat Python 3 strings as Python 2 strings
rather than unicode). This makes some tests here fail under Python 3, so
we mark them as expected failures until someone fixes them in #23843.
"""
from unittest import expectedFailure
from django.db import connection
return expectedFailure(func) if connection.vendor == 'oracle' else func
class NonAggregateAnnotationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_basic_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()))
for book in books:
self.assertEqual(book.is_book, 1)
def test_basic_f_annotation(self):
books = Book.objects.annotate(another_rating=F('rating'))
for book in books:
self.assertEqual(book.another_rating, book.rating)
def test_joined_annotation(self):
books = Book.objects.select_related('publisher').annotate(
num_awards=F('publisher__num_awards'))
for book in books:
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_mixed_type_annotation_date_interval(self):
active = datetime.datetime(2015, 3, 20, 14, 0, 0)
duration = datetime.timedelta(hours=1)
expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration
Ticket.objects.create(active_at=active, duration=duration)
t = Ticket.objects.annotate(
expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField())
).first()
self.assertEqual(t.expires, expires)
def test_mixed_type_annotation_numbers(self):
test = self.b1
b = Book.objects.annotate(
combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField())
).get(isbn=test.isbn)
combined = int(test.pages + test.rating)
self.assertEqual(b.combined, combined)
def test_empty_expression_annotation(self):
books = Book.objects.annotate(
selected=ExpressionWrapper(Q(pk__in=[]), output_field=BooleanField())
)
self.assertEqual(len(books), Book.objects.count())
self.assertTrue(all(not book.selected for book in books))
books = Book.objects.annotate(
selected=ExpressionWrapper(Q(pk__in=Book.objects.none()), output_field=BooleanField())
)
self.assertEqual(len(books), Book.objects.count())
self.assertTrue(all(not book.selected for book in books))
def test_annotate_with_aggregation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()),
rating_count=Count('rating'))
for book in books:
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_aggregate_over_annotation(self):
agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
other_agg = Author.objects.aggregate(age_sum=Sum('age'))
self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_on_with_annotation(self):
store = Store.objects.create(
name='test store',
original_opening=datetime.datetime.now(),
friday_night_closing=datetime.time(21, 00, 00),
)
names = [
'Theodore Roosevelt',
'Eleanor Roosevelt',
'Franklin Roosevelt',
'Ned Stark',
'Catelyn Stark',
]
for name in names:
Employee.objects.create(
store=store,
first_name=name.split()[0],
last_name=name.split()[1],
age=30, salary=2000,
)
people = Employee.objects.annotate(
name_lower=Lower('last_name'),
).distinct('name_lower')
self.assertEqual({p.last_name for p in people}, {'Stark', 'Roosevelt'})
self.assertEqual(len(people), 2)
people2 = Employee.objects.annotate(
test_alias=F('store__name'),
).distinct('test_alias')
self.assertEqual(len(people2), 1)
lengths = Employee.objects.annotate(
name_len=Length('first_name'),
).distinct('name_len').values_list('name_len', flat=True)
self.assertSequenceEqual(lengths, [3, 7, 8])
def test_filter_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField())
).filter(is_book=1)
for book in books:
self.assertEqual(book.is_book, 1)
def test_filter_annotation_with_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=3.5)
for book in books:
self.assertEqual(book.other_rating, 3.5)
def test_filter_annotation_with_double_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=F('rating'))
for book in books:
self.assertEqual(book.other_rating, book.rating)
def test_filter_agg_with_double_f(self):
books = Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('sum_rating'))
for book in books:
self.assertEqual(book.sum_rating, book.rating)
def test_filter_wrong_annotation(self):
with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."):
list(Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('nope')))
def test_decimal_annotation(self):
salary = Decimal(10) ** -Employee._meta.get_field('salary').decimal_places
Employee.objects.create(
first_name='Max',
last_name='Paine',
store=Store.objects.first(),
age=23,
salary=salary,
)
self.assertEqual(
Employee.objects.annotate(new_salary=F('salary') / 10).get().new_salary,
salary / 10,
)
def test_filter_decimal_annotation(self):
qs = Book.objects.annotate(new_price=F('price') + 1).filter(new_price=Decimal(31)).values_list('new_price')
self.assertEqual(qs.get(), (Decimal(31),))
def test_combined_annotation_commutative(self):
book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
def test_update_with_annotation(self):
book_preupdate = Book.objects.get(pk=self.b2.pk)
Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
book_postupdate = Book.objects.get(pk=self.b2.pk)
self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
def test_annotation_with_m2m(self):
books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age')
self.assertEqual(books[0].author_age, 34)
self.assertEqual(books[1].author_age, 35)
def test_annotation_reverse_m2m(self):
books = Book.objects.annotate(
store_name=F('store__name')).filter(
name='Practical Django Projects').order_by(
'store_name')
self.assertQuerysetEqual(
books, [
'Amazon.com',
'Books.com',
'Mamma and Pappa\'s Books'
],
lambda b: b.store_name
)
def test_values_annotation(self):
"""
Annotations can reference fields in a values clause,
and contribute to an existing values clause.
"""
# annotate references a field in values()
qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
book = qs.get(pk=self.b1.pk)
self.assertEqual(book['rating'] - 1, book['other_rating'])
# filter refs the annotated value
book = qs.get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
# can annotate an existing values with a new field
book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
self.assertEqual(book['other_isbn'], '155860191')
def test_values_with_pk_annotation(self):
# annotate references a field in values() with pk
publishers = Publisher.objects.values('id', 'book__rating').annotate(total=Sum('book__rating'))
for publisher in publishers.filter(pk=self.p1.pk):
self.assertEqual(publisher['book__rating'], publisher['total'])
def test_defer_annotation(self):
"""
Deferred attributes can be referenced by an annotation,
but they are not themselves deferred, and cannot be deferred.
"""
qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
with self.assertNumQueries(2):
book = qs.get(other_rating=4)
self.assertEqual(book.rating, 5)
self.assertEqual(book.other_rating, 4)
with self.assertRaisesMessage(FieldDoesNotExist, "Book has no field named 'other_rating'"):
book = qs.defer('other_rating').get(other_rating=4)
def test_mti_annotations(self):
"""
Fields on an inherited model can be referenced by an
annotated field.
"""
d = DepartmentStore.objects.create(
name='Angus & Robinson',
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21, 00, 00),
chain='Westfield'
)
books = Book.objects.filter(rating__gt=4)
for b in books:
d.books.add(b)
qs = DepartmentStore.objects.annotate(
other_name=F('name'),
other_chain=F('chain'),
is_open=Value(True, BooleanField()),
book_isbn=F('books__isbn')
).order_by('book_isbn').filter(chain='Westfield')
self.assertQuerysetEqual(
qs, [
('Angus & Robinson', 'Westfield', True, '155860191'),
('Angus & Robinson', 'Westfield', True, '159059725')
],
lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
)
def test_null_annotation(self):
"""
Annotating None onto a model round-trips
"""
book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first()
self.assertIsNone(book.no_value)
def test_order_by_annotation(self):
authors = Author.objects.annotate(other_age=F('age')).order_by('other_age')
self.assertQuerysetEqual(
authors, [
25, 29, 29, 34, 35, 37, 45, 46, 57,
],
lambda a: a.other_age
)
def test_order_by_aggregate(self):
authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age')
self.assertQuerysetEqual(
authors, [
(25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2),
],
lambda a: (a['age'], a['age_count'])
)
def test_annotate_exists(self):
authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1)
self.assertFalse(authors.exists())
def test_column_field_ordering(self):
"""
Columns are aligned in the correct order for resolve_columns. This test
will fail on MySQL if column ordering is out. Column fields should be
aligned as:
1. extra_select
2. model_fields
3. annotation_fields
4. model_related_fields
"""
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
self.assertQuerysetEqual(
qs.order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
def test_column_field_ordering_with_deferred(self):
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
# and we respect deferred columns!
self.assertQuerysetEqual(
qs.defer('age').order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
@cxOracle_py3_bug
def test_custom_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE'
)
).order_by('name')
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'),
('Django Software Foundation', 'No Tag'),
('Google', 'Do No Evil'),
('Yahoo', 'Internet Company')
],
lambda c: (c.name, c.tagline)
)
@cxOracle_py3_bug
def test_custom_functions_can_ref_other_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
class Lower(Func):
function = 'LOWER'
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).annotate(
tagline_lower=Lower(F('tagline'), output_field=CharField())
).order_by('name')
# LOWER function supported by:
# oracle, postgres, mysql, sqlite, sqlserver
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'.lower()),
('Django Software Foundation', 'No Tag'.lower()),
('Google', 'Do No Evil'.lower()),
('Yahoo', 'Internet Company'.lower())
],
lambda c: (c.name, c.tagline_lower)
)
def test_boolean_value_annotation(self):
books = Book.objects.annotate(
is_book=Value(True, output_field=BooleanField()),
is_pony=Value(False, output_field=BooleanField()),
is_none=Value(None, output_field=NullBooleanField()),
)
self.assertGreater(len(books), 0)
for book in books:
self.assertIs(book.is_book, True)
self.assertIs(book.is_pony, False)
self.assertIsNone(book.is_none)
def test_arguments_must_be_expressions(self):
msg = 'QuerySet.annotate() received non-expression(s): %s.'
with self.assertRaisesMessage(TypeError, msg % BooleanField()):
Book.objects.annotate(BooleanField())
with self.assertRaisesMessage(TypeError, msg % True):
Book.objects.annotate(is_book=True)
with self.assertRaisesMessage(TypeError, msg % ', '.join([str(BooleanField()), 'True'])):
Book.objects.annotate(BooleanField(), Value(False), is_book=True)
| reinout/django | tests/annotations/tests.py | Python | bsd-3-clause | 22,342 |
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import asyncio
from concurrent.futures import ProcessPoolExecutor
import pickle
import time
from thrift.perf.load_services import LoadTestInterface
from thrift.perf.load_types import LoadError
def us_to_sec(microseconds):
return microseconds / 1000000
def burn_in_executor(us):
start = time.time()
end = start + us_to_sec(us)
while time.time() < end:
pass
class LoadTestHandler(LoadTestInterface):
def __init__(self, loop=None):
super().__init__()
self.loop = loop or asyncio.get_event_loop()
self.pool = ProcessPoolExecutor()
pickle.DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
async def noop(self):
pass
async def onewayNoop(self):
pass
async def asyncNoop(self):
pass
async def sleep(self, us):
await asyncio.sleep(us_to_sec(us))
async def onewaySleep(self, us):
await asyncio.sleep(us_to_sec(us))
async def burn(self, us):
return await self.loop.run_in_executor(
self.pool,
burn_in_executor,
us)
async def onewayBurn(self, us):
return await self.loop.run_in_executor(
self.pool,
burn_in_executor,
us)
async def badSleep(self, us):
# "bad" because it sleeps on the main thread
time.sleep(us_to_sec(us))
async def badBurn(self, us):
return burn_in_executor(us)
async def throwError(self, code):
raise LoadError(code=code)
async def throwUnexpected(self, code):
raise LoadError(code=code)
async def send(self, data):
pass
async def onewaySend(self, data):
pass
async def recv(self, bytes):
return 'a' * bytes
async def sendrecv(self, data, recvBytes):
return 'a' * recvBytes
async def echo(self, data):
return data
async def add(self, a, b):
return a + b
async def largeContainer(self, data):
pass
async def iterAllFields(self, data):
for item in data:
x = item.stringField
for x in item.stringList:
pass
return data
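# --- Added smoke test (illustrative; not part of the original handler) ---
# Drives the handler directly, without a Thrift server, assuming the
# generated thrift.perf modules imported above are available.
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    handler = LoadTestHandler(loop)
    assert loop.run_until_complete(handler.add(2, 3)) == 5
    assert loop.run_until_complete(handler.echo(b'ping')) == b'ping'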
| SergeyMakarenko/fbthrift | thrift/perf/py3/load_handler.py | Python | apache-2.0 | 2,965 |
import unittest
from python.src import coverage_demo
class test_coverage_demo(unittest.TestCase):
def test_foo_returns_true(self):
self.assertTrue(coverage_demo.foo(1))
def test_foo_returns_false(self):
self.assertFalse(coverage_demo.foo(0))
"""
def test_foo_throws_exception(self):
#self.assertFalse(coverage_demo.foo(0----0))
self.assertRaises(TypeError, coverage_demo.foo(0----0))
""" | Deepthibr28/software-testing | tutorials/unittests/python/tests/test_coverage_demo.py | Python | mit | 403 |