repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, nullable) |
---|---|---|---|---|
FireWRT/OpenWrt-Firefly-Libraries | refs/heads/master | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/plat-linux/CDROM.py | 330 | # Generated by h2py from /usr/include/linux/cdrom.h
CDROMPAUSE = 0x5301
CDROMRESUME = 0x5302
CDROMPLAYMSF = 0x5303
CDROMPLAYTRKIND = 0x5304
CDROMREADTOCHDR = 0x5305
CDROMREADTOCENTRY = 0x5306
CDROMSTOP = 0x5307
CDROMSTART = 0x5308
CDROMEJECT = 0x5309
CDROMVOLCTRL = 0x530a
CDROMSUBCHNL = 0x530b
CDROMREADMODE2 = 0x530c
CDROMREADMODE1 = 0x530d
CDROMREADAUDIO = 0x530e
CDROMEJECT_SW = 0x530f
CDROMMULTISESSION = 0x5310
CDROM_GET_MCN = 0x5311
CDROM_GET_UPC = CDROM_GET_MCN
CDROMRESET = 0x5312
CDROMVOLREAD = 0x5313
CDROMREADRAW = 0x5314
CDROMREADCOOKED = 0x5315
CDROMSEEK = 0x5316
CDROMPLAYBLK = 0x5317
CDROMREADALL = 0x5318
CDROMGETSPINDOWN = 0x531d
CDROMSETSPINDOWN = 0x531e
CDROMCLOSETRAY = 0x5319
CDROM_SET_OPTIONS = 0x5320
CDROM_CLEAR_OPTIONS = 0x5321
CDROM_SELECT_SPEED = 0x5322
CDROM_SELECT_DISC = 0x5323
CDROM_MEDIA_CHANGED = 0x5325
CDROM_DRIVE_STATUS = 0x5326
CDROM_DISC_STATUS = 0x5327
CDROM_CHANGER_NSLOTS = 0x5328
CDROM_LOCKDOOR = 0x5329
CDROM_DEBUG = 0x5330
CDROM_GET_CAPABILITY = 0x5331
CDROMAUDIOBUFSIZ = 0x5382
DVD_READ_STRUCT = 0x5390
DVD_WRITE_STRUCT = 0x5391
DVD_AUTH = 0x5392
CDROM_SEND_PACKET = 0x5393
CDROM_NEXT_WRITABLE = 0x5394
CDROM_LAST_WRITTEN = 0x5395
CDROM_PACKET_SIZE = 12
CGC_DATA_UNKNOWN = 0
CGC_DATA_WRITE = 1
CGC_DATA_READ = 2
CGC_DATA_NONE = 3
CD_MINS = 74
CD_SECS = 60
CD_FRAMES = 75
CD_SYNC_SIZE = 12
CD_MSF_OFFSET = 150
CD_CHUNK_SIZE = 24
CD_NUM_OF_CHUNKS = 98
CD_FRAMESIZE_SUB = 96
CD_HEAD_SIZE = 4
CD_SUBHEAD_SIZE = 8
CD_EDC_SIZE = 4
CD_ZERO_SIZE = 8
CD_ECC_SIZE = 276
CD_FRAMESIZE = 2048
CD_FRAMESIZE_RAW = 2352
CD_FRAMESIZE_RAWER = 2646
CD_FRAMESIZE_RAW1 = (CD_FRAMESIZE_RAW-CD_SYNC_SIZE)
CD_FRAMESIZE_RAW0 = (CD_FRAMESIZE_RAW-CD_SYNC_SIZE-CD_HEAD_SIZE)
CD_XA_HEAD = (CD_HEAD_SIZE+CD_SUBHEAD_SIZE)
CD_XA_TAIL = (CD_EDC_SIZE+CD_ECC_SIZE)
CD_XA_SYNC_HEAD = (CD_SYNC_SIZE+CD_XA_HEAD)
CDROM_LBA = 0x01
CDROM_MSF = 0x02
CDROM_DATA_TRACK = 0x04
CDROM_LEADOUT = 0xAA
CDROM_AUDIO_INVALID = 0x00
CDROM_AUDIO_PLAY = 0x11
CDROM_AUDIO_PAUSED = 0x12
CDROM_AUDIO_COMPLETED = 0x13
CDROM_AUDIO_ERROR = 0x14
CDROM_AUDIO_NO_STATUS = 0x15
CDC_CLOSE_TRAY = 0x1
CDC_OPEN_TRAY = 0x2
CDC_LOCK = 0x4
CDC_SELECT_SPEED = 0x8
CDC_SELECT_DISC = 0x10
CDC_MULTI_SESSION = 0x20
CDC_MCN = 0x40
CDC_MEDIA_CHANGED = 0x80
CDC_PLAY_AUDIO = 0x100
CDC_RESET = 0x200
CDC_IOCTLS = 0x400
CDC_DRIVE_STATUS = 0x800
CDC_GENERIC_PACKET = 0x1000
CDC_CD_R = 0x2000
CDC_CD_RW = 0x4000
CDC_DVD = 0x8000
CDC_DVD_R = 0x10000
CDC_DVD_RAM = 0x20000
CDS_NO_INFO = 0
CDS_NO_DISC = 1
CDS_TRAY_OPEN = 2
CDS_DRIVE_NOT_READY = 3
CDS_DISC_OK = 4
CDS_AUDIO = 100
CDS_DATA_1 = 101
CDS_DATA_2 = 102
CDS_XA_2_1 = 103
CDS_XA_2_2 = 104
CDS_MIXED = 105
CDO_AUTO_CLOSE = 0x1
CDO_AUTO_EJECT = 0x2
CDO_USE_FFLAGS = 0x4
CDO_LOCK = 0x8
CDO_CHECK_TYPE = 0x10
CD_PART_MAX = 64
CD_PART_MASK = (CD_PART_MAX - 1)
GPCMD_BLANK = 0xa1
GPCMD_CLOSE_TRACK = 0x5b
GPCMD_FLUSH_CACHE = 0x35
GPCMD_FORMAT_UNIT = 0x04
GPCMD_GET_CONFIGURATION = 0x46
GPCMD_GET_EVENT_STATUS_NOTIFICATION = 0x4a
GPCMD_GET_PERFORMANCE = 0xac
GPCMD_INQUIRY = 0x12
GPCMD_LOAD_UNLOAD = 0xa6
GPCMD_MECHANISM_STATUS = 0xbd
GPCMD_MODE_SELECT_10 = 0x55
GPCMD_MODE_SENSE_10 = 0x5a
GPCMD_PAUSE_RESUME = 0x4b
GPCMD_PLAY_AUDIO_10 = 0x45
GPCMD_PLAY_AUDIO_MSF = 0x47
GPCMD_PLAY_AUDIO_TI = 0x48
GPCMD_PLAY_CD = 0xbc
GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL = 0x1e
GPCMD_READ_10 = 0x28
GPCMD_READ_12 = 0xa8
GPCMD_READ_CDVD_CAPACITY = 0x25
GPCMD_READ_CD = 0xbe
GPCMD_READ_CD_MSF = 0xb9
GPCMD_READ_DISC_INFO = 0x51
GPCMD_READ_DVD_STRUCTURE = 0xad
GPCMD_READ_FORMAT_CAPACITIES = 0x23
GPCMD_READ_HEADER = 0x44
GPCMD_READ_TRACK_RZONE_INFO = 0x52
GPCMD_READ_SUBCHANNEL = 0x42
GPCMD_READ_TOC_PMA_ATIP = 0x43
GPCMD_REPAIR_RZONE_TRACK = 0x58
GPCMD_REPORT_KEY = 0xa4
GPCMD_REQUEST_SENSE = 0x03
GPCMD_RESERVE_RZONE_TRACK = 0x53
GPCMD_SCAN = 0xba
GPCMD_SEEK = 0x2b
GPCMD_SEND_DVD_STRUCTURE = 0xad
GPCMD_SEND_EVENT = 0xa2
GPCMD_SEND_KEY = 0xa3
GPCMD_SEND_OPC = 0x54
GPCMD_SET_READ_AHEAD = 0xa7
GPCMD_SET_STREAMING = 0xb6
GPCMD_START_STOP_UNIT = 0x1b
GPCMD_STOP_PLAY_SCAN = 0x4e
GPCMD_TEST_UNIT_READY = 0x00
GPCMD_VERIFY_10 = 0x2f
GPCMD_WRITE_10 = 0x2a
GPCMD_WRITE_AND_VERIFY_10 = 0x2e
GPCMD_SET_SPEED = 0xbb
GPCMD_PLAYAUDIO_TI = 0x48
GPCMD_GET_MEDIA_STATUS = 0xda
GPMODE_R_W_ERROR_PAGE = 0x01
GPMODE_WRITE_PARMS_PAGE = 0x05
GPMODE_AUDIO_CTL_PAGE = 0x0e
GPMODE_POWER_PAGE = 0x1a
GPMODE_FAULT_FAIL_PAGE = 0x1c
GPMODE_TO_PROTECT_PAGE = 0x1d
GPMODE_CAPABILITIES_PAGE = 0x2a
GPMODE_ALL_PAGES = 0x3f
GPMODE_CDROM_PAGE = 0x0d
DVD_STRUCT_PHYSICAL = 0x00
DVD_STRUCT_COPYRIGHT = 0x01
DVD_STRUCT_DISCKEY = 0x02
DVD_STRUCT_BCA = 0x03
DVD_STRUCT_MANUFACT = 0x04
DVD_LAYERS = 4
DVD_LU_SEND_AGID = 0
DVD_HOST_SEND_CHALLENGE = 1
DVD_LU_SEND_KEY1 = 2
DVD_LU_SEND_CHALLENGE = 3
DVD_HOST_SEND_KEY2 = 4
DVD_AUTH_ESTABLISHED = 5
DVD_AUTH_FAILURE = 6
DVD_LU_SEND_TITLE_KEY = 7
DVD_LU_SEND_ASF = 8
DVD_INVALIDATE_AGID = 9
DVD_LU_SEND_RPC_STATE = 10
DVD_HOST_SEND_RPC_STATE = 11
DVD_CPM_NO_COPYRIGHT = 0
DVD_CPM_COPYRIGHTED = 1
DVD_CP_SEC_NONE = 0
DVD_CP_SEC_EXIST = 1
DVD_CGMS_UNRESTRICTED = 0
DVD_CGMS_SINGLE = 2
DVD_CGMS_RESTRICTED = 3
CDROM_MAX_SLOTS = 256
|
denisenkom/django-sqlserver | refs/heads/master | sqlserver/base.py | 1 | """Microsoft SQL Server database backend for Django."""
from __future__ import absolute_import, unicode_literals
import datetime
import collections
import django.db.backends.base.client
from django.utils.timezone import utc
import sqlserver_ado
import sqlserver_ado.base
import sqlserver_ado.compiler
import sqlserver_ado.operations
import sqlserver_ado.introspection
import sqlserver_ado.creation
try:
import pytds
except ImportError:
raise Exception('pytds is not available; to install it, run: pip install python-tds')
try:
import pytz
except ImportError:
pytz = None
DatabaseError = pytds.DatabaseError
IntegrityError = pytds.IntegrityError
_SUPPORTED_OPTIONS = [
'dsn', 'timeout',
'login_timeout', 'as_dict',
'appname', 'tds_version',
'blocksize', 'auth',
'readonly', 'bytes_to_unicode',
'row_strategy', 'cafile',
'validate_host', 'enc_login_only',
]
def utc_tzinfo_factory(offset):
if offset != 0:
raise AssertionError("database connection isn't set to UTC")
return utc
#
# Main class which uses pytds as a driver instead of adodb
#
class DatabaseWrapper(sqlserver_ado.base.DatabaseWrapper):
Database = pytds
def get_connection_params(self):
"""Returns a dict of parameters suitable for get_new_connection."""
from django.conf import settings
settings_dict = self.settings_dict
options = settings_dict.get('OPTIONS', {})
autocommit = options.get('autocommit', False)
conn_params = {
'server': settings_dict['HOST'],
'database': settings_dict['NAME'],
'user': settings_dict['USER'],
'port': settings_dict.get('PORT', '1433'),
'password': settings_dict['PASSWORD'],
'timeout': self.command_timeout,
'autocommit': autocommit,
'use_mars': options.get('use_mars', False),
'load_balancer': options.get('load_balancer', None),
'failover_partner': options.get('failover_partner', None),
'use_tz': utc if getattr(settings, 'USE_TZ', False) else None,
}
for opt in _SUPPORTED_OPTIONS:
if opt in options:
conn_params[opt] = options[opt]
self.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return conn_params
def create_cursor(self, name=None):
"""Creates a cursor. Assumes that a connection is established."""
cursor = self.connection.cursor()
cursor.tzinfo_factory = self.tzinfo_factory
return cursor
def __get_dbms_version(self, make_connection=True):
"""
Returns the 'DBMS Version' string
"""
major, minor, _, _ = self.get_server_version(make_connection=make_connection)
return '{}.{}'.format(major, minor)
def get_server_version(self, make_connection=True):
if not self.connection and make_connection:
self.connect()
major = (self.connection.product_version & 0xff000000) >> 24
minor = (self.connection.product_version & 0xff0000) >> 16
p1 = (self.connection.product_version & 0xff00) >> 8
p2 = self.connection.product_version & 0xff
return major, minor, p1, p2
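# Worked example (hypothetical value, not from the original module): a
# product_version of 0x0C0007D0 unpacks with the masks above to
# (12, 0, 7, 208), i.e. SQL Server major version 12, minor version 0.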
#
# The monkey patches below can be removed once the corresponding changes are merged into the respective packages
#
#
# monkey patch DatabaseFeatures class
#
if django.VERSION >= (1, 11, 0):
sqlserver_ado.base.DatabaseFeatures.has_select_for_update = True
sqlserver_ado.base.DatabaseFeatures.has_select_for_update_nowait = True
sqlserver_ado.base.DatabaseFeatures.has_select_for_update_skip_locked = True
sqlserver_ado.base.DatabaseFeatures.for_update_after_from = True
# MSSQL does not have bit shift operators,
# but they could be emulated by multiplying/dividing by 2^x
sqlserver_ado.base.DatabaseFeatures.supports_bitwise_leftshift = False
sqlserver_ado.base.DatabaseFeatures.supports_bitwise_rightshift = False
# probably can be implemented
sqlserver_ado.base.DatabaseFeatures.can_introspect_default = False
#
# monkey patch SQLCompiler class
#
def _call_base_as_sql_old(self, with_limits=True, with_col_aliases=False, subquery=False):
return super(sqlserver_ado.compiler.SQLCompiler, self).as_sql(
with_limits=with_limits,
with_col_aliases=with_col_aliases,
subquery=subquery,
)
def _call_base_as_sql_new(self, with_limits=True, with_col_aliases=False, subquery=False):
return super(sqlserver_ado.compiler.SQLCompiler, self).as_sql(
with_limits=with_limits,
with_col_aliases=with_col_aliases,
)
def _as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
# Get out of the way if we're not a select query or there's no limiting involved.
has_limit_offset = with_limits and (self.query.low_mark or self.query.high_mark is not None)
try:
if not has_limit_offset:
# The ORDER BY clause is invalid in views, inline functions,
# derived tables, subqueries, and common table expressions,
# unless TOP or FOR XML is also specified.
setattr(self.query, '_mssql_ordering_not_allowed', with_col_aliases)
# let the base do its thing, but we'll handle limit/offset
sql, fields = self._call_base_as_sql(
with_limits=False,
with_col_aliases=with_col_aliases,
subquery=subquery,
)
if has_limit_offset:
if ' order by ' not in sql.lower():
# Must have an ORDER BY to slice using OFFSET/FETCH. If
# there is none, use the first column, which is typically a
# PK
sql += ' ORDER BY 1'
sql += ' OFFSET %d ROWS' % (self.query.low_mark or 0)
if self.query.high_mark is not None:
sql += ' FETCH NEXT %d ROWS ONLY' % (self.query.high_mark - self.query.low_mark)
finally:
if not has_limit_offset:
# remove in case query is ever reused
delattr(self.query, '_mssql_ordering_not_allowed')
return sql, fields
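# A minimal standalone sketch (not part of the original module) of how the
# OFFSET/FETCH suffix is assembled in _as_sql above for a slice such as
# qs[20:30]; SQL Server only accepts OFFSET/FETCH after an ORDER BY, hence
# the fallback to ordering by the first column.
def _sketch_offset_fetch(sql, low_mark, high_mark):
    if ' order by ' not in sql.lower():
        sql += ' ORDER BY 1'
    sql += ' OFFSET %d ROWS' % (low_mark or 0)
    if high_mark is not None:
        sql += ' FETCH NEXT %d ROWS ONLY' % (high_mark - low_mark)
    return sql
# _sketch_offset_fetch('SELECT a FROM t', 20, 30)
# -> 'SELECT a FROM t ORDER BY 1 OFFSET 20 ROWS FETCH NEXT 10 ROWS ONLY'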
if django.VERSION < (1, 11, 0):
sqlserver_ado.compiler.SQLCompiler._call_base_as_sql = _call_base_as_sql_old
else:
sqlserver_ado.compiler.SQLCompiler._call_base_as_sql = _call_base_as_sql_new
sqlserver_ado.compiler.SQLCompiler.as_sql = _as_sql
#
# monkey patch DatabaseOperations to support select_for_update
#
def _for_update_sql(self, nowait=False, skip_locked=False):
hints = ['ROWLOCK', 'UPDLOCK']
if nowait:
hints += ['NOWAIT']
if skip_locked:
hints += ['READPAST']
return "WITH ({})".format(','.join(hints))
def _value_to_db_date(self, value):
if value is None:
return None
if isinstance(value, datetime.datetime):
value = value.date()
return value.isoformat()
sqlserver_ado.operations.DatabaseOperations.for_update_sql = _for_update_sql
sqlserver_ado.operations.DatabaseOperations.value_to_db_date = _value_to_db_date
# monkey patch the adoConn property, which django-mssql expects, onto the connection class;
# this can be removed once django-mssql no longer uses this property
pytds.Connection.adoConn = collections.namedtuple('AdoConn', 'Properties')(Properties=[])
#
# monkey patch sqlserver_ado.base.DatabaseWrapper class
#
def _get_new_connection(self, conn_params):
"""Opens a connection to the database."""
self.__connection_string = conn_params.get('connection_string', '')
conn = self.Database.connect(**conn_params)
return conn
sqlserver_ado.base.DatabaseWrapper.get_new_connection = _get_new_connection
sqlserver_ado.base.DatabaseWrapper.client_class = django.db.backends.base.client.BaseDatabaseClient
sqlserver_ado.base.DatabaseWrapper.creation_class = sqlserver_ado.creation.DatabaseCreation
sqlserver_ado.base.DatabaseWrapper.features_class = sqlserver_ado.base.DatabaseFeatures
sqlserver_ado.base.DatabaseWrapper.introspection_class = sqlserver_ado.introspection.DatabaseIntrospection
sqlserver_ado.base.DatabaseWrapper.ops_class = sqlserver_ado.operations.DatabaseOperations
|
pratapvardhan/pandas | refs/heads/master | pandas/tests/indexes/multi/conftest.py | 1 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas import Index, MultiIndex
@pytest.fixture
def idx():
# a MultiIndex used to test the general functionality of this object
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index_names = ['first', 'second']
index = MultiIndex(
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=index_names,
verify_integrity=False
)
return index
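# For reference, the index built above is equivalent to the tuples
# ('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
# ('baz', 'two'), ('qux', 'one'), ('qux', 'two'):
# major_labels/minor_labels are positional indices into major_axis/minor_axis.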
@pytest.fixture
def index_names():
# names that match those in the idx fixture for testing equality of
# names assigned to the idx
return ['first', 'second']
@pytest.fixture
def holder():
# the MultiIndex constructor used to test compatibility with pickle
return MultiIndex
@pytest.fixture
def compat_props():
# a MultiIndex must have these properties associated with it
return ['shape', 'ndim', 'size']
|
ycaihua/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/test/test_devpoll.py | 87 | # Test case for the select.devpoll() function
# Initial tests are copied as is from "test_poll.py"
import os
import random
import select
import sys
import unittest
from test.support import TESTFN, run_unittest, cpython_only
if not hasattr(select, 'devpoll') :
raise unittest.SkipTest('test works only on Solaris OS family')
def find_ready_matching(ready, flag):
match = []
for fd, mode in ready:
if mode & flag:
match.append(fd)
return match
class DevPollTests(unittest.TestCase):
def test_devpoll1(self):
# Basic functional test of poll object
# Create a bunch of pipe and test that poll works with them.
p = select.devpoll()
NUM_PIPES = 12
MSG = b" This is a test."
MSG_LEN = len(MSG)
readers = []
writers = []
r2w = {}
w2r = {}
for i in range(NUM_PIPES):
rd, wr = os.pipe()
p.register(rd)
p.modify(rd, select.POLLIN)
p.register(wr, select.POLLOUT)
readers.append(rd)
writers.append(wr)
r2w[rd] = wr
w2r[wr] = rd
bufs = []
while writers:
ready = p.poll()
ready_writers = find_ready_matching(ready, select.POLLOUT)
if not ready_writers:
self.fail("no pipes ready for writing")
wr = random.choice(ready_writers)
os.write(wr, MSG)
ready = p.poll()
ready_readers = find_ready_matching(ready, select.POLLIN)
if not ready_readers:
self.fail("no pipes ready for reading")
self.assertEqual([w2r[wr]], ready_readers)
rd = ready_readers[0]
buf = os.read(rd, MSG_LEN)
self.assertEqual(len(buf), MSG_LEN)
bufs.append(buf)
os.close(r2w[rd]) ; os.close(rd)
p.unregister(r2w[rd])
p.unregister(rd)
writers.remove(r2w[rd])
self.assertEqual(bufs, [MSG] * NUM_PIPES)
def test_timeout_overflow(self):
pollster = select.devpoll()
w, r = os.pipe()
pollster.register(w)
pollster.poll(-1)
self.assertRaises(OverflowError, pollster.poll, -2)
self.assertRaises(OverflowError, pollster.poll, -1 << 31)
self.assertRaises(OverflowError, pollster.poll, -1 << 64)
pollster.poll(0)
pollster.poll(1)
pollster.poll(1 << 30)
self.assertRaises(OverflowError, pollster.poll, 1 << 31)
self.assertRaises(OverflowError, pollster.poll, 1 << 63)
self.assertRaises(OverflowError, pollster.poll, 1 << 64)
def test_close(self):
open_file = open(__file__, "rb")
self.addCleanup(open_file.close)
fd = open_file.fileno()
devpoll = select.devpoll()
# test fileno() method and closed attribute
self.assertIsInstance(devpoll.fileno(), int)
self.assertFalse(devpoll.closed)
# test close()
devpoll.close()
self.assertTrue(devpoll.closed)
self.assertRaises(ValueError, devpoll.fileno)
# close() can be called more than once
devpoll.close()
# operations must fail with ValueError("I/O operation on closed ...")
self.assertRaises(ValueError, devpoll.modify, fd, select.POLLIN)
self.assertRaises(ValueError, devpoll.poll)
self.assertRaises(ValueError, devpoll.register, fd, fd, select.POLLIN)
self.assertRaises(ValueError, devpoll.unregister, fd)
def test_fd_non_inheritable(self):
devpoll = select.devpoll()
self.addCleanup(devpoll.close)
self.assertEqual(os.get_inheritable(devpoll.fileno()), False)
def test_events_mask_overflow(self):
pollster = select.devpoll()
w, r = os.pipe()
pollster.register(w)
# Issue #17919
self.assertRaises(OverflowError, pollster.register, 0, -1)
self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
self.assertRaises(OverflowError, pollster.modify, 1, -1)
self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)
@cpython_only
def test_events_mask_overflow_c_limits(self):
from _testcapi import USHRT_MAX
pollster = select.devpoll()
w, r = os.pipe()
pollster.register(w)
# Issue #17919
self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
def test_main():
run_unittest(DevPollTests)
if __name__ == '__main__':
test_main()
|
btjhjeon/ConversationalQA | refs/heads/master | skipthoughts/decoding/search.py | 2 | """
Code for sequence generation
"""
import numpy
import copy
def gen_sample(tparams, f_init, f_next, ctx, options, trng=None, k=1, maxlen=30,
stochastic=True, argmax=False, use_unk=False):
"""
Generate a sample, using either beam search or stochastic sampling
"""
if k > 1:
assert not stochastic, 'Beam search does not support stochastic sampling'
sample = []
sample_score = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
next_state = f_init(ctx)
next_w = -1 * numpy.ones((1,)).astype('int64')
for ii in xrange(maxlen):
inps = [next_w, next_state]
ret = f_next(*inps)
next_p, next_w, next_state = ret[0], ret[1], ret[2]
if stochastic:
if argmax:
nw = next_p[0].argmax()
else:
nw = next_w[0]
sample.append(nw)
sample_score += next_p[0,nw]
if nw == 0:
break
else:
cand_scores = hyp_scores[:,None] - numpy.log(next_p)
cand_flat = cand_scores.flatten()
if not use_unk:
voc_size = next_p.shape[1]
for xx in range(len(cand_flat) / voc_size):
cand_flat[voc_size * xx + 1] = 1e20
ranks_flat = cand_flat.argsort()[:(k-dead_k)]
voc_size = next_p.shape[1]
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
new_hyp_states = []
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[idx])
new_hyp_states.append(copy.copy(next_state[ti]))
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_states.append(new_hyp_states[idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = numpy.array(hyp_states)
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score
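# Hypothetical usage sketch (variable names are assumptions, not from the
# original module): given compiled theano functions f_init/f_next, an encoded
# context vector ctx and the model options, a beam search of width 5 would be
# invoked roughly as
#   samples, scores = gen_sample(tparams, f_init, f_next, ctx, options,
#                                trng=trng, k=5, maxlen=30,
#                                stochastic=False, argmax=False)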
|
ngokevin/zamboni | refs/heads/master | mkt/api/base.py | 2 | import functools
import json
from django.db.models.sql import EmptyResultSet
import commonware.log
from rest_framework.decorators import api_view
from rest_framework.exceptions import ParseError
from rest_framework.mixins import ListModelMixin
from rest_framework.routers import Route, SimpleRouter
from rest_framework.response import Response
from rest_framework.urlpatterns import format_suffix_patterns
import mkt
log = commonware.log.getLogger('z.api')
def list_url(name, **kw):
kw['resource_name'] = name
return ('api_dispatch_list', kw)
def get_url(name, pk, **kw):
kw.update({'resource_name': name, 'pk': pk})
return ('api_dispatch_detail', kw)
def _collect_form_errors(forms):
errors = {}
if not isinstance(forms, list):
forms = [forms]
for f in forms:
# If we've got form objects, get the error object off it.
# Otherwise assume we've just been passed a form object.
form_errors = getattr(f, 'errors', f)
if isinstance(form_errors, list): # Cope with formsets.
for e in form_errors:
errors.update(e)
continue
errors.update(dict(form_errors.items()))
return errors
def form_errors(forms):
errors = _collect_form_errors(forms)
raise ParseError(errors)
def check_potatocaptcha(data):
if data.get('tuber', False):
return Response(json.dumps({'tuber': 'Invalid value'}), 400)
if data.get('sprout', None) != 'potato':
return Response(json.dumps({'sprout': 'Invalid value'}), 400)
def get_region_from_request(request):
region = request.GET.get('region')
if region and region == 'None':
return None
return getattr(request, 'REGION', mkt.regions.RESTOFWORLD)
class SubRouter(SimpleRouter):
"""
Like SimpleRouter, but with the lookup before the prefix, so that it can be
easily used for sub-actions that are children of a main router.
This is a convenient way of linking one or more viewsets to a parent one
without having to set multiple @action and @link manually.
"""
routes = [
# List route.
Route(
url=r'^{lookup}/{prefix}{trailing_slash}$',
mapping={
'get': 'list',
'post': 'create'
},
name='{basename}-list',
initkwargs={'suffix': 'List'}
),
# Detail route.
Route(
url=r'^{lookup}/{prefix}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'post': 'detail_post',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
initkwargs={'suffix': 'Instance'}
)
]
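# Hypothetical usage sketch (viewset and names are assumptions, not from the
# original module): registering a child viewset on a SubRouter produces URLs
# with the parent lookup first, e.g. ^(?P<pk>[^/]+)/prices/$.
#   sub_router = SubRouter()
#   sub_router.register('prices', PriceViewSet, base_name='app-price')
#   urlpatterns = sub_router.urls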
class SubRouterWithFormat(SubRouter):
"""
SubRouter that also adds the optional format to generated URL patterns.
This is similar to DRF's DefaultRouter, except it's a SubRouter and we
don't respect the trailing_slash parameter with the URLs containing the
format parameter, because that'd make ugly, weird URLs.
"""
def get_urls(self):
# Keep trailing slash value...
trailing_slash = self.trailing_slash
# Generate base URLs without format.
base_urls = super(SubRouterWithFormat, self).get_urls()
# Generate the same URLs, but forcing to omit the trailing_slash.
self.trailing_slash = ''
extra_urls = super(SubRouterWithFormat, self).get_urls()
# Reset trailing slash and add format to our extra URLs.
self.trailing_slash = trailing_slash
extra_urls = format_suffix_patterns(extra_urls, suffix_required=True)
# Return the addition of both lists of URLs.
return base_urls + extra_urls
class MarketplaceView(object):
"""
Base view for DRF views.
It includes:
- An implementation of handle_exception() that goes with our custom
exception handler. It stores the request and originating class in the
exception before it's handed over to the handler, so that the handler
can in turn properly propagate the got_request_exception signal if
necessary.
- An implementation of paginate_queryset() that goes with our custom
pagination handler. It does tastypie-like offset pagination instead of
the default page mechanism.
"""
def handle_exception(self, exc):
exc._request = self.request._request
exc._klass = self.__class__
return super(MarketplaceView, self).handle_exception(exc)
def paginate_queryset(self, queryset, page_size=None):
page_query_param = self.request.QUERY_PARAMS.get(self.page_kwarg)
offset_query_param = self.request.QUERY_PARAMS.get('offset')
# If the 'offset' (tastypie-style pagination) parameter is present and
# 'page' isn't, use it to work out which page to use.
if page_query_param is None and offset_query_param is not None:
page_number = int(offset_query_param) / self.get_paginate_by() + 1
self.kwargs[self.page_kwarg] = page_number
return super(MarketplaceView, self).paginate_queryset(queryset,
page_size=page_size)
def get_region_from_request(self, request):
"""
Returns the REGION object for the passed request. If the GET param
`region` is `'None'`, return `None`. Otherwise, return `request.REGION`
which will have been set by the RegionMiddleware. If somehow we didn't
go through the middleware and request.REGION is absent, we fall back to
RESTOFWORLD.
"""
return get_region_from_request(request)
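# A minimal sketch (this helper is an assumption, not in the original module)
# of the tastypie-style offset-to-page conversion performed in
# MarketplaceView.paginate_queryset above.
def _offset_to_page(offset, paginate_by):
    # an object offset maps onto 1-based page numbers
    return int(offset) // paginate_by + 1
# _offset_to_page(40, 20) -> 3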
class MultiSerializerViewSetMixin(object):
"""
Allows attaching multiple serializers to a single viewset. A serializer
is chosen based on request.GET['serializer'] which is used to look up the
appropriate serializer in a serializer_classes map. Useful to avoid having to
create separate endpoints just to use different serializers (e.g.,
product-specific serializers, slimmed serializers).
"""
def get_serializer_class(self):
"""
Look for serializer class in self.serializer_classes. It will be looked
up using request.GET.serializer, i.e.:
class MyViewSet(ViewSet):
serializer_class = MySerializer
serializer_classes = {
'mini': MyMiniSerializer,
}
If there's no entry for that param, fall back to the regular
get_serializer_class lookup: self.serializer_class.
"""
try:
return self.serializer_classes[self.request.GET.get('serializer')]
except KeyError:
return super(MultiSerializerViewSetMixin,
self).get_serializer_class()
class CORSMixin(object):
"""
Mixin to enable CORS for DRF API.
"""
def finalize_response(self, request, response, *args, **kwargs):
if not hasattr(request._request, 'CORS'):
request._request.CORS = self.cors_allowed_methods
return super(CORSMixin, self).finalize_response(
request, response, *args, **kwargs)
def cors_api_view(methods):
def decorator(f):
@api_view(methods)
@functools.wraps(f)
def wrapped(request):
request._request.CORS = methods
return f(request)
return wrapped
return decorator
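# Hypothetical usage sketch (the view name is an assumption, not from the
# original module): the decorator marks the request for CORS and then applies
# DRF's @api_view.
#   @cors_api_view(['GET'])
#   def ping(request):
#       return Response({'ok': True})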
class SlugOrIdMixin(object):
"""
Mixin that allows you to pass slugs instead of pk in your URLs. Use with
any router or urlpattern that relies on a relaxed regexp for pks, like
(?P<pk>[^/]+) (DRF does this by default).
If your slug field is named something other than `slug`, override
`self.slug_field`.
"""
def get_object(self, queryset=None):
pk = self.kwargs.get('pk')
if pk and not pk.isdigit():
# If the `pk` contains anything other than a digit, it's a `slug`.
self.kwargs.update(pk=None, slug=self.kwargs['pk'])
return super(SlugOrIdMixin, self).get_object(queryset=queryset)
class SilentListModelMixin(ListModelMixin):
"""
DRF's ListModelMixin that returns a 204_NO_CONTENT rather than flipping a
500 or 404.
"""
def list(self, *args, **kwargs):
try:
res = super(SilentListModelMixin, self).list(*args, **kwargs)
except EmptyResultSet:
return Response([])
if res.status_code == 404:
return Response([])
return res
|
kmolab/kmolab.github.io | refs/heads/master | data/Brython-3.3.4/Lib/urllib/request.py | 8 | from browser import ajax
from . import error
class FileIO:
def __init__(self, data):
self._data=data
def read(self):
return self._data
def urlopen(url, data=None, timeout=None):
global result
result=None
def on_complete(req):
global result
if req.status == 200:
result=req
_ajax=ajax.ajax()
_ajax.bind('complete', on_complete)
if timeout is not None:
_ajax.set_timeout(timeout)
if data is None:
_ajax.open('GET', url, False)
_ajax.send()
else:
_ajax.open('POST', url, False)
_ajax.send(data)
if result is not None:
if isinstance(result.text, str):
return FileIO(result.text) #, url, {'status': result.status}
return FileIO(result.text()) #, url, {'status': result.status}
raise error.HTTPError('file not found') |
jordigh/mercurial-crew | refs/heads/master | mercurial/cmdutil.py | 1 | # cmdutil.py - help for command processing in mercurial
#
# Copyright 2005-2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, nullrev, short
from i18n import _
import os, sys, errno, re, tempfile
import util, scmutil, templater, patch, error, templatekw, revlog, copies
import match as matchmod
import subrepo, context, repair, graphmod, revset, phases, obsolete
import changelog
import bookmarks
import lock as lockmod
def parsealiases(cmd):
return cmd.lstrip("^").split("|")
def findpossible(cmd, table, strict=False):
"""
Return cmd -> (aliases, command table entry)
for each matching command.
Return debug commands (or their aliases) only if no normal command matches.
"""
choice = {}
debugchoice = {}
if cmd in table:
# short-circuit exact matches, "log" alias beats "^log|history"
keys = [cmd]
else:
keys = table.keys()
for e in keys:
aliases = parsealiases(e)
found = None
if cmd in aliases:
found = cmd
elif not strict:
for a in aliases:
if a.startswith(cmd):
found = a
break
if found is not None:
if aliases[0].startswith("debug") or found.startswith("debug"):
debugchoice[found] = (aliases, table[e])
else:
choice[found] = (aliases, table[e])
if not choice and debugchoice:
choice = debugchoice
return choice
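# Illustrative example (hypothetical table contents): with a table containing
# the key "^status|st", findpossible("st", table) returns
# {"st": (["status", "st"], table["^status|st"])}, while the partial command
# "stat" matches "status" through the startswith() branch above.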
def findcmd(cmd, table, strict=True):
"""Return (aliases, command table entry) for command string."""
choice = findpossible(cmd, table, strict)
if cmd in choice:
return choice[cmd]
if len(choice) > 1:
clist = choice.keys()
clist.sort()
raise error.AmbiguousCommand(cmd, clist)
if choice:
return choice.values()[0]
raise error.UnknownCommand(cmd)
def findrepo(p):
while not os.path.isdir(os.path.join(p, ".hg")):
oldp, p = p, os.path.dirname(p)
if p == oldp:
return None
return p
def bailifchanged(repo):
if repo.dirstate.p2() != nullid:
raise util.Abort(_('outstanding uncommitted merge'))
modified, added, removed, deleted = repo.status()[:4]
if modified or added or removed or deleted:
raise util.Abort(_('uncommitted changes'))
ctx = repo[None]
for s in sorted(ctx.substate):
if ctx.sub(s).dirty():
raise util.Abort(_("uncommitted changes in subrepo %s") % s)
def logmessage(ui, opts):
""" get the log message according to -m and -l option """
message = opts.get('message')
logfile = opts.get('logfile')
if message and logfile:
raise util.Abort(_('options --message and --logfile are mutually '
'exclusive'))
if not message and logfile:
try:
if logfile == '-':
message = ui.fin.read()
else:
message = '\n'.join(util.readfile(logfile).splitlines())
except IOError, inst:
raise util.Abort(_("can't read commit message '%s': %s") %
(logfile, inst.strerror))
return message
def loglimit(opts):
"""get the log limit according to option -l/--limit"""
limit = opts.get('limit')
if limit:
try:
limit = int(limit)
except ValueError:
raise util.Abort(_('limit must be a positive integer'))
if limit <= 0:
raise util.Abort(_('limit must be positive'))
else:
limit = None
return limit
def makefilename(repo, pat, node, desc=None,
total=None, seqno=None, revwidth=None, pathname=None):
node_expander = {
'H': lambda: hex(node),
'R': lambda: str(repo.changelog.rev(node)),
'h': lambda: short(node),
'm': lambda: re.sub('[^\w]', '_', str(desc))
}
expander = {
'%': lambda: '%',
'b': lambda: os.path.basename(repo.root),
}
try:
if node:
expander.update(node_expander)
if node:
expander['r'] = (lambda:
str(repo.changelog.rev(node)).zfill(revwidth or 0))
if total is not None:
expander['N'] = lambda: str(total)
if seqno is not None:
expander['n'] = lambda: str(seqno)
if total is not None and seqno is not None:
expander['n'] = lambda: str(seqno).zfill(len(str(total)))
if pathname is not None:
expander['s'] = lambda: os.path.basename(pathname)
expander['d'] = lambda: os.path.dirname(pathname) or '.'
expander['p'] = lambda: pathname
newname = []
patlen = len(pat)
i = 0
while i < patlen:
c = pat[i]
if c == '%':
i += 1
c = pat[i]
c = expander[c]()
newname.append(c)
i += 1
return ''.join(newname)
except KeyError, inst:
raise util.Abort(_("invalid format spec '%%%s' in output filename") %
inst.args[0])
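# Worked example (hypothetical values, not from the original module): with the
# expanders above, a pattern such as 'hg-%h-%R.patch' produces something like
# 'hg-a1b2c3d4e5f6-42.patch', where %h is the short hex of the node and %R its
# changelog revision; '%%' yields a literal '%'.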
def makefileobj(repo, pat, node=None, desc=None, total=None,
seqno=None, revwidth=None, mode='wb', modemap={},
pathname=None):
writable = mode not in ('r', 'rb')
if not pat or pat == '-':
fp = writable and repo.ui.fout or repo.ui.fin
if util.safehasattr(fp, 'fileno'):
return os.fdopen(os.dup(fp.fileno()), mode)
else:
# if this fp can't be duped properly, return
# a dummy object that can be closed
class wrappedfileobj(object):
noop = lambda x: None
def __init__(self, f):
self.f = f
def __getattr__(self, attr):
if attr == 'close':
return self.noop
else:
return getattr(self.f, attr)
return wrappedfileobj(fp)
if util.safehasattr(pat, 'write') and writable:
return pat
if util.safehasattr(pat, 'read') and 'r' in mode:
return pat
fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
mode = modemap.get(fn, mode)
if mode == 'wb':
modemap[fn] = 'ab'
return open(fn, mode)
def openrevlog(repo, cmd, file_, opts):
"""opens the changelog, manifest, a filelog or a given revlog"""
cl = opts['changelog']
mf = opts['manifest']
msg = None
if cl and mf:
msg = _('cannot specify --changelog and --manifest at the same time')
elif cl or mf:
if file_:
msg = _('cannot specify filename with --changelog or --manifest')
elif not repo:
msg = _('cannot specify --changelog or --manifest '
'without a repository')
if msg:
raise util.Abort(msg)
r = None
if repo:
if cl:
r = repo.changelog
elif mf:
r = repo.manifest
elif file_:
filelog = repo.file(file_)
if len(filelog):
r = filelog
if not r:
if not file_:
raise error.CommandError(cmd, _('invalid arguments'))
if not os.path.isfile(file_):
raise util.Abort(_("revlog '%s' not found") % file_)
r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
file_[:-2] + ".i")
return r
def copy(ui, repo, pats, opts, rename=False):
# called with the repo lock held
#
# hgsep => pathname that uses "/" to separate directories
# ossep => pathname that uses os.sep to separate directories
cwd = repo.getcwd()
targets = {}
after = opts.get("after")
dryrun = opts.get("dry_run")
wctx = repo[None]
def walkpat(pat):
srcs = []
badstates = after and '?' or '?r'
m = scmutil.match(repo[None], [pat], opts, globbed=True)
for abs in repo.walk(m):
state = repo.dirstate[abs]
rel = m.rel(abs)
exact = m.exact(abs)
if state in badstates:
if exact and state == '?':
ui.warn(_('%s: not copying - file is not managed\n') % rel)
if exact and state == 'r':
ui.warn(_('%s: not copying - file has been marked for'
' remove\n') % rel)
continue
# abs: hgsep
# rel: ossep
srcs.append((abs, rel, exact))
return srcs
# abssrc: hgsep
# relsrc: ossep
# otarget: ossep
def copyfile(abssrc, relsrc, otarget, exact):
abstarget = scmutil.canonpath(repo.root, cwd, otarget)
if '/' in abstarget:
# We cannot normalize abstarget itself, this would prevent
# case only renames, like a => A.
abspath, absname = abstarget.rsplit('/', 1)
abstarget = repo.dirstate.normalize(abspath) + '/' + absname
reltarget = repo.pathto(abstarget, cwd)
target = repo.wjoin(abstarget)
src = repo.wjoin(abssrc)
state = repo.dirstate[abstarget]
scmutil.checkportable(ui, abstarget)
# check for collisions
prevsrc = targets.get(abstarget)
if prevsrc is not None:
ui.warn(_('%s: not overwriting - %s collides with %s\n') %
(reltarget, repo.pathto(abssrc, cwd),
repo.pathto(prevsrc, cwd)))
return
# check for overwrites
exists = os.path.lexists(target)
samefile = False
if exists and abssrc != abstarget:
if (repo.dirstate.normalize(abssrc) ==
repo.dirstate.normalize(abstarget)):
if not rename:
ui.warn(_("%s: can't copy - same file\n") % reltarget)
return
exists = False
samefile = True
if not after and exists or after and state in 'mn':
if not opts['force']:
ui.warn(_('%s: not overwriting - file exists\n') %
reltarget)
return
if after:
if not exists:
if rename:
ui.warn(_('%s: not recording move - %s does not exist\n') %
(relsrc, reltarget))
else:
ui.warn(_('%s: not recording copy - %s does not exist\n') %
(relsrc, reltarget))
return
elif not dryrun:
try:
if exists:
os.unlink(target)
targetdir = os.path.dirname(target) or '.'
if not os.path.isdir(targetdir):
os.makedirs(targetdir)
if samefile:
tmp = target + "~hgrename"
os.rename(src, tmp)
os.rename(tmp, target)
else:
util.copyfile(src, target)
srcexists = True
except IOError, inst:
if inst.errno == errno.ENOENT:
ui.warn(_('%s: deleted in working copy\n') % relsrc)
srcexists = False
else:
ui.warn(_('%s: cannot copy - %s\n') %
(relsrc, inst.strerror))
return True # report a failure
if ui.verbose or not exact:
if rename:
ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
else:
ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
targets[abstarget] = abssrc
# fix up dirstate
scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
dryrun=dryrun, cwd=cwd)
if rename and not dryrun:
if not after and srcexists and not samefile:
util.unlinkpath(repo.wjoin(abssrc))
wctx.forget([abssrc])
# pat: ossep
# dest ossep
# srcs: list of (hgsep, hgsep, ossep, bool)
# return: function that takes hgsep and returns ossep
def targetpathfn(pat, dest, srcs):
if os.path.isdir(pat):
abspfx = scmutil.canonpath(repo.root, cwd, pat)
abspfx = util.localpath(abspfx)
if destdirexists:
striplen = len(os.path.split(abspfx)[0])
else:
striplen = len(abspfx)
if striplen:
striplen += len(os.sep)
res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
elif destdirexists:
res = lambda p: os.path.join(dest,
os.path.basename(util.localpath(p)))
else:
res = lambda p: dest
return res
# pat: ossep
# dest ossep
# srcs: list of (hgsep, hgsep, ossep, bool)
# return: function that takes hgsep and returns ossep
def targetpathafterfn(pat, dest, srcs):
if matchmod.patkind(pat):
# a mercurial pattern
res = lambda p: os.path.join(dest,
os.path.basename(util.localpath(p)))
else:
abspfx = scmutil.canonpath(repo.root, cwd, pat)
if len(abspfx) < len(srcs[0][0]):
# A directory. Either the target path contains the last
# component of the source path or it does not.
def evalpath(striplen):
score = 0
for s in srcs:
t = os.path.join(dest, util.localpath(s[0])[striplen:])
if os.path.lexists(t):
score += 1
return score
abspfx = util.localpath(abspfx)
striplen = len(abspfx)
if striplen:
striplen += len(os.sep)
if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
score = evalpath(striplen)
striplen1 = len(os.path.split(abspfx)[0])
if striplen1:
striplen1 += len(os.sep)
if evalpath(striplen1) > score:
striplen = striplen1
res = lambda p: os.path.join(dest,
util.localpath(p)[striplen:])
else:
# a file
if destdirexists:
res = lambda p: os.path.join(dest,
os.path.basename(util.localpath(p)))
else:
res = lambda p: dest
return res
pats = scmutil.expandpats(pats)
if not pats:
raise util.Abort(_('no source or destination specified'))
if len(pats) == 1:
raise util.Abort(_('no destination specified'))
dest = pats.pop()
destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
if not destdirexists:
if len(pats) > 1 or matchmod.patkind(pats[0]):
raise util.Abort(_('with multiple sources, destination must be an '
'existing directory'))
if util.endswithsep(dest):
raise util.Abort(_('destination %s is not a directory') % dest)
tfn = targetpathfn
if after:
tfn = targetpathafterfn
copylist = []
for pat in pats:
srcs = walkpat(pat)
if not srcs:
continue
copylist.append((tfn(pat, dest, srcs), srcs))
if not copylist:
raise util.Abort(_('no files to copy'))
errors = 0
for targetpath, srcs in copylist:
for abssrc, relsrc, exact in srcs:
if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
errors += 1
if errors:
ui.warn(_('(consider using --after)\n'))
return errors != 0
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
runargs=None, appendpid=False):
'''Run a command as a service.'''
def writepid(pid):
if opts['pid_file']:
mode = appendpid and 'a' or 'w'
fp = open(opts['pid_file'], mode)
fp.write(str(pid) + '\n')
fp.close()
if opts['daemon'] and not opts['daemon_pipefds']:
# Signal child process startup with file removal
lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
os.close(lockfd)
try:
if not runargs:
runargs = util.hgcmd() + sys.argv[1:]
runargs.append('--daemon-pipefds=%s' % lockpath)
# Don't pass --cwd to the child process, because we've already
# changed directory.
for i in xrange(1, len(runargs)):
if runargs[i].startswith('--cwd='):
del runargs[i]
break
elif runargs[i].startswith('--cwd'):
del runargs[i:i + 2]
break
def condfn():
return not os.path.exists(lockpath)
pid = util.rundetached(runargs, condfn)
if pid < 0:
raise util.Abort(_('child process failed to start'))
writepid(pid)
finally:
try:
os.unlink(lockpath)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if parentfn:
return parentfn(pid)
else:
return
if initfn:
initfn()
if not opts['daemon']:
writepid(os.getpid())
if opts['daemon_pipefds']:
lockpath = opts['daemon_pipefds']
try:
os.setsid()
except AttributeError:
pass
os.unlink(lockpath)
util.hidewindow()
sys.stdout.flush()
sys.stderr.flush()
nullfd = os.open(os.devnull, os.O_RDWR)
logfilefd = nullfd
if logfile:
logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
os.dup2(nullfd, 0)
os.dup2(logfilefd, 1)
os.dup2(logfilefd, 2)
if nullfd not in (0, 1, 2):
os.close(nullfd)
if logfile and logfilefd not in (0, 1, 2):
os.close(logfilefd)
if runfn:
return runfn()
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
opts=None):
'''export changesets as hg patches.'''
total = len(revs)
revwidth = max([len(str(rev)) for rev in revs])
filemode = {}
def single(rev, seqno, fp):
ctx = repo[rev]
node = ctx.node()
parents = [p.node() for p in ctx.parents() if p]
branch = ctx.branch()
if switch_parent:
parents.reverse()
prev = (parents and parents[0]) or nullid
shouldclose = False
if not fp and len(template) > 0:
desc_lines = ctx.description().rstrip().split('\n')
desc = desc_lines[0] #Commit always has a first line.
fp = makefileobj(repo, template, node, desc=desc, total=total,
seqno=seqno, revwidth=revwidth, mode='wb',
modemap=filemode)
if fp != template:
shouldclose = True
if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
repo.ui.note("%s\n" % fp.name)
if not fp:
write = repo.ui.write
else:
def write(s, **kw):
fp.write(s)
write("# HG changeset patch\n")
write("# User %s\n" % ctx.user())
write("# Date %d %d\n" % ctx.date())
write("# %s\n" % util.datestr(ctx.date()))
if branch and branch != 'default':
write("# Branch %s\n" % branch)
write("# Node ID %s\n" % hex(node))
write("# Parent %s\n" % hex(prev))
if len(parents) > 1:
write("# Parent %s\n" % hex(parents[1]))
write(ctx.description().rstrip())
write("\n\n")
for chunk, label in patch.diffui(repo, prev, node, opts=opts):
write(chunk, label=label)
if shouldclose:
fp.close()
for seqno, rev in enumerate(revs):
single(rev, seqno + 1, fp)
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
changes=None, stat=False, fp=None, prefix='',
listsubrepos=False):
'''show diff or diffstat.'''
if fp is None:
write = ui.write
else:
def write(s, **kw):
fp.write(s)
if stat:
diffopts = diffopts.copy(context=0)
width = 80
if not ui.plain():
width = ui.termwidth()
chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
prefix=prefix)
for chunk, label in patch.diffstatui(util.iterlines(chunks),
width=width,
git=diffopts.git):
write(chunk, label=label)
else:
for chunk, label in patch.diffui(repo, node1, node2, match,
changes, diffopts, prefix=prefix):
write(chunk, label=label)
if listsubrepos:
ctx1 = repo[node1]
ctx2 = repo[node2]
for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
tempnode2 = node2
try:
if node2 is not None:
tempnode2 = ctx2.substate[subpath][1]
except KeyError:
# A subrepo that existed in node1 was deleted between node1 and
# node2 (inclusive). Thus, ctx2's substate won't contain that
# subpath. The best we can do is to ignore it.
tempnode2 = None
submatch = matchmod.narrowmatcher(subpath, match)
sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
stat=stat, fp=fp, prefix=prefix)
class changeset_printer(object):
'''show changeset information when templating not requested.'''
def __init__(self, ui, repo, patch, diffopts, buffered):
self.ui = ui
self.repo = repo
self.buffered = buffered
self.patch = patch
self.diffopts = diffopts
self.header = {}
self.hunk = {}
self.lastheader = None
self.footer = None
def flush(self, rev):
if rev in self.header:
h = self.header[rev]
if h != self.lastheader:
self.lastheader = h
self.ui.write(h)
del self.header[rev]
if rev in self.hunk:
self.ui.write(self.hunk[rev])
del self.hunk[rev]
return 1
return 0
def close(self):
if self.footer:
self.ui.write(self.footer)
def show(self, ctx, copies=None, matchfn=None, **props):
if self.buffered:
self.ui.pushbuffer()
self._show(ctx, copies, matchfn, props)
self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
else:
self._show(ctx, copies, matchfn, props)
def _show(self, ctx, copies, matchfn, props):
'''show a single changeset or file revision'''
changenode = ctx.node()
rev = ctx.rev()
if self.ui.quiet:
self.ui.write("%d:%s\n" % (rev, short(changenode)),
label='log.node')
return
log = self.repo.changelog
date = util.datestr(ctx.date())
hexfunc = self.ui.debugflag and hex or short
parents = [(p, hexfunc(log.node(p)))
for p in self._meaningful_parentrevs(log, rev)]
# i18n: column positioning for "hg log"
self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
label='log.changeset changeset.%s' % ctx.phasestr())
branch = ctx.branch()
# don't show the default branch name
if branch != 'default':
# i18n: column positioning for "hg log"
self.ui.write(_("branch: %s\n") % branch,
label='log.branch')
for bookmark in self.repo.nodebookmarks(changenode):
# i18n: column positioning for "hg log"
self.ui.write(_("bookmark: %s\n") % bookmark,
label='log.bookmark')
for tag in self.repo.nodetags(changenode):
# i18n: column positioning for "hg log"
self.ui.write(_("tag: %s\n") % tag,
label='log.tag')
if self.ui.debugflag and ctx.phase():
# i18n: column positioning for "hg log"
self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
label='log.phase')
for parent in parents:
# i18n: column positioning for "hg log"
self.ui.write(_("parent: %d:%s\n") % parent,
label='log.parent changeset.%s' % ctx.phasestr())
if self.ui.debugflag:
mnode = ctx.manifestnode()
# i18n: column positioning for "hg log"
self.ui.write(_("manifest: %d:%s\n") %
(self.repo.manifest.rev(mnode), hex(mnode)),
label='ui.debug log.manifest')
# i18n: column positioning for "hg log"
self.ui.write(_("user: %s\n") % ctx.user(),
label='log.user')
# i18n: column positioning for "hg log"
self.ui.write(_("date: %s\n") % date,
label='log.date')
if self.ui.debugflag:
files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
for key, value in zip([# i18n: column positioning for "hg log"
_("files:"),
# i18n: column positioning for "hg log"
_("files+:"),
# i18n: column positioning for "hg log"
_("files-:")], files):
if value:
self.ui.write("%-12s %s\n" % (key, " ".join(value)),
label='ui.debug log.files')
elif ctx.files() and self.ui.verbose:
# i18n: column positioning for "hg log"
self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
label='ui.note log.files')
if copies and self.ui.verbose:
copies = ['%s (%s)' % c for c in copies]
# i18n: column positioning for "hg log"
self.ui.write(_("copies: %s\n") % ' '.join(copies),
label='ui.note log.copies')
extra = ctx.extra()
if extra and self.ui.debugflag:
for key, value in sorted(extra.items()):
# i18n: column positioning for "hg log"
self.ui.write(_("extra: %s=%s\n")
% (key, value.encode('string_escape')),
label='ui.debug log.extra')
description = ctx.description().strip()
if description:
if self.ui.verbose:
self.ui.write(_("description:\n"),
label='ui.note log.description')
self.ui.write(description,
label='ui.note log.description')
self.ui.write("\n\n")
else:
# i18n: column positioning for "hg log"
self.ui.write(_("summary: %s\n") %
description.splitlines()[0],
label='log.summary')
self.ui.write("\n")
self.showpatch(changenode, matchfn)
def showpatch(self, node, matchfn):
if not matchfn:
matchfn = self.patch
if matchfn:
stat = self.diffopts.get('stat')
diff = self.diffopts.get('patch')
diffopts = patch.diffopts(self.ui, self.diffopts)
prev = self.repo.changelog.parents(node)[0]
if stat:
diffordiffstat(self.ui, self.repo, diffopts, prev, node,
match=matchfn, stat=True)
if diff:
if stat:
self.ui.write("\n")
diffordiffstat(self.ui, self.repo, diffopts, prev, node,
match=matchfn, stat=False)
self.ui.write("\n")
def _meaningful_parentrevs(self, log, rev):
"""Return list of meaningful (or all if debug) parentrevs for rev.
For merges (two non-nullrev revisions) both parents are meaningful.
Otherwise the first parent revision is considered meaningful if it
is not the preceding revision.
"""
parents = log.parentrevs(rev)
if not self.ui.debugflag and parents[1] == nullrev:
if parents[0] >= rev - 1:
parents = []
else:
parents = [parents[0]]
return parents
class changeset_templater(changeset_printer):
'''format changeset information.'''
def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
defaulttempl = {
'parent': '{rev}:{node|formatnode} ',
'manifest': '{rev}:{node|formatnode}',
'file_copy': '{name} ({source})',
'extra': '{key}={value|stringescape}'
}
# filecopy is preserved for compatibility reasons
defaulttempl['filecopy'] = defaulttempl['file_copy']
self.t = templater.templater(mapfile, {'formatnode': formatnode},
cache=defaulttempl)
self.cache = {}
def use_template(self, t):
'''set template string to use'''
self.t.cache['changeset'] = t
def _meaningful_parentrevs(self, ctx):
"""Return list of meaningful (or all if debug) parentrevs for rev.
"""
parents = ctx.parents()
if len(parents) > 1:
return parents
if self.ui.debugflag:
return [parents[0], self.repo['null']]
if parents[0].rev() >= ctx.rev() - 1:
return []
return parents
def _show(self, ctx, copies, matchfn, props):
'''show a single changeset or file revision'''
showlist = templatekw.showlist
# showparents() behaviour depends on ui trace level which
# causes unexpected behaviour at the templating level and makes
# it harder to extract into a standalone function. Its
# behaviour cannot be changed, so leave it here for now.
def showparents(**args):
ctx = args['ctx']
parents = [[('rev', p.rev()), ('node', p.hex())]
for p in self._meaningful_parentrevs(ctx)]
return showlist('parent', parents, **args)
props = props.copy()
props.update(templatekw.keywords)
props['parents'] = showparents
props['templ'] = self.t
props['ctx'] = ctx
props['repo'] = self.repo
props['revcache'] = {'copies': copies}
props['cache'] = self.cache
# find correct templates for current mode
tmplmodes = [
(True, None),
(self.ui.verbose, 'verbose'),
(self.ui.quiet, 'quiet'),
(self.ui.debugflag, 'debug'),
]
types = {'header': '', 'footer':'', 'changeset': 'changeset'}
for mode, postfix in tmplmodes:
for type in types:
cur = postfix and ('%s_%s' % (type, postfix)) or type
if mode and cur in self.t:
types[type] = cur
try:
# write header
if types['header']:
h = templater.stringify(self.t(types['header'], **props))
if self.buffered:
self.header[ctx.rev()] = h
else:
if self.lastheader != h:
self.lastheader = h
self.ui.write(h)
# write changeset metadata, then patch if requested
key = types['changeset']
self.ui.write(templater.stringify(self.t(key, **props)))
self.showpatch(ctx.node(), matchfn)
if types['footer']:
if not self.footer:
self.footer = templater.stringify(self.t(types['footer'],
**props))
except KeyError, inst:
msg = _("%s: no key named '%s'")
raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
except SyntaxError, inst:
raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
def show_changeset(ui, repo, opts, buffered=False):
"""show one changeset using template or regular display.
Display format will be the first non-empty hit of:
1. option 'template'
2. option 'style'
3. [ui] setting 'logtemplate'
4. [ui] setting 'style'
If all of these values are either the unset or the empty string,
regular display via changeset_printer() is done.
"""
# options
patch = None
if opts.get('patch') or opts.get('stat'):
patch = scmutil.matchall(repo)
tmpl = opts.get('template')
style = None
if tmpl:
tmpl = templater.parsestring(tmpl, quoted=False)
else:
style = opts.get('style')
# ui settings
if not (tmpl or style):
tmpl = ui.config('ui', 'logtemplate')
if tmpl:
try:
tmpl = templater.parsestring(tmpl)
except SyntaxError:
tmpl = templater.parsestring(tmpl, quoted=False)
else:
style = util.expandpath(ui.config('ui', 'style', ''))
if not (tmpl or style):
return changeset_printer(ui, repo, patch, opts, buffered)
mapfile = None
if style and not tmpl:
mapfile = style
if not os.path.split(mapfile)[0]:
mapname = (templater.templatepath('map-cmdline.' + mapfile)
or templater.templatepath(mapfile))
if mapname:
mapfile = mapname
try:
t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
except SyntaxError, inst:
raise util.Abort(inst.args[0])
if tmpl:
t.use_template(tmpl)
return t
def finddate(ui, repo, date):
"""Find the tipmost changeset that matches the given date spec"""
df = util.matchdate(date)
m = scmutil.matchall(repo)
results = {}
def prep(ctx, fns):
d = ctx.date()
if df(d[0]):
results[ctx.rev()] = d
for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
rev = ctx.rev()
if rev in results:
ui.status(_("found revision %s from %s\n") %
(rev, util.datestr(results[rev])))
return str(rev)
raise util.Abort(_("revision matching date not found"))
def increasingwindows(start, end, windowsize=8, sizelimit=512):
if start < end:
while start < end:
yield start, min(windowsize, end - start)
start += windowsize
if windowsize < sizelimit:
windowsize *= 2
else:
while start > end:
yield start, min(windowsize, start - end - 1)
start -= windowsize
if windowsize < sizelimit:
windowsize *= 2
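# Worked example: list(increasingwindows(0, 50)) == [(0, 8), (8, 16), (24, 26)];
# window sizes double (8, 16, 32, ...) up to sizelimit, and each tuple is
# (start, length) in the direction of iteration.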
class FileWalkError(Exception):
pass
def walkfilerevs(repo, match, follow, revs, fncache):
'''Walks the file history for the matched files.
Returns the changeset revs that are involved in the file history.
Throws FileWalkError if the file history can't be walked using
filelogs alone.
'''
wanted = set()
copies = []
minrev, maxrev = min(revs), max(revs)
def filerevgen(filelog, last):
"""
Only files, no patterns. Check the history of each file.
Examines filelog entries within minrev, maxrev linkrev range
Returns an iterator yielding (linkrev, parentlinkrevs, copied)
tuples in backwards order
"""
cl_count = len(repo)
revs = []
for j in xrange(0, last + 1):
linkrev = filelog.linkrev(j)
if linkrev < minrev:
continue
# only yield rev for which we have the changelog, it can
# happen while doing "hg log" during a pull or commit
if linkrev >= cl_count:
break
parentlinkrevs = []
for p in filelog.parentrevs(j):
if p != nullrev:
parentlinkrevs.append(filelog.linkrev(p))
n = filelog.node(j)
revs.append((linkrev, parentlinkrevs,
follow and filelog.renamed(n)))
return reversed(revs)
def iterfiles():
pctx = repo['.']
for filename in match.files():
if follow:
if filename not in pctx:
raise util.Abort(_('cannot follow file not in parent '
'revision: "%s"') % filename)
yield filename, pctx[filename].filenode()
else:
yield filename, None
for filename_node in copies:
yield filename_node
for file_, node in iterfiles():
filelog = repo.file(file_)
if not len(filelog):
if node is None:
# A zero count may be a directory or deleted file, so
# try to find matching entries on the slow path.
if follow:
raise util.Abort(
_('cannot follow nonexistent file: "%s"') % file_)
raise FileWalkError("Cannot walk via filelog")
else:
continue
if node is None:
last = len(filelog) - 1
else:
last = filelog.rev(node)
# keep track of all ancestors of the file
ancestors = set([filelog.linkrev(last)])
# iterate from latest to oldest revision
for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
if not follow:
if rev > maxrev:
continue
else:
# Note that last might not be the first interesting
# rev to us:
# if the file has been changed after maxrev, we'll
# have linkrev(last) > maxrev, and we still need
# to explore the file graph
if rev not in ancestors:
continue
# XXX insert 1327 fix here
if flparentlinkrevs:
ancestors.update(flparentlinkrevs)
fncache.setdefault(rev, []).append(file_)
wanted.add(rev)
if copied:
copies.append(copied)
return wanted
def walkchangerevs(repo, match, opts, prepare):
'''Iterate over files and the revs in which they changed.
Callers most commonly need to iterate backwards over the history
in which they are interested. Doing so has awful (quadratic-looking)
performance, so we use iterators in a "windowed" way.
We walk a window of revisions in the desired order. Within the
window, we first walk forwards to gather data, then in the desired
order (usually backwards) to display it.
This function returns an iterator yielding contexts. Before
yielding each context, the iterator will first call the prepare
function on each context in the window in forward order.'''
follow = opts.get('follow') or opts.get('follow_first')
if opts.get('rev'):
revs = scmutil.revrange(repo, opts.get('rev'))
elif follow:
revs = repo.revs('reverse(:.)')
else:
revs = list(repo)
revs.reverse()
if not revs:
return []
wanted = set()
slowpath = match.anypats() or (match.files() and opts.get('removed'))
fncache = {}
change = repo.changectx
# First step is to fill wanted, the set of revisions that we want to yield.
# When it does not induce extra cost, we also fill fncache for revisions in
# wanted: a cache of filenames that were changed (ctx.files()) and that
# match the file filtering conditions.
if not slowpath and not match.files():
# No files, no patterns. Display all revs.
wanted = set(revs)
if not slowpath and match.files():
# We only have to read through the filelog to find wanted revisions
try:
wanted = walkfilerevs(repo, match, follow, revs, fncache)
except FileWalkError:
slowpath = True
# We decided to fall back to the slowpath because at least one
# of the paths was not a file. Check to see if at least one of them
# existed in history, otherwise simply return
for path in match.files():
if path == '.' or path in repo.store:
break
else:
return []
if slowpath:
# We have to read the changelog to match filenames against
# changed files
if follow:
raise util.Abort(_('can only follow copies/renames for explicit '
'filenames'))
# The slow path checks files modified in every changeset.
# This is really slow on large repos, so compute the set lazily.
class lazywantedset(object):
def __init__(self):
self.set = set()
self.revs = set(revs)
# No need to worry about locality here because it will be accessed
# in the same order as the increasing window below.
def __contains__(self, value):
if value in self.set:
return True
elif not value in self.revs:
return False
else:
self.revs.discard(value)
ctx = change(value)
matches = filter(match, ctx.files())
if matches:
fncache[value] = matches
self.set.add(value)
return True
return False
def discard(self, value):
self.revs.discard(value)
self.set.discard(value)
wanted = lazywantedset()
class followfilter(object):
def __init__(self, onlyfirst=False):
self.startrev = nullrev
self.roots = set()
self.onlyfirst = onlyfirst
def match(self, rev):
def realparents(rev):
if self.onlyfirst:
return repo.changelog.parentrevs(rev)[0:1]
else:
return filter(lambda x: x != nullrev,
repo.changelog.parentrevs(rev))
if self.startrev == nullrev:
self.startrev = rev
return True
if rev > self.startrev:
# forward: all descendants
if not self.roots:
self.roots.add(self.startrev)
for parent in realparents(rev):
if parent in self.roots:
self.roots.add(rev)
return True
else:
# backwards: all parents
if not self.roots:
self.roots.update(realparents(self.startrev))
if rev in self.roots:
self.roots.remove(rev)
self.roots.update(realparents(rev))
return True
return False
# it might be worthwhile to do this in the iterator if the rev range
# is descending and the prune args are all within that range
for rev in opts.get('prune', ()):
rev = repo[rev].rev()
ff = followfilter()
stop = min(revs[0], revs[-1])
for x in xrange(rev, stop - 1, -1):
if ff.match(x):
wanted.discard(x)
# Choose a small initial window if we will probably only visit a
# few commits.
limit = loglimit(opts)
windowsize = 8
if limit:
windowsize = min(limit, windowsize)
# Now that wanted is correctly initialized, we can iterate over the
# revision range, yielding only revisions in wanted.
def iterate():
if follow and not match.files():
ff = followfilter(onlyfirst=opts.get('follow_first'))
def want(rev):
return ff.match(rev) and rev in wanted
else:
def want(rev):
return rev in wanted
for i, window in increasingwindows(0, len(revs), windowsize):
nrevs = [rev for rev in revs[i:i + window] if want(rev)]
for rev in sorted(nrevs):
fns = fncache.get(rev)
ctx = change(rev)
if not fns:
def fns_generator():
for f in ctx.files():
if match(f):
yield f
fns = fns_generator()
prepare(ctx, fns)
for rev in nrevs:
yield change(rev)
return iterate()
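# Illustrative sketch (not part of the original module): callers typically
# pass a "prepare" callback that caches per-window data, then consume the
# yielded contexts, as in the date-matching helper near the top of this
# file.  The names ui, repo and m below stand for a ui object, a repository
# and a match object supplied by the caller.
def _demo_walkchangerevs(ui, repo, m):
    def prep(ctx, fns):
        # called once per context, in forward order, before it is yielded
        pass
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        ui.status("%d:%s\n" % (ctx.rev(), ctx))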
def _makegraphfilematcher(repo, pats, followfirst):
# When displaying a revision with --patch --follow FILE, we have
# to know which file of the revision must be diffed. With
# --follow, we want the names of the ancestors of FILE in the
# revision, stored in "fcache". "fcache" is populated by
# reproducing the graph traversal already done by --follow revset
# and relating linkrevs to file names (which is not "correct" but
# good enough).
fcache = {}
fcacheready = [False]
pctx = repo['.']
wctx = repo[None]
def populate():
for fn in pats:
for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
for c in i:
fcache.setdefault(c.linkrev(), set()).add(c.path())
def filematcher(rev):
if not fcacheready[0]:
# Lazy initialization
fcacheready[0] = True
populate()
return scmutil.match(wctx, fcache.get(rev, []), default='path')
return filematcher
def _makegraphlogrevset(repo, pats, opts, revs):
"""Return (expr, filematcher) where expr is a revset string built
from log options and file patterns or None. If --stat or --patch
are not passed filematcher is None. Otherwise it is a callable
taking a revision number and returning a match objects filtering
the files to be detailed when displaying the revision.
"""
opt2revset = {
'no_merges': ('not merge()', None),
'only_merges': ('merge()', None),
'_ancestors': ('ancestors(%(val)s)', None),
'_fancestors': ('_firstancestors(%(val)s)', None),
'_descendants': ('descendants(%(val)s)', None),
'_fdescendants': ('_firstdescendants(%(val)s)', None),
'_matchfiles': ('_matchfiles(%(val)s)', None),
'date': ('date(%(val)r)', None),
'branch': ('branch(%(val)r)', ' or '),
'_patslog': ('filelog(%(val)r)', ' or '),
'_patsfollow': ('follow(%(val)r)', ' or '),
'_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
'keyword': ('keyword(%(val)r)', ' or '),
'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
'user': ('user(%(val)r)', ' or '),
}
opts = dict(opts)
# follow or not follow?
follow = opts.get('follow') or opts.get('follow_first')
followfirst = opts.get('follow_first') and 1 or 0
# --follow with FILE behaviour depends on revs...
startrev = revs[0]
followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0
# branch and only_branch are really aliases and must be handled at
# the same time
opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
# pats/include/exclude are passed to match.match() directly in
# _matchfiles() revset but walkchangerevs() builds its matcher with
# scmutil.match(). The difference is input pats are globbed on
# platforms without shell expansion (windows).
pctx = repo[None]
match, pats = scmutil.matchandpats(pctx, pats, opts)
slowpath = match.anypats() or (match.files() and opts.get('removed'))
if not slowpath:
for f in match.files():
if follow and f not in pctx:
raise util.Abort(_('cannot follow file not in parent '
'revision: "%s"') % f)
filelog = repo.file(f)
if not filelog:
# A zero count may be a directory or deleted file, so
# try to find matching entries on the slow path.
if follow:
raise util.Abort(
_('cannot follow nonexistent file: "%s"') % f)
slowpath = True
# We decided to fall back to the slowpath because at least one
# of the paths was not a file. Check to see if at least one of them
# existed in history - in that case, we'll continue down the
# slowpath; otherwise, we can turn off the slowpath
if slowpath:
for path in match.files():
if path == '.' or path in repo.store:
break
else:
slowpath = False
if slowpath:
# See walkchangerevs() slow path.
#
if follow:
raise util.Abort(_('can only follow copies/renames for explicit '
'filenames'))
# pats/include/exclude cannot be represented as separate
# revset expressions as their filtering logic applies at file
# level. For instance "-I a -X a" matches a revision touching
# "a" and "b" while "file(a) and not file(b)" does
# not. Besides, filesets are evaluated against the working
# directory.
matchargs = ['r:', 'd:relpath']
for p in pats:
matchargs.append('p:' + p)
for p in opts.get('include', []):
matchargs.append('i:' + p)
for p in opts.get('exclude', []):
matchargs.append('x:' + p)
matchargs = ','.join(('%r' % p) for p in matchargs)
opts['_matchfiles'] = matchargs
else:
if follow:
fpats = ('_patsfollow', '_patsfollowfirst')
fnopats = (('_ancestors', '_fancestors'),
('_descendants', '_fdescendants'))
if pats:
# follow() revset interprets its file argument as a
# manifest entry, so use match.files(), not pats.
opts[fpats[followfirst]] = list(match.files())
else:
opts[fnopats[followdescendants][followfirst]] = str(startrev)
else:
opts['_patslog'] = list(pats)
filematcher = None
if opts.get('patch') or opts.get('stat'):
if follow:
filematcher = _makegraphfilematcher(repo, pats, followfirst)
else:
filematcher = lambda rev: match
expr = []
for op, val in opts.iteritems():
if not val:
continue
if op not in opt2revset:
continue
revop, andor = opt2revset[op]
if '%(val)' not in revop:
expr.append(revop)
else:
if not isinstance(val, list):
e = revop % {'val': val}
else:
e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
expr.append(e)
if expr:
expr = '(' + ' and '.join(expr) + ')'
else:
expr = None
return expr, filematcher
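# Illustrative sketch (not part of the original module): a hypothetical call
# combining --no-merges with a user filter.  Given the opt2revset table
# above, the returned expression would read along the lines of
# "(not merge() and (user('alice')))"; element order follows dict iteration
# and the exact quoting follows the %r formatting of the values.
def _demo_graphlogrevset(repo, revs):
    return _makegraphlogrevset(repo, [], {'user': ['alice'],
                                          'no_merges': True}, revs)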
def getgraphlogrevs(repo, pats, opts):
"""Return (revs, expr, filematcher) where revs is an iterable of
revision numbers, expr is a revset string built from log options
and file patterns or None, and used to filter 'revs'. If --stat or
--patch are not passed filematcher is None. Otherwise it is a
callable taking a revision number and returning a match objects
filtering the files to be detailed when displaying the revision.
"""
if not len(repo):
return [], None, None
limit = loglimit(opts)
# Default --rev value depends on --follow but --follow behaviour
# depends on revisions resolved from --rev...
follow = opts.get('follow') or opts.get('follow_first')
possiblyunsorted = False # whether revs might need sorting
if opts.get('rev'):
revs = scmutil.revrange(repo, opts['rev'])
# Don't sort here because _makegraphlogrevset might depend on the
# order of revs
possiblyunsorted = True
else:
if follow and len(repo) > 0:
revs = repo.revs('reverse(:.)')
else:
revs = list(repo.changelog)
revs.reverse()
if not revs:
return [], None, None
expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs)
if possiblyunsorted:
revs.sort(reverse=True)
if expr:
# Revset matchers often operate faster on revisions in changelog
# order, because most filters deal with the changelog.
revs.reverse()
matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # the revision matching A then the revision matching B. Sort
# again to fix that.
revs = matcher(repo, revs)
revs.sort(reverse=True)
if limit is not None:
revs = revs[:limit]
return revs, expr, filematcher
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
filematcher=None):
seen, state = [], graphmod.asciistate()
for rev, type, ctx, parents in dag:
char = 'o'
if ctx.node() in showparents:
char = '@'
elif ctx.obsolete():
char = 'x'
copies = None
if getrenamed and ctx.rev():
copies = []
for fn in ctx.files():
rename = getrenamed(fn, ctx.rev())
if rename:
copies.append((fn, rename[0]))
revmatchfn = None
if filematcher is not None:
revmatchfn = filematcher(ctx.rev())
displayer.show(ctx, copies=copies, matchfn=revmatchfn)
lines = displayer.hunk.pop(rev).split('\n')
if not lines[-1]:
del lines[-1]
displayer.flush(rev)
edges = edgefn(type, char, lines, seen, rev, parents)
for type, char, lines, coldata in edges:
graphmod.ascii(ui, state, type, char, lines, coldata)
displayer.close()
def graphlog(ui, repo, *pats, **opts):
# Parameters are identical to log command ones
revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
revdag = graphmod.dagwalker(repo, revs)
getrenamed = None
if opts.get('copies'):
endrev = None
if opts.get('rev'):
endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
displayer = show_changeset(ui, repo, opts, buffered=True)
showparents = [ctx.node() for ctx in repo[None].parents()]
displaygraph(ui, revdag, displayer, showparents,
graphmod.asciiedges, getrenamed, filematcher)
def checkunsupportedgraphflags(pats, opts):
for op in ["newest_first"]:
if op in opts and opts[op]:
raise util.Abort(_("-G/--graph option is incompatible with --%s")
% op.replace("_", "-"))
def graphrevs(repo, nodes, opts):
limit = loglimit(opts)
nodes.reverse()
if limit is not None:
nodes = nodes[:limit]
return graphmod.nodes(repo, nodes)
def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
join = lambda f: os.path.join(prefix, f)
bad = []
oldbad = match.bad
match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
names = []
wctx = repo[None]
cca = None
abort, warn = scmutil.checkportabilityalert(ui)
if abort or warn:
cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
for f in repo.walk(match):
exact = match.exact(f)
if exact or not explicitonly and f not in repo.dirstate:
if cca:
cca(f)
names.append(f)
if ui.verbose or not exact:
ui.status(_('adding %s\n') % match.rel(join(f)))
for subpath in sorted(wctx.substate):
sub = wctx.sub(subpath)
try:
submatch = matchmod.narrowmatcher(subpath, match)
if listsubrepos:
bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
False))
else:
bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
True))
except error.LookupError:
ui.status(_("skipping missing subrepository: %s\n")
% join(subpath))
if not dryrun:
rejected = wctx.add(names, prefix)
bad.extend(f for f in rejected if f in match.files())
return bad
def forget(ui, repo, match, prefix, explicitonly):
join = lambda f: os.path.join(prefix, f)
bad = []
oldbad = match.bad
match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
wctx = repo[None]
forgot = []
s = repo.status(match=match, clean=True)
forget = sorted(s[0] + s[1] + s[3] + s[6])
if explicitonly:
forget = [f for f in forget if match.exact(f)]
for subpath in sorted(wctx.substate):
sub = wctx.sub(subpath)
try:
submatch = matchmod.narrowmatcher(subpath, match)
subbad, subforgot = sub.forget(ui, submatch, prefix)
bad.extend([subpath + '/' + f for f in subbad])
forgot.extend([subpath + '/' + f for f in subforgot])
except error.LookupError:
ui.status(_("skipping missing subrepository: %s\n")
% join(subpath))
if not explicitonly:
for f in match.files():
if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
if f not in forgot:
if os.path.exists(match.rel(join(f))):
ui.warn(_('not removing %s: '
'file is already untracked\n')
% match.rel(join(f)))
bad.append(f)
for f in forget:
if ui.verbose or not match.exact(f):
ui.status(_('removing %s\n') % match.rel(join(f)))
rejected = wctx.forget(forget, prefix)
bad.extend(f for f in rejected if f in match.files())
forgot.extend(forget)
return bad, forgot
def duplicatecopies(repo, rev, fromrev):
'''reproduce copies from fromrev to rev in the dirstate'''
for dst, src in copies.pathcopies(repo[fromrev], repo[rev]).iteritems():
# copies.pathcopies returns backward renames, so dst might not
# actually be in the dirstate
if repo.dirstate[dst] in "nma":
repo.dirstate.copy(src, dst)
def commit(ui, repo, commitfunc, pats, opts):
'''commit the specified files or all outstanding changes'''
date = opts.get('date')
if date:
opts['date'] = util.parsedate(date)
message = logmessage(ui, opts)
# extract addremove carefully -- this function can be called from a command
# that doesn't support addremove
if opts.get('addremove'):
scmutil.addremove(repo, pats, opts)
return commitfunc(ui, repo, message,
scmutil.match(repo[None], pats, opts), opts)
def amend(ui, repo, commitfunc, old, extra, pats, opts):
ui.note(_('amending changeset %s\n') % old)
base = old.p1()
wlock = lock = newid = None
try:
wlock = repo.wlock()
lock = repo.lock()
tr = repo.transaction('amend')
try:
# See if we got a message from -m or -l, if not, open the editor
# with the message of the changeset to amend
message = logmessage(ui, opts)
# ensure logfile does not conflict with later enforcement of the
# message. potential logfile content has been processed by
# `logmessage` anyway.
opts.pop('logfile')
# First, do a regular commit to record all changes in the working
# directory (if there are any)
ui.callhooks = False
currentbookmark = repo._bookmarkcurrent
try:
repo._bookmarkcurrent = None
opts['message'] = 'temporary amend commit for %s' % old
node = commit(ui, repo, commitfunc, pats, opts)
finally:
repo._bookmarkcurrent = currentbookmark
ui.callhooks = True
ctx = repo[node]
# Participating changesets:
#
# node/ctx o - new (intermediate) commit that contains changes
# | from working dir to go into amending commit
# | (or a workingctx if there were no changes)
# |
# old o - changeset to amend
# |
# base o - parent of amending changeset
# Update extra dict from amended commit (e.g. to preserve graft
# source)
extra.update(old.extra())
# Also update it from the intermediate commit or from the wctx
extra.update(ctx.extra())
if len(old.parents()) > 1:
# ctx.files() isn't reliable for merges, so fall back to the
# slower repo.status() method
files = set([fn for st in repo.status(base, old)[:3]
for fn in st])
else:
files = set(old.files())
# Second, we use either the commit we just did, or if there were no
# changes the parent of the working directory as the version of the
# files in the final amend commit
if node:
ui.note(_('copying changeset %s to %s\n') % (ctx, base))
user = ctx.user()
date = ctx.date()
# Recompute copies (avoid recording a -> b -> a)
copied = copies.pathcopies(base, ctx)
# Prune files which were reverted by the updates: if old
# introduced file X and our intermediate commit, node,
# renamed that file, then those two files are the same and
# we can discard X from our list of files. Likewise if X
# was deleted, it's no longer relevant
files.update(ctx.files())
def samefile(f):
if f in ctx.manifest():
a = ctx.filectx(f)
if f in base.manifest():
b = base.filectx(f)
return (not a.cmp(b)
and a.flags() == b.flags())
else:
return False
else:
return f not in base.manifest()
files = [f for f in files if not samefile(f)]
def filectxfn(repo, ctx_, path):
try:
fctx = ctx[path]
flags = fctx.flags()
mctx = context.memfilectx(fctx.path(), fctx.data(),
islink='l' in flags,
isexec='x' in flags,
copied=copied.get(path))
return mctx
except KeyError:
raise IOError
else:
ui.note(_('copying changeset %s to %s\n') % (old, base))
# Use version of files as in the old cset
def filectxfn(repo, ctx_, path):
try:
return old.filectx(path)
except KeyError:
raise IOError
user = opts.get('user') or old.user()
date = opts.get('date') or old.date()
editmsg = False
if not message:
editmsg = True
message = old.description()
pureextra = extra.copy()
extra['amend_source'] = old.hex()
new = context.memctx(repo,
parents=[base.node(), old.p2().node()],
text=message,
files=files,
filectxfn=filectxfn,
user=user,
date=date,
extra=extra)
if editmsg:
new._text = commitforceeditor(repo, new, [])
newdesc = changelog.stripdesc(new.description())
if ((not node)
and newdesc == old.description()
and user == old.user()
and date == old.date()
and pureextra == old.extra()):
# nothing changed. continuing here would create a new node
# anyway because of the amend_source noise.
#
                # This is not what we expect from amend.
return old.node()
ph = repo.ui.config('phases', 'new-commit', phases.draft)
try:
repo.ui.setconfig('phases', 'new-commit', old.phase())
newid = repo.commitctx(new)
finally:
repo.ui.setconfig('phases', 'new-commit', ph)
if newid != old.node():
# Reroute the working copy parent to the new changeset
repo.setparents(newid, nullid)
# Move bookmarks from old parent to amend commit
bms = repo.nodebookmarks(old.node())
if bms:
marks = repo._bookmarks
for bm in bms:
marks[bm] = newid
marks.write()
            # commit the whole amend process
if obsolete._enabled and newid != old.node():
# mark the new changeset as successor of the rewritten one
new = repo[newid]
obs = [(old, (new,))]
if node:
obs.append((ctx, ()))
obsolete.createmarkers(repo, obs)
tr.close()
finally:
tr.release()
if (not obsolete._enabled) and newid != old.node():
# Strip the intermediate commit (if there was one) and the amended
# commit
if node:
ui.note(_('stripping intermediate changeset %s\n') % ctx)
ui.note(_('stripping amended changeset %s\n') % old)
repair.strip(ui, repo, old.node(), topic='amend-backup')
finally:
if newid is None:
repo.dirstate.invalidate()
lockmod.release(lock, wlock)
return newid
def commiteditor(repo, ctx, subs):
if ctx.description():
return ctx.description()
return commitforceeditor(repo, ctx, subs)
def commitforceeditor(repo, ctx, subs):
edittext = []
modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
if ctx.description():
edittext.append(ctx.description())
edittext.append("")
edittext.append("") # Empty line between message and comments.
edittext.append(_("HG: Enter commit message."
" Lines beginning with 'HG:' are removed."))
edittext.append(_("HG: Leave message empty to abort commit."))
edittext.append("HG: --")
edittext.append(_("HG: user: %s") % ctx.user())
if ctx.p2():
edittext.append(_("HG: branch merge"))
if ctx.branch():
edittext.append(_("HG: branch '%s'") % ctx.branch())
if bookmarks.iscurrent(repo):
edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
edittext.extend([_("HG: subrepo %s") % s for s in subs])
edittext.extend([_("HG: added %s") % f for f in added])
edittext.extend([_("HG: changed %s") % f for f in modified])
edittext.extend([_("HG: removed %s") % f for f in removed])
if not added and not modified and not removed:
edittext.append(_("HG: no files changed"))
edittext.append("")
# run editor in the repository root
olddir = os.getcwd()
os.chdir(repo.root)
text = repo.ui.edit("\n".join(edittext), ctx.user())
text = re.sub("(?m)^HG:.*(\n|$)", "", text)
os.chdir(olddir)
if not text.strip():
raise util.Abort(_("empty commit message"))
return text
def commitstatus(repo, node, branch, bheads=None, opts={}):
ctx = repo[node]
parents = ctx.parents()
if (not opts.get('amend') and bheads and node not in bheads and not
[x for x in parents if x.node() in bheads and x.branch() == branch]):
repo.ui.status(_('created new head\n'))
# The message is not printed for initial roots. For the other
# changesets, it is printed in the following situations:
#
# Par column: for the 2 parents with ...
# N: null or no parent
# B: parent is on another named branch
# C: parent is a regular non head changeset
# H: parent was a branch head of the current branch
# Msg column: whether we print "created new head" message
# In the following, it is assumed that there already exists some
# initial branch heads of the current branch, otherwise nothing is
# printed anyway.
#
# Par Msg Comment
# N N y additional topo root
#
# B N y additional branch root
# C N y additional topo head
# H N n usual case
#
# B B y weird additional branch root
# C B y branch merge
# H B n merge with named branch
#
# C C y additional head from merge
# C H n merge with a head
#
# H H n head merge: head count decreases
if not opts.get('close_branch'):
for r in parents:
if r.closesbranch() and r.branch() == branch:
repo.ui.status(_('reopening closed branch head %d\n') % r)
if repo.ui.debugflag:
repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
elif repo.ui.verbose:
repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
def revert(ui, repo, ctx, parents, *pats, **opts):
parent, p2 = parents
node = ctx.node()
mf = ctx.manifest()
if node == parent:
pmf = mf
else:
pmf = None
# need all matching names in dirstate and manifest of target rev,
# so have to walk both. do not print errors if files exist in one
    # but not the other.
names = {}
wlock = repo.wlock()
try:
# walk dirstate.
m = scmutil.match(repo[None], pats, opts)
m.bad = lambda x, y: False
for abs in repo.walk(m):
names[abs] = m.rel(abs), m.exact(abs)
# walk target manifest.
def badfn(path, msg):
if path in names:
return
if path in ctx.substate:
return
path_ = path + '/'
for f in names:
if f.startswith(path_):
return
ui.warn("%s: %s\n" % (m.rel(path), msg))
m = scmutil.match(ctx, pats, opts)
m.bad = badfn
for abs in ctx.walk(m):
if abs not in names:
names[abs] = m.rel(abs), m.exact(abs)
# get the list of subrepos that must be reverted
targetsubs = sorted(s for s in ctx.substate if m(s))
m = scmutil.matchfiles(repo, names)
changes = repo.status(match=m)[:4]
modified, added, removed, deleted = map(set, changes)
# if f is a rename, also revert the source
cwd = repo.getcwd()
for f in added:
src = repo.dirstate.copied(f)
if src and src not in names and repo.dirstate[src] == 'r':
removed.add(src)
names[src] = (repo.pathto(src, cwd), True)
def removeforget(abs):
if repo.dirstate[abs] == 'a':
return _('forgetting %s\n')
return _('removing %s\n')
revert = ([], _('reverting %s\n'))
add = ([], _('adding %s\n'))
remove = ([], removeforget)
undelete = ([], _('undeleting %s\n'))
disptable = (
# dispatch table:
# file state
# action if in target manifest
# action if not in target manifest
# make backup if in target manifest
# make backup if not in target manifest
(modified, revert, remove, True, True),
(added, revert, remove, True, False),
(removed, undelete, None, True, False),
(deleted, revert, remove, False, False),
)
for abs, (rel, exact) in sorted(names.items()):
mfentry = mf.get(abs)
target = repo.wjoin(abs)
def handle(xlist, dobackup):
xlist[0].append(abs)
if (dobackup and not opts.get('no_backup') and
os.path.lexists(target) and
abs in ctx and repo[None][abs].cmp(ctx[abs])):
bakname = "%s.orig" % rel
ui.note(_('saving current version of %s as %s\n') %
(rel, bakname))
if not opts.get('dry_run'):
util.rename(target, bakname)
if ui.verbose or not exact:
msg = xlist[1]
if not isinstance(msg, basestring):
msg = msg(abs)
ui.status(msg % rel)
for table, hitlist, misslist, backuphit, backupmiss in disptable:
if abs not in table:
continue
# file has changed in dirstate
if mfentry:
handle(hitlist, backuphit)
elif misslist is not None:
handle(misslist, backupmiss)
break
else:
if abs not in repo.dirstate:
if mfentry:
handle(add, True)
elif exact:
ui.warn(_('file not managed: %s\n') % rel)
continue
# file has not changed in dirstate
if node == parent:
if exact:
ui.warn(_('no changes needed to %s\n') % rel)
continue
if pmf is None:
# only need parent manifest in this unlikely case,
# so do not read by default
pmf = repo[parent].manifest()
if abs in pmf and mfentry:
# if version of file is same in parent and target
# manifests, do nothing
if (pmf[abs] != mfentry or
pmf.flags(abs) != mf.flags(abs)):
handle(revert, False)
else:
handle(remove, False)
if not opts.get('dry_run'):
def checkout(f):
fc = ctx[f]
repo.wwrite(f, fc.data(), fc.flags())
audit_path = scmutil.pathauditor(repo.root)
for f in remove[0]:
if repo.dirstate[f] == 'a':
repo.dirstate.drop(f)
continue
audit_path(f)
try:
util.unlinkpath(repo.wjoin(f))
except OSError:
pass
repo.dirstate.remove(f)
normal = None
if node == parent:
# We're reverting to our parent. If possible, we'd like status
# to report the file as clean. We have to use normallookup for
# merges to avoid losing information about merged/dirty files.
if p2 != nullid:
normal = repo.dirstate.normallookup
else:
normal = repo.dirstate.normal
for f in revert[0]:
checkout(f)
if normal:
normal(f)
for f in add[0]:
checkout(f)
repo.dirstate.add(f)
normal = repo.dirstate.normallookup
if node == parent and p2 == nullid:
normal = repo.dirstate.normal
for f in undelete[0]:
checkout(f)
normal(f)
copied = copies.pathcopies(repo[parent], ctx)
for f in add[0] + undelete[0] + revert[0]:
if f in copied:
repo.dirstate.copy(copied[f], f)
if targetsubs:
# Revert the subrepos on the revert list
for sub in targetsubs:
ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
finally:
wlock.release()
def command(table):
'''returns a function object bound to table which can be used as
a decorator for populating table as a command table'''
def cmd(name, options=(), synopsis=None):
def decorator(func):
if synopsis:
table[name] = func, list(options), synopsis
else:
table[name] = func, list(options)
return func
return decorator
return cmd
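# Illustrative sketch (not part of the original module): how an extension
# could use command() to populate its own command table.  The option tuple
# format (short flag, long flag, default, help) mirrors the one used by
# Mercurial command tables; the command name and function are hypothetical.
_demotable = {}
_democommand = command(_demotable)

@_democommand('demo-hello', [('g', 'greeting', 'hello', 'greeting to use')],
              'hg demo-hello [-g TEXT]')
def _demohello(ui, repo, **opts):
    # after decoration, _demotable['demo-hello'] holds
    # (_demohello, [option tuples], synopsis)
    ui.write('%s\n' % opts.get('greeting', 'hello'))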
# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()
# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
('graftstate', True, False, _('graft in progress'),
_("use 'hg graft --continue' or 'hg update' to abort")),
('updatestate', True, False, _('last update was interrupted'),
_("use 'hg update' to get a consistent checkout"))
]
def checkunfinished(repo, commit=False):
'''Look for an unfinished multistep operation, like graft, and abort
if found. It's probably good to check this right before
bailifchanged().
'''
for f, clearable, allowcommit, msg, hint in unfinishedstates:
if commit and allowcommit:
continue
if repo.vfs.exists(f):
raise util.Abort(msg, hint=hint)
def clearunfinished(repo):
'''Check for unfinished operations (as above), and clear the ones
that are clearable.
'''
for f, clearable, allowcommit, msg, hint in unfinishedstates:
if not clearable and repo.vfs.exists(f):
raise util.Abort(msg, hint=hint)
for f, clearable, allowcommit, msg, hint in unfinishedstates:
if clearable and repo.vfs.exists(f):
util.unlink(repo.join(f))
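# Illustrative sketch (not part of the original module): a history-rewriting
# command would typically guard itself with the helper above right before
# bailifchanged(), as suggested in the checkunfinished() docstring.
def _demo_guard(ui, repo):
    # abort if e.g. a graft is in progress, then abort on local changes
    checkunfinished(repo)
    bailifchanged(repo)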
|
josephbolus/dokku | refs/heads/master | tests/apps/python-flask/hello.py | 236 | import os
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
return 'python/flask'
|
DBuildService/atomic-reactor | refs/heads/master | tests/plugins/test_compress.py | 1 | import os
import tarfile
import pytest
from atomic_reactor.constants import (EXPORTED_COMPRESSED_IMAGE_NAME_TEMPLATE,
IMAGE_TYPE_DOCKER_ARCHIVE)
from atomic_reactor.core import DockerTasker
from atomic_reactor.plugin import PostBuildPluginsRunner
from atomic_reactor.plugins.post_compress import CompressPlugin
from osbs.utils import ImageName
from atomic_reactor.build import BuildResult
from tests.constants import INPUT_IMAGE, MOCK
if MOCK:
from tests.docker_mock import mock_docker
class Y(object):
dockerfile_path = None
path = None
class X(object):
image_id = INPUT_IMAGE
source = Y()
base_image = ImageName.parse('asd')
class TestCompress(object):
@pytest.mark.parametrize('source_build', (True, False))
@pytest.mark.parametrize('method, load_exported_image, give_export, extension', [
('gzip', False, True, 'gz'),
('lzma', False, False, 'xz'),
('gzip', True, True, 'gz'),
('gzip', True, False, 'gz'),
('spam', True, True, None),
])
def test_compress(self, tmpdir, caplog, workflow,
source_build, method,
load_exported_image, give_export, extension):
if MOCK:
mock_docker()
tasker = DockerTasker()
workflow.builder = X()
exp_img = os.path.join(str(tmpdir), 'img.tar')
if source_build:
workflow.build_result = BuildResult(oci_image_path="oci_path")
else:
workflow.build_result = BuildResult(image_id="12345")
if load_exported_image and give_export:
tarfile.open(exp_img, mode='w').close()
workflow.exported_image_sequence.append({'path': exp_img,
'type': IMAGE_TYPE_DOCKER_ARCHIVE})
tasker = None # image provided, should not query docker
runner = PostBuildPluginsRunner(
tasker,
workflow,
[{
'name': CompressPlugin.key,
'args': {
'method': method,
'load_exported_image': load_exported_image,
},
}]
)
if not extension:
with pytest.raises(Exception) as excinfo:
runner.run()
assert 'Unsupported compression format' in str(excinfo.value)
return
runner.run()
if source_build and not (give_export and load_exported_image):
assert 'skipping, no exported source image to compress' in caplog.text
else:
compressed_img = os.path.join(
workflow.source.tmpdir,
EXPORTED_COMPRESSED_IMAGE_NAME_TEMPLATE.format(extension))
assert os.path.exists(compressed_img)
metadata = workflow.exported_image_sequence[-1]
assert metadata['path'] == compressed_img
assert metadata['type'] == IMAGE_TYPE_DOCKER_ARCHIVE
assert 'uncompressed_size' in metadata
assert isinstance(metadata['uncompressed_size'], int)
assert ", ratio: " in caplog.text
def test_skip_plugin(self, caplog, workflow):
if MOCK:
mock_docker()
tasker = DockerTasker()
workflow.builder = X()
workflow.user_params['scratch'] = True
runner = PostBuildPluginsRunner(
tasker,
workflow,
[{
'name': CompressPlugin.key,
'args': {
'method': 'gzip',
'load_exported_image': True,
},
}]
)
runner.run()
assert 'scratch build, skipping plugin' in caplog.text
|
msebire/intellij-community | refs/heads/master | python/testData/postfix/not/and.py | 39 | def f():
return True and False.not<caret> |
JuliBakagianni/CEF-ELRC | refs/heads/master | lib/python2.7/site-packages/unidecode/x013.py | 252 | data = (
'ja', # 0x00
'ju', # 0x01
'ji', # 0x02
'jaa', # 0x03
'jee', # 0x04
'je', # 0x05
'jo', # 0x06
'jwa', # 0x07
'ga', # 0x08
'gu', # 0x09
'gi', # 0x0a
'gaa', # 0x0b
'gee', # 0x0c
'ge', # 0x0d
'go', # 0x0e
'[?]', # 0x0f
'gwa', # 0x10
'[?]', # 0x11
'gwi', # 0x12
'gwaa', # 0x13
'gwee', # 0x14
'gwe', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'gga', # 0x18
'ggu', # 0x19
'ggi', # 0x1a
'ggaa', # 0x1b
'ggee', # 0x1c
'gge', # 0x1d
'ggo', # 0x1e
'[?]', # 0x1f
'tha', # 0x20
'thu', # 0x21
'thi', # 0x22
'thaa', # 0x23
'thee', # 0x24
'the', # 0x25
'tho', # 0x26
'thwa', # 0x27
'cha', # 0x28
'chu', # 0x29
'chi', # 0x2a
'chaa', # 0x2b
'chee', # 0x2c
'che', # 0x2d
'cho', # 0x2e
'chwa', # 0x2f
'pha', # 0x30
'phu', # 0x31
'phi', # 0x32
'phaa', # 0x33
'phee', # 0x34
'phe', # 0x35
'pho', # 0x36
'phwa', # 0x37
'tsa', # 0x38
'tsu', # 0x39
'tsi', # 0x3a
'tsaa', # 0x3b
'tsee', # 0x3c
'tse', # 0x3d
'tso', # 0x3e
'tswa', # 0x3f
'tza', # 0x40
'tzu', # 0x41
'tzi', # 0x42
'tzaa', # 0x43
'tzee', # 0x44
'tze', # 0x45
'tzo', # 0x46
'[?]', # 0x47
'fa', # 0x48
'fu', # 0x49
'fi', # 0x4a
'faa', # 0x4b
'fee', # 0x4c
'fe', # 0x4d
'fo', # 0x4e
'fwa', # 0x4f
'pa', # 0x50
'pu', # 0x51
'pi', # 0x52
'paa', # 0x53
'pee', # 0x54
'pe', # 0x55
'po', # 0x56
'pwa', # 0x57
'rya', # 0x58
'mya', # 0x59
'fya', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
' ', # 0x61
'.', # 0x62
',', # 0x63
';', # 0x64
':', # 0x65
':: ', # 0x66
'?', # 0x67
'//', # 0x68
'1', # 0x69
'2', # 0x6a
'3', # 0x6b
'4', # 0x6c
'5', # 0x6d
'6', # 0x6e
'7', # 0x6f
'8', # 0x70
'9', # 0x71
'10+', # 0x72
'20+', # 0x73
'30+', # 0x74
'40+', # 0x75
'50+', # 0x76
'60+', # 0x77
'70+', # 0x78
'80+', # 0x79
'90+', # 0x7a
'100+', # 0x7b
'10,000+', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'a', # 0xa0
'e', # 0xa1
'i', # 0xa2
'o', # 0xa3
'u', # 0xa4
'v', # 0xa5
'ga', # 0xa6
'ka', # 0xa7
'ge', # 0xa8
'gi', # 0xa9
'go', # 0xaa
'gu', # 0xab
'gv', # 0xac
'ha', # 0xad
'he', # 0xae
'hi', # 0xaf
'ho', # 0xb0
'hu', # 0xb1
'hv', # 0xb2
'la', # 0xb3
'le', # 0xb4
'li', # 0xb5
'lo', # 0xb6
'lu', # 0xb7
'lv', # 0xb8
'ma', # 0xb9
'me', # 0xba
'mi', # 0xbb
'mo', # 0xbc
'mu', # 0xbd
'na', # 0xbe
'hna', # 0xbf
'nah', # 0xc0
'ne', # 0xc1
'ni', # 0xc2
'no', # 0xc3
'nu', # 0xc4
'nv', # 0xc5
'qua', # 0xc6
'que', # 0xc7
'qui', # 0xc8
'quo', # 0xc9
'quu', # 0xca
'quv', # 0xcb
'sa', # 0xcc
's', # 0xcd
'se', # 0xce
'si', # 0xcf
'so', # 0xd0
'su', # 0xd1
'sv', # 0xd2
'da', # 0xd3
'ta', # 0xd4
'de', # 0xd5
'te', # 0xd6
'di', # 0xd7
'ti', # 0xd8
'do', # 0xd9
'du', # 0xda
'dv', # 0xdb
'dla', # 0xdc
'tla', # 0xdd
'tle', # 0xde
'tli', # 0xdf
'tlo', # 0xe0
'tlu', # 0xe1
'tlv', # 0xe2
'tsa', # 0xe3
'tse', # 0xe4
'tsi', # 0xe5
'tso', # 0xe6
'tsu', # 0xe7
'tsv', # 0xe8
'wa', # 0xe9
'we', # 0xea
'wi', # 0xeb
'wo', # 0xec
'wu', # 0xed
'wv', # 0xee
'ya', # 0xef
'ye', # 0xf0
'yi', # 0xf1
'yo', # 0xf2
'yu', # 0xf3
'yv', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
Mistobaan/tensorflow | refs/heads/master | tensorflow/contrib/predictor/testing_common.py | 93 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common code used for testing `Predictor`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator as contrib_estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as contrib_model_fn
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.export import export_lib
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.saved_model import signature_constants
def get_arithmetic_estimator(core=True, model_dir=None):
"""Returns an `Estimator` that performs basic arithmetic.
Args:
core: if `True`, returns a `tensorflow.python.estimator.Estimator`.
Otherwise, returns a `tensorflow.contrib.learn.Estimator`.
model_dir: directory in which to export checkpoints and saved models.
Returns:
An `Estimator` that performs arithmetic operations on its inputs.
"""
def _model_fn(features, labels, mode):
_ = labels
x = features['x']
y = features['y']
with ops.name_scope('outputs'):
predictions = {'sum': math_ops.add(x, y, name='sum'),
'product': math_ops.multiply(x, y, name='product'),
'difference': math_ops.subtract(x, y, name='difference')}
if core:
export_outputs = {k: export_output.PredictOutput({k: v})
for k, v in predictions.items()}
export_outputs[signature_constants.
DEFAULT_SERVING_SIGNATURE_DEF_KEY] = export_outputs['sum']
return model_fn.EstimatorSpec(mode=mode,
predictions=predictions,
export_outputs=export_outputs,
loss=constant_op.constant(0),
train_op=control_flow_ops.no_op())
else:
output_alternatives = {k: (constants.ProblemType.UNSPECIFIED, {k: v})
for k, v in predictions.items()}
return contrib_model_fn.ModelFnOps(
mode=mode,
predictions=predictions,
output_alternatives=output_alternatives,
loss=constant_op.constant(0),
train_op=control_flow_ops.no_op())
if core:
return core_estimator.Estimator(_model_fn)
else:
return contrib_estimator.Estimator(_model_fn, model_dir=model_dir)
def get_arithmetic_input_fn(core=True, train=False):
"""Returns a input functions or serving input receiver function."""
def _input_fn():
with ops.name_scope('inputs'):
x = array_ops.placeholder_with_default(0.0, shape=[], name='x')
y = array_ops.placeholder_with_default(0.0, shape=[], name='y')
label = constant_op.constant(0.0)
features = {'x': x, 'y': y}
if core:
if train:
return features, label
return export_lib.ServingInputReceiver(
features=features,
receiver_tensors=features)
else:
if train:
return features, label
return input_fn_utils.InputFnOps(
features=features,
labels={},
default_inputs=features)
return _input_fn
|
mdsafwan/Deal-My-Stuff | refs/heads/master | Lib/site-packages/MySQLdb/constants/FIELD_TYPE.py | 124 | """MySQL FIELD_TYPE Constants
These constants represent the various column (field) types that are
supported by MySQL.
"""
DECIMAL = 0
TINY = 1
SHORT = 2
LONG = 3
FLOAT = 4
DOUBLE = 5
NULL = 6
TIMESTAMP = 7
LONGLONG = 8
INT24 = 9
DATE = 10
TIME = 11
DATETIME = 12
YEAR = 13
NEWDATE = 14
VARCHAR = 15
BIT = 16
NEWDECIMAL = 246
ENUM = 247
SET = 248
TINY_BLOB = 249
MEDIUM_BLOB = 250
LONG_BLOB = 251
BLOB = 252
VAR_STRING = 253
STRING = 254
GEOMETRY = 255
CHAR = TINY
INTERVAL = ENUM
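# Illustrative sketch (not part of the original module): these codes appear
# as the type element of each cursor.description entry, so a hypothetical
# helper can classify a result column using only the constants above.
def _is_temporal(code):
    # DATE/TIME/DATETIME/TIMESTAMP/YEAR/NEWDATE are the temporal type codes
    return code in (DATE, TIME, DATETIME, TIMESTAMP, YEAR, NEWDATE)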
|
imsparsh/python-social-auth | refs/heads/master | social/backends/stocktwits.py | 3 | """
Stocktwits OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/stocktwits.html
"""
from social.backends.oauth import BaseOAuth2
class StocktwitsOAuth2(BaseOAuth2):
"""Stockwiths OAuth2 backend"""
name = 'stocktwits'
AUTHORIZATION_URL = 'https://api.stocktwits.com/api/2/oauth/authorize'
ACCESS_TOKEN_URL = 'https://api.stocktwits.com/api/2/oauth/token'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ','
DEFAULT_SCOPE = ['read', 'publish_messages', 'publish_watch_lists',
'follow_users', 'follow_stocks']
def get_user_id(self, details, response):
return response['user']['id']
def get_user_details(self, response):
"""Return user details from Stocktwits account"""
try:
first_name, last_name = response['user']['name'].split(' ', 1)
except:
first_name = response['user']['name']
last_name = ''
return {'username': response['user']['username'],
'email': '', # not supplied
'fullname': response['user']['name'],
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json(
'https://api.stocktwits.com/api/2/account/verify.json',
params={'access_token': access_token}
)
|
brennanmorell/Mariner | refs/heads/master | WhaleLevel.py | 1 | #_____DEPRECATED FOR NOW_____
class WhaleLevel():
def __init__(self, price, volume, orders, current_whale):
self._price = price
self._orders = orders
self._volume = volume
self._current_whale = current_whale
def get_price(self):
return self._price
def get_orders(self):
return self._orders
def add_order(self, order):
self._orders.append(order)
self._volume+=order.get_volume()
self.update_current_whale()
def remove_order(self, order):
self._orders.remove(order)
self._volume-=order.get_volume()
self.update_current_whale()
def get_volume(self):
return self._volume
def get_current_whale(self):
return self._current_whale
def add_volume(self, volume):
self._volume+=volume
def remove_volume(self, volume):
self._volume-=volume
def update_current_whale(self):
max_order = None
max_volume = 0
        for order in self._orders:
            if order.get_volume() > max_volume:
                max_volume = order.get_volume()
                max_order = order
        self._current_whale = max_order
def __str__(self):
return "Whale Level [price: " + str(self._price) + " volume: " + str(self._volume) + " current_whale: " + self._current_whale "]"
def __cmp__(self, other):
return cmp(self._price, other._price)
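# Illustrative sketch (not part of the original module): WhaleLevel expects
# order objects exposing get_volume(); the minimal stand-in below is purely
# hypothetical and only shows how a level tracks its largest ("whale") order.
class _FakeOrder(object):
    def __init__(self, volume):
        self._volume = volume
    def get_volume(self):
        return self._volume

def _demo_whale_level():
    big, small = _FakeOrder(500), _FakeOrder(10)
    level = WhaleLevel(price=100.0, volume=0, orders=[], current_whale=None)
    level.add_order(small)
    level.add_order(big)
    return level.get_current_whale() is big  # True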
|
spacehitchhiker/test-task | refs/heads/master | node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
|
gwpy/gwpy.github.io | refs/heads/master | docs/v0.3/examples/signal/gw150914-1.py | 43 | from gwpy.timeseries import TimeSeries
data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478) |
allenp/odoo | refs/heads/9.0 | addons/l10n_cn_standard/__init__.py | 256 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2007-2014 Jeff Wang(<http://[email protected]>).
|
c-a/jhbuild | refs/heads/github | jhbuild/modtypes/autotools.py | 1 | # jhbuild - a tool to ease building collections of source packages
# Copyright (C) 2001-2006 James Henstridge
# Copyright (C) 2007-2008 Frederic Peters
#
# autotools.py: autotools module type definitions.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__metaclass__ = type
import os
import re
import stat
try:
import hashlib
except ImportError:
import md5 as hashlib
from jhbuild.errors import FatalError, BuildStateError, CommandError
from jhbuild.modtypes import \
DownloadableModule, register_module_type, MakeModule
from jhbuild.versioncontrol.tarball import TarballBranch
__all__ = [ 'AutogenModule' ]
class AutogenModule(MakeModule, DownloadableModule):
'''Base type for modules that are distributed with a Gnome style
"autogen.sh" script and the GNU build tools. Subclasses are
responsible for downloading/updating the working copy.'''
type = 'autogen'
PHASE_CHECKOUT = DownloadableModule.PHASE_CHECKOUT
PHASE_FORCE_CHECKOUT = DownloadableModule.PHASE_FORCE_CHECKOUT
PHASE_CLEAN = 'clean'
PHASE_DISTCLEAN = 'distclean'
PHASE_CONFIGURE = 'configure'
PHASE_BUILD = 'build'
PHASE_CHECK = 'check'
PHASE_DIST = 'dist'
PHASE_INSTALL = 'install'
def __init__(self, name, branch=None,
autogenargs='', makeargs='',
makeinstallargs='',
supports_non_srcdir_builds=True,
skip_autogen=False,
skip_install_phase=False,
autogen_sh='autogen.sh',
makefile='Makefile',
autogen_template=None,
check_target=True,
supports_static_analyzer=True):
MakeModule.__init__(self, name, branch=branch, makeargs=makeargs,
makeinstallargs=makeinstallargs, makefile=makefile)
self.autogenargs = autogenargs
self.supports_non_srcdir_builds = supports_non_srcdir_builds
self.skip_autogen = skip_autogen
self.skip_install_phase = skip_install_phase
self.autogen_sh = autogen_sh
self.autogen_template = autogen_template
self.check_target = check_target
self.supports_install_destdir = True
self.supports_static_analyzer = supports_static_analyzer
def get_srcdir(self, buildscript):
return self.branch.srcdir
def get_builddir(self, buildscript):
if buildscript.config.buildroot and self.supports_non_srcdir_builds:
d = buildscript.config.builddir_pattern % (
self.branch.checkoutdir or self.branch.get_module_basename())
return os.path.join(buildscript.config.buildroot, d)
else:
return self.get_srcdir(buildscript)
def _file_exists_and_is_newer_than(self, potential, other):
try:
other_stbuf = os.stat(other)
potential_stbuf = os.stat(potential)
except OSError, e:
return False
return potential_stbuf.st_mtime > other_stbuf.st_mtime
def _get_configure_cmd(self, buildscript):
if self.configure_cmd is not None:
return self.configure_cmd
if self.autogen_template:
template = self.autogen_template
else:
template = ("%(srcdir)s/%(autogen-sh)s --prefix %(prefix)s"
" --libdir %(libdir)s %(autogenargs)s ")
autogenargs = self.autogenargs + ' ' + self.config.module_autogenargs.get(
self.name, self.config.autogenargs)
vars = {'prefix': buildscript.config.prefix,
'autogen-sh': self.autogen_sh,
'autogenargs': autogenargs}
if buildscript.config.buildroot and self.supports_non_srcdir_builds:
vars['srcdir'] = self.get_srcdir(buildscript)
else:
vars['srcdir'] = '.'
if buildscript.config.use_lib64:
vars['libdir'] = "'${exec_prefix}/lib64'"
else:
vars['libdir'] = "'${exec_prefix}/lib'"
cmd = self.static_analyzer_pre_cmd(buildscript) + template % vars
if self.autogen_sh == 'autoreconf':
cmd = cmd.replace('autoreconf', 'configure')
cmd = cmd.replace('--enable-maintainer-mode', '')
# Fix up the arguments for special cases:
# tarballs: remove --enable-maintainer-mode to avoid breaking build
# tarballs: remove '-- ' to avoid breaking build (GStreamer weirdness)
# non-tarballs: place --prefix and --libdir after '-- ', if present
if self.autogen_sh == 'configure':
cmd = cmd.replace('--enable-maintainer-mode', '')
# Also, don't pass '--', which gstreamer attempts to do, since
# it is royally broken.
cmd = cmd.replace('-- ', '')
else:
# place --prefix and --libdir arguments after '-- '
# (GStreamer weirdness)
if autogenargs.find('-- ') != -1:
p = re.compile('(.*)(--prefix %s )((?:--libdir %s )?)(.*)-- ' %
(buildscript.config.prefix, "'\${exec_prefix}/lib64'"))
cmd = p.sub(r'\1\4-- \2\3', cmd)
# If there is no --exec-prefix in the constructed autogen command, we
# can safely assume it will be the same as {prefix} and substitute it
# right now, so the printed command can be copy/pasted afterwards.
# (GNOME #580272)
if not '--exec-prefix' in template:
cmd = cmd.replace('${exec_prefix}', buildscript.config.prefix)
self.configure_cmd = cmd
return cmd
def skip_configure(self, buildscript, last_phase):
# skip if manually instructed to do so
if self.skip_autogen is True:
return True
# don't skip this stage if we got here from one of the
# following phases:
if last_phase in [self.PHASE_FORCE_CHECKOUT,
self.PHASE_CLEAN,
self.PHASE_BUILD,
self.PHASE_INSTALL]:
return False
if self.skip_autogen == 'never':
return False
# if autogen.sh args has changed, re-run configure
db_entry = buildscript.moduleset.packagedb.get(self.name)
if db_entry:
configure_hash = db_entry.metadata.get('configure-hash')
if configure_hash:
configure_cmd = self._get_configure_cmd(buildscript)
if hashlib.md5(configure_cmd).hexdigest() != configure_hash:
return False
else:
# force one-time reconfigure if no configure-hash
return False
# We can't rely on the autotools maintainer-mode stuff because many
# modules' autogen.sh script includes e.g. gtk-doc and/or intltool,
# which also need to be rerun.
# https://bugzilla.gnome.org/show_bug.cgi?id=660844
if not isinstance(self.branch, TarballBranch):
configsrc = None
srcdir = self.get_srcdir(buildscript)
for name in ['configure.ac', 'configure.in']:
path = os.path.join(srcdir, name)
if os.path.exists(path):
configsrc = path
break
if configsrc is not None:
configure = os.path.join(srcdir, 'configure')
if self._file_exists_and_is_newer_than(configure, configsrc):
return True
return False
def do_configure(self, buildscript):
builddir = self.get_builddir(buildscript)
if buildscript.config.buildroot and not os.path.exists(builddir):
os.makedirs(builddir)
buildscript.set_action(_('Configuring'), self)
srcdir = self.get_srcdir(buildscript)
if self.autogen_sh == 'autogen.sh' and \
not os.path.exists(os.path.join(srcdir, self.autogen_sh)):
# if there is no autogen.sh, automatically fallback to configure
if os.path.exists(os.path.join(srcdir, 'configure')):
self.autogen_sh = 'configure'
try:
if not (os.stat(os.path.join(srcdir, self.autogen_sh))[stat.ST_MODE] & 0111):
os.chmod(os.path.join(srcdir, self.autogen_sh), 0755)
except:
pass
if self.autogen_sh == 'autoreconf':
# autoreconf doesn't honour ACLOCAL_FLAGS, therefore we pass
# a crafted ACLOCAL variable. (GNOME bug 590064)
extra_env = {}
if self.extra_env:
extra_env = self.extra_env.copy()
extra_env['ACLOCAL'] = ' '.join((
extra_env.get('ACLOCAL', os.environ.get('ACLOCAL', 'aclocal')),
extra_env.get('ACLOCAL_FLAGS', os.environ.get('ACLOCAL_FLAGS', ''))))
buildscript.execute(['autoreconf', '-i'], cwd=srcdir,
extra_env=extra_env)
os.chmod(os.path.join(srcdir, 'configure'), 0755)
cmd = self._get_configure_cmd(buildscript)
buildscript.execute(cmd, cwd = builddir, extra_env = self.extra_env)
do_configure.depends = [PHASE_CHECKOUT]
do_configure.error_phases = [PHASE_FORCE_CHECKOUT,
PHASE_CLEAN, PHASE_DISTCLEAN]
def skip_clean(self, buildscript, last_phase):
if 'distclean' in self.config.build_targets:
return True
builddir = self.get_builddir(buildscript)
if not os.path.exists(builddir):
return True
if not os.path.exists(os.path.join(builddir, self.makefile)):
return True
return False
def do_clean(self, buildscript):
buildscript.set_action(_('Cleaning'), self)
makeargs = self.get_makeargs(buildscript)
cmd = '%s %s clean' % (os.environ.get('MAKE', 'make'), makeargs)
buildscript.execute(cmd, cwd = self.get_builddir(buildscript),
extra_env = self.extra_env)
do_clean.depends = [PHASE_CONFIGURE]
do_clean.error_phases = [PHASE_FORCE_CHECKOUT, PHASE_CONFIGURE]
def do_build(self, buildscript):
buildscript.set_action(_('Building'), self)
makeargs = self.get_makeargs(buildscript)
cmd = '%s%s %s' % (self.static_analyzer_pre_cmd(buildscript), os.environ.get('MAKE', 'make'), makeargs)
buildscript.execute(cmd, cwd = self.get_builddir(buildscript),
extra_env = self.extra_env)
do_build.depends = [PHASE_CONFIGURE]
do_build.error_phases = [PHASE_FORCE_CHECKOUT, PHASE_CONFIGURE,
PHASE_CLEAN, PHASE_DISTCLEAN]
def static_analyzer_pre_cmd(self, buildscript):
if self.supports_static_analyzer and buildscript.config.module_static_analyzer.get(self.name, buildscript.config.static_analyzer):
template = buildscript.config.static_analyzer_template + ' '
outputdir = buildscript.config.static_analyzer_outputdir
if not os.path.exists(outputdir):
os.makedirs(outputdir)
vars = {'outputdir': outputdir,
'module': self.name
}
return template % vars
return ''
def skip_check(self, buildscript, last_phase):
if not self.check_target:
return True
if self.name in buildscript.config.module_makecheck:
return not buildscript.config.module_makecheck[self.name]
if 'check' not in buildscript.config.build_targets:
return True
return False
def do_check(self, buildscript):
buildscript.set_action(_('Checking'), self)
makeargs = self.get_makeargs(buildscript, add_parallel=False)
cmd = '%s%s %s check' % (self.static_analyzer_pre_cmd(buildscript), os.environ.get('MAKE', 'make'), makeargs)
try:
buildscript.execute(cmd, cwd = self.get_builddir(buildscript),
extra_env = self.extra_env)
except CommandError:
if not buildscript.config.makecheck_advisory:
raise
do_check.depends = [PHASE_BUILD]
do_check.error_phases = [PHASE_FORCE_CHECKOUT, PHASE_CONFIGURE]
def do_dist(self, buildscript):
buildscript.set_action(_('Creating tarball for'), self)
makeargs = self.get_makeargs(buildscript)
cmd = '%s %s dist' % (os.environ.get('MAKE', 'make'), makeargs)
buildscript.execute(cmd, cwd = self.get_builddir(buildscript),
extra_env = self.extra_env)
do_dist.depends = [PHASE_CONFIGURE]
do_dist.error_phases = [PHASE_FORCE_CHECKOUT, PHASE_CONFIGURE]
def do_distcheck(self, buildscript):
buildscript.set_action(_('Dist checking'), self)
makeargs = self.get_makeargs(buildscript)
cmd = '%s %s distcheck' % (os.environ.get('MAKE', 'make'), makeargs)
buildscript.execute(cmd, cwd = self.get_builddir(buildscript),
extra_env = self.extra_env)
do_distcheck.depends = [PHASE_DIST]
do_distcheck.error_phases = [PHASE_FORCE_CHECKOUT, PHASE_CONFIGURE]
def do_install(self, buildscript):
buildscript.set_action(_('Installing'), self)
destdir = self.prepare_installroot(buildscript)
if self.makeinstallargs:
cmd = '%s %s DESTDIR=%s' % (os.environ.get('MAKE', 'make'),
self.makeinstallargs,
destdir)
else:
cmd = '%s install DESTDIR=%s' % (os.environ.get('MAKE', 'make'),
destdir)
buildscript.execute(cmd, cwd = self.get_builddir(buildscript),
extra_env = self.extra_env)
self.process_install(buildscript, self.get_revision())
do_install.depends = [PHASE_BUILD]
def skip_install(self, buildscript, last_phase):
return self.config.noinstall or self.skip_install_phase
def skip_distclean(self, buildscript, last_phase):
builddir = self.get_builddir(buildscript)
if not os.path.exists(builddir):
return True
return False
def do_distclean(self, buildscript):
buildscript.set_action(_('Distcleaning'), self)
if hasattr(self.branch, 'delete_unknown_files'):
self.branch.delete_unknown_files(buildscript)
else:
makeargs = self.get_makeargs(buildscript)
cmd = '%s %s distclean' % (os.environ.get('MAKE', 'make'), makeargs)
buildscript.execute(cmd, cwd = self.get_builddir(buildscript),
extra_env = self.extra_env)
do_distclean.depends = [PHASE_CHECKOUT]
def xml_tag_and_attrs(self):
return ('autotools',
[('autogenargs', 'autogenargs', ''),
('id', 'name', None),
('makeargs', 'makeargs', ''),
('makeinstallargs', 'makeinstallargs', ''),
('supports-non-srcdir-builds',
'supports_non_srcdir_builds', True),
('skip-autogen', 'skip_autogen', False),
('skip-install', 'skip_install_phase', False),
('autogen-sh', 'autogen_sh', 'autogen.sh'),
('makefile', 'makefile', 'Makefile'),
('supports-static-analyzer', 'supports_static_analyzer', True),
('autogen-template', 'autogen_template', None)])
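# Module-level parser for <autotools> elements: builds an AutogenModule from the
# node and copies any attributes present on the element onto the instance.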
def parse_autotools(node, config, uri, repositories, default_repo):
instance = AutogenModule.parse_from_xml(node, config, uri, repositories, default_repo)
if node.hasAttribute('autogenargs'):
autogenargs = node.getAttribute('autogenargs')
instance.autogenargs = instance.eval_args(autogenargs)
if node.hasAttribute('makeargs'):
makeargs = node.getAttribute('makeargs')
instance.makeargs = instance.eval_args(makeargs)
if node.hasAttribute('makeinstallargs'):
makeinstallargs = node.getAttribute('makeinstallargs')
instance.makeinstallargs = instance.eval_args(makeinstallargs)
if node.hasAttribute('supports-non-srcdir-builds'):
instance.supports_non_srcdir_builds = \
(node.getAttribute('supports-non-srcdir-builds') != 'no')
if node.hasAttribute('skip-autogen'):
skip_autogen = node.getAttribute('skip-autogen')
if skip_autogen == 'true':
instance.skip_autogen = True
elif skip_autogen == 'never':
instance.skip_autogen = 'never'
if node.hasAttribute('skip-install'):
skip_install = node.getAttribute('skip-install')
if skip_install.lower() in ('true', 'yes'):
instance.skip_install_phase = True
else:
instance.skip_install_phase = False
if node.hasAttribute('check-target'):
instance.check_target = (node.getAttribute('check-target') == 'true')
if node.hasAttribute('static-analyzer'):
instance.supports_static_analyzer = (node.getAttribute('static-analyzer') == 'true')
from jhbuild.versioncontrol.tarball import TarballBranch
if node.hasAttribute('autogen-sh'):
autogen_sh = node.getAttribute('autogen-sh')
if autogen_sh is not None:
instance.autogen_sh = autogen_sh
elif isinstance(instance.branch, TarballBranch):
# in tarballs, force autogen-sh to be configure, unless autogen-sh is
# already set
instance.autogen_sh = 'configure'
if node.hasAttribute('makefile'):
instance.makefile = node.getAttribute('makefile')
if node.hasAttribute('autogen-template'):
instance.autogen_template = node.getAttribute('autogen-template')
return instance
register_module_type('autotools', parse_autotools)
|
henaras/horizon | refs/heads/master | openstack_dashboard/dashboards/admin/networks/subnets/urls.py | 54 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.networks.subnets import views
SUBNETS = r'^(?P<subnet_id>[^/]+)/%s$'
VIEW_MOD = 'openstack_dashboard.dashboards.admin.networks.subnets.views'
urlpatterns = patterns(
VIEW_MOD,
url(SUBNETS % 'detail', views.DetailView.as_view(), name='detail')
)
|
JamesMura/sentry | refs/heads/master | src/sentry/south_migrations/0102_ensure_slugs.py | 6 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
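# Data migration: backfill a slug, derived from the team name, for every Team
# row that still has a NULL slug.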
class Migration(DataMigration):
def forwards(self, orm):
from sentry.models import slugify_instance
for team in orm['sentry.Team'].objects.filter(slug__isnull=True):
slugify_instance(team, team.name)
team.save()
def backwards(self, orm):
pass
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'})
},
u'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': u"orm['sentry.AlertRelatedGroup']", 'to': u"orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
u'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
u'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"})
},
u'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'unique': 'True'})
},
u'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': u"orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']", 'null': 'True'})
},
u'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': u"orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': u"orm['sentry.User']"})
},
u'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': u"orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': u"orm['sentry.TeamMember']", 'to': u"orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
symmetrical = True
|
LudwigKnuepfer/RIOT | refs/heads/master | tests/pkg_fatfs_vfs/tests/01-run.py | 5 | #!/usr/bin/env python3
# Copyright (C) 2017 HAW-Hamburg.de
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
class TestFailed(Exception):
pass
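# Consume the test output line by line: expect index 0 matches a passing test,
# index 1 the end-of-tests marker; a FAILED line or any other unexpected output
# raises TestFailed.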
def testfunc(child):
child.expect(u"Tests for FatFs over VFS - test results will be printed in "
"the format test_name:result\r\n")
while True:
res = child.expect([u"[^\n]*:\[OK\]\r\n",
u"Test end.\r\n",
u".[^\n]*:\[FAILED\]\r\n",
u".*\r\n"])
if res > 1:
raise TestFailed(child.after.split(':', 1)[0] + " test failed!")
elif res == 1:
break
if __name__ == "__main__":
sys.path.append(os.path.join(os.environ['RIOTBASE'], 'dist/tools/testrunner'))
import testrunner
sys.exit(testrunner.run(testfunc))
|
JiscPER/esprit | refs/heads/master | esprit/mappings.py | 1 | EXACT = {
"default" : {
"match" : "*",
"match_mapping_type": "string",
"mapping" : {
"type" : "multi_field",
"fields" : {
"{name}" : {"type" : "{dynamic_type}", "index" : "analyzed", "store" : "no"},
"exact" : {"type" : "{dynamic_type}", "index" : "not_analyzed", "store" : "yes"}
}
}
}
}
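# EXACT is an Elasticsearch dynamic template: every string field is mapped as a
# multi_field with an analyzed default plus a not_analyzed "exact" sub-field
# for exact-match queries.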
def properties(field_mappings):
return {"properties" : field_mappings}
def type_mapping(field, type):
return {field : {"type" : type}}
def make_mapping(type):
# FIXME: obviously this is not all there is to it
return {"type" : type}
def dynamic_type_template(name, match, mapping):
return {
name : {
"match" : match,
"mapping" : mapping
}
}
def dynamic_templates(templates):
return {"dynamic_templates" : templates}
def for_type(typename, *mapping):
full_mapping = {}
for m in mapping:
full_mapping.update(m)
return { typename : full_mapping }
def parent(childtype, parenttype):
return {
childtype : {
"_parent" : {
"type" : parenttype
}
}
}
|
vsmolyakov/cv | refs/heads/master | image_search/image_search.py | 1 |
import numpy as np
import matplotlib.pyplot as plt
import os
import random
from PIL import Image
import h5py
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers import Convolution2D, ZeroPadding2D, MaxPooling2D
from keras.optimizers import SGD
from keras import backend as K
from sklearn.decomposition import PCA
from scipy.spatial import distance
from sklearn.manifold import TSNE
K.set_image_dim_ordering('th')
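# Load an image, force RGB, resize it to VGG-16's 224x224 input, reorder to
# channels-first and add a leading batch dimension.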
def get_image(path):
img = Image.open(path)
if img.mode != "RGB":
img = img.convert("RGB")
img = img.resize((224, 224), Image.ANTIALIAS)
img = np.array(img.getdata(), np.uint8)
img = img.reshape(224, 224, 3).astype(np.float32)
img = img.transpose((2,0,1))
img = np.expand_dims(img, axis=0)
return img
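# Build the VGG-16 network and load pretrained weights from the given HDF5
# file.  The final softmax layer is omitted, so predict() yields the
# 4096-dimensional activations of the second fully-connected layer, used here
# as image descriptors.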
def VGG_16(weights_path):
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
f = h5py.File(weights_path)
for k in range(f.attrs['nb_layers']):
if k >= len(model.layers):
break
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
model.layers[k].set_weights(weights)
print("finished loading VGG-16 weights...")
return model
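# Concatenate the thumbnails of the given images into a single horizontal strip
# for display.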
def get_concatenated_images(images, indexes, thumb_height):
thumbs = []
for idx in indexes:
img = Image.open(images[idx])
img = img.resize((img.width * thumb_height / img.height, thumb_height), Image.ANTIALIAS)
if img.mode != "RGB":
img = img.convert("RGB")
thumbs.append(img)
concat_image = np.concatenate([np.asarray(t) for t in thumbs], axis=1)
return concat_image
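# Rank all images by Euclidean distance to the query in activation space and
# return the indices of the num_results closest matches, excluding the query
# itself.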
def get_closest_images(acts, query_image_idx, num_results=5):
distances = [distance.euclidean(acts[query_image_idx], act) for act in acts]
idx_closest = sorted(range(len(distances)), key=lambda k: distances[k])[1:num_results+1]
return idx_closest
if __name__ == "__main__":
vgg_path = "./data/vgg16/vgg16_weights.h5"
images_path = "./data/101_ObjectCategories"
num_images = 5000
model = VGG_16(vgg_path)
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
images = [os.path.join(dp,f) for dp, dn, filenames in os.walk(images_path) for f in filenames \
if os.path.splitext(f)[1].lower() in ['.jpg','.png','.jpeg']]
if num_images < len(images):
images = [images[i] for i in sorted(random.sample(xrange(len(images)), num_images))]
print('reading %d images...' %len(images))
activations = []
for idx, image_path in enumerate(images):
if idx % 10 == 0:
print('getting activations for %d/%d image...' %(idx,len(images)))
image = get_image(image_path)
acts = model.predict(image)
activations.append(acts)
f = plt.figure()
plt.plot(np.array(activations[0]))
    f.savefig('./activations.png')
# reduce activation dimension
print('computing PCA...')
acts = np.concatenate(activations, axis=0)
pca = PCA(n_components=300)
pca.fit(acts)
acts = pca.transform(acts)
# image search
print('image search...')
query_image_idx = int(num_images*random.random())
idx_closest = get_closest_images(acts, query_image_idx)
query_image = get_concatenated_images(images, [query_image_idx], 300)
results_image = get_concatenated_images(images, idx_closest, 300)
f = plt.figure()
plt.imshow(query_image)
plt.title("query image (%d)" %query_image_idx)
f.savefig("./query.png")
f = plt.figure()
plt.imshow(results_image)
plt.title("result images")
f.savefig("./result_images.png")
|
zulip/zulip | refs/heads/master | zerver/migrations/0032_verify_all_medium_avatar_images.py | 3 | import hashlib
from unittest.mock import patch
from django.conf import settings
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from zerver.lib.upload import upload_backend
from zerver.lib.utils import make_safe_digest
from zerver.models import UserProfile
# We hackishly patch this function in order to revert it to the state
# it had when this migration was first written. This is a balance
# between copying in a historical version of hundreds of lines of code
# from zerver.lib.upload (which would be pretty annoying) and just using
# the current version, which doesn't work
# since we rearranged the avatars in Zulip 1.6.
def patched_user_avatar_path(user_profile: UserProfile) -> str:
email = user_profile.email
user_key = email.lower() + settings.AVATAR_SALT
return make_safe_digest(user_key, hashlib.sha1)
@patch("zerver.lib.upload.user_avatar_path", patched_user_avatar_path)
def verify_medium_avatar_image(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
user_profile_model = apps.get_model("zerver", "UserProfile")
for user_profile in user_profile_model.objects.filter(avatar_source="U"):
upload_backend.ensure_avatar_image(user_profile, is_medium=True)
class Migration(migrations.Migration):
dependencies = [
("zerver", "0031_remove_system_avatar_source"),
]
operations = [
migrations.RunPython(verify_medium_avatar_image, elidable=True),
]
|
Weihonghao/ECM | refs/heads/master | Vpy34/lib/python3.5/site-packages/pip/_vendor/html5lib/_trie/_base.py | 354 | from __future__ import absolute_import, division, unicode_literals
from collections import Mapping
class Trie(Mapping):
"""Abstract base class for tries"""
def keys(self, prefix=None):
# pylint:disable=arguments-differ
keys = super(Trie, self).keys()
if prefix is None:
return set(keys)
# Python 2.6: no set comprehensions
return set([x for x in keys if x.startswith(prefix)])
def has_keys_with_prefix(self, prefix):
for key in self.keys():
if key.startswith(prefix):
return True
return False
def longest_prefix(self, prefix):
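        """Return the longest key stored in the trie that is a prefix of ``prefix``."""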
if prefix in self:
return prefix
for i in range(1, len(prefix) + 1):
if prefix[:-i] in self:
return prefix[:-i]
raise KeyError(prefix)
def longest_prefix_item(self, prefix):
lprefix = self.longest_prefix(prefix)
return (lprefix, self[lprefix])
|
Intel-bigdata/s3-tests | refs/heads/master | setup.py | 5 | #!/usr/bin/python
from setuptools import setup, find_packages
setup(
name='s3tests',
version='0.0.1',
packages=find_packages(),
author='Tommi Virtanen',
author_email='[email protected]',
description='Unofficial Amazon AWS S3 compatibility tests',
license='MIT',
keywords='s3 web testing',
install_requires=[
'boto >=2.0b4',
'PyYAML',
'bunch >=1.0.0',
'gevent >=1.0',
'isodate >=0.4.4',
],
entry_points={
'console_scripts': [
's3tests-generate-objects = s3tests.generate_objects:main',
's3tests-test-readwrite = s3tests.readwrite:main',
's3tests-test-roundtrip = s3tests.roundtrip:main',
's3tests-fuzz-headers = s3tests.fuzz.headers:main',
's3tests-analysis-rwstats = s3tests.analysis.rwstats:main',
],
},
)
|
foxish/test-infra | refs/heads/master | gubernator/main_test.py | 4 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
To run these tests:
$ pip install webtest nosegae
$ nosetests --with-gae --gae-lib-root ~/google_appengine/
"""
import unittest
import webtest
import cloudstorage as gcs
import main
import gcs_async
import gcs_async_test
write = gcs_async_test.write
app = webtest.TestApp(main.app)
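# Minimal junit XML fixture used to populate fake build artifacts in the tests.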
JUNIT_SUITE = """<testsuite tests="8" failures="0" time="1000.24">
<testcase name="First" classname="Example e2e suite" time="0">
<skipped/>
</testcase>
<testcase name="Second" classname="Example e2e suite" time="36.49"/>
<testcase name="Third" classname="Example e2e suite" time="96.49">
<failure>/go/src/k8s.io/kubernetes/test.go:123
Error Goes Here</failure>
</testcase>
</testsuite>"""
def init_build(build_dir, started=True, finished=True,
finished_has_version=False):
"""Create faked files for a build."""
start_json = {'timestamp': 1406535800}
finish_json = {'passed': True, 'result': 'SUCCESS', 'timestamp': 1406536800}
(finish_json if finished_has_version else start_json)['version'] = 'v1+56'
if started:
write(build_dir + 'started.json', start_json)
if finished:
write(build_dir + 'finished.json', finish_json)
write(build_dir + 'artifacts/junit_01.xml', JUNIT_SUITE)
class TestBase(unittest.TestCase):
def init_stubs(self):
self.testbed.init_memcache_stub()
self.testbed.init_app_identity_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_datastore_v3_stub()
self.testbed.init_app_identity_stub()
# redirect GCS calls to the local proxy
gcs_async.GCS_API_URL = gcs.common.local_api_url()
class AppTest(TestBase):
# pylint: disable=too-many-public-methods
BUILD_DIR = '/kubernetes-jenkins/logs/somejob/1234/'
def setUp(self):
self.init_stubs()
init_build(self.BUILD_DIR)
def test_index(self):
"""Test that the index works."""
response = app.get('/')
self.assertIn('kubernetes-e2e-gce', response)
def test_nodelog_missing_files(self):
"""Test that a missing all files gives a 404."""
build_dir = self.BUILD_DIR + 'nodelog?pod=abc'
response = app.get('/build' + build_dir, status=404)
self.assertIn('Unable to find', response)
def test_nodelog_kubelet(self):
"""Test for a kubelet file with junit file.
- missing the default kube-apiserver"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_nodelog_apiserver(self):
"""Test for default apiserver file
- no kubelet file to find objrefdict
- no file with junit file"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log',
'apiserver pod abc\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_nodelog_no_junit(self):
"""Test for when no junit in same folder
- multiple folders"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-2/kube-apiserver.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("tmp-node-2", response)
def test_nodelog_no_junit_apiserver(self):
"""Test for when no junit in same folder
- multiple folders
- no kube-apiserver.log"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/docker.log',
'Containers\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-2/kubelet.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("tmp-node-2", response)
def test_no_failed_pod(self):
"""Test that filtering page still loads when no failed pod name is given"""
nodelog_url = self.BUILD_DIR + 'nodelog?junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"} failed)\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_parse_by_timestamp(self):
"""Test parse_by_timestamp and get_woven_logs
- Weave separate logs together by timestamp
- Check that lines without timestamp are combined
- Test different timestamp formats"""
kubelet_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log'
kubeapi_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log'
query_string = 'nodelog?pod=abc&junit=junit_01.xml&weave=on&logfiles=%s&logfiles=%s' % (
kubelet_filepath, kubeapi_filepath)
nodelog_url = self.BUILD_DIR + query_string
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(kubelet_filepath,
'abc\n0101 01:01:01.001 Event(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
write(kubeapi_filepath,
'0101 01:01:01.000 kubeapi\n0101 01:01:01.002 pod\n01-01T01:01:01.005Z last line')
expected = ('0101 01:01:01.000 kubeapi\n'
'<span class="highlight">abc0101 01:01:01.001 Event(api.ObjectReference{Name:'
'"<span class="keyword">abc</span>", UID:"podabc"})</span>\n'
'0101 01:01:01.002 pod\n'
'01-01T01:01:01.005Z last line')
response = app.get('/build' + nodelog_url)
print response
self.assertIn(expected, response)
def test_timestamp_no_apiserver(self):
"""Test parse_by_timestamp and get_woven_logs without an apiserver file
- Weave separate logs together by timestamp
- Check that lines without timestamp are combined
- Test different timestamp formats
- no kube-apiserver.log"""
kubelet_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log'
proxy_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kube-proxy.log'
query_string = 'nodelog?pod=abc&junit=junit_01.xml&weave=on&logfiles=%s&logfiles=%s' % (
kubelet_filepath, proxy_filepath)
nodelog_url = self.BUILD_DIR + query_string
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(kubelet_filepath,
'abc\n0101 01:01:01.001 Event(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
write(proxy_filepath,
'0101 01:01:01.000 proxy\n0101 01:01:01.002 pod\n01-01T01:01:01.005Z last line')
expected = ('0101 01:01:01.000 proxy\n'
'<span class="highlight">abc0101 01:01:01.001 Event(api.ObjectReference{Name:'
'"<span class="keyword">abc</span>", UID:"podabc"})</span>\n'
'0101 01:01:01.002 pod\n'
'01-01T01:01:01.005Z last line')
response = app.get('/build' + nodelog_url)
self.assertIn(expected, response)
|
davidzchen/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_grad_test.py | 9 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CSR sparse matrix tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_grad # pylint: disable=unused-import
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
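# Helper: convert a dense tensor to a CSR sparse matrix using the locations of
# its entries with non-zero magnitude; gradients are stopped through the index
# computation.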
def dense_to_csr_sparse_matrix(dense):
dense_t = ops.convert_to_tensor(dense)
locs = array_ops.stop_gradient(array_ops.where(math_ops.abs(dense_t) > 0))
return sparse_csr_matrix_ops.dense_to_csr_sparse_matrix(dense_t, locs)
def _add_test(test, op_name, testcase_name, fn): # pylint: disable=redefined-outer-name
if fn is None:
return
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class CSRSparseMatrixGradTest(test.TestCase):
@classmethod
def setUpClass(cls):
super(CSRSparseMatrixGradTest, cls).setUpClass()
cls._gpu_available = test_util.is_gpu_available()
# TODO(penporn): Make these tests runnable on eager mode.
# (tf.gradients and gradient_checker only run in graph mode.)
@test_util.run_deprecated_v1
def testLargeBatchConversionGrad(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
for dense_shape in ([53, 65, 127], [127, 65]):
mats_val = sparsify(np.random.randn(*dense_shape))
with self.test_session(use_gpu=True) as sess:
mats = math_ops.cast(mats_val, dtype=dtypes.float32)
sparse_mats = dense_to_csr_sparse_matrix(mats)
dense_mats = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
sparse_mats, dtypes.float32)
grad_vals = np.random.randn(*dense_shape).astype(np.float32)
grad_out = gradients_impl.gradients([dense_mats], [mats],
[grad_vals])[0]
self.assertEqual(grad_out.dtype, dtypes.float32)
self.assertEqual(grad_out.shape, dense_shape)
grad_out_value = sess.run(grad_out)
tf_logging.info("testLargeBatchConversionGrad: Testing shape %s" %
dense_shape)
self.assertAllEqual(grad_vals, grad_out_value)
@test_util.run_deprecated_v1
def testLargeBatchSparseMatrixAddGrad(self):
if not self._gpu_available:
return
if test.is_built_with_rocm():
self.skipTest("sparse-matrix-add op not supported on ROCm")
sparsify = lambda m: m * (m > 0)
for dense_shape in ([53, 65, 127], [127, 65]):
a_mats_val = sparsify(np.random.randn(*dense_shape))
b_mats_val = sparsify(np.random.randn(*dense_shape))
alpha = np.float32(0.5)
beta = np.float32(-1.5)
grad_vals = np.random.randn(*dense_shape).astype(np.float32)
expected_a_grad = alpha * grad_vals
expected_b_grad = beta * grad_vals
with self.test_session(use_gpu=True) as sess:
a_mats = math_ops.cast(a_mats_val, dtype=dtypes.float32)
b_mats = math_ops.cast(b_mats_val, dtype=dtypes.float32)
a_sm = dense_to_csr_sparse_matrix(a_mats)
b_sm = dense_to_csr_sparse_matrix(b_mats)
c_sm = sparse_csr_matrix_ops.sparse_matrix_add(
a_sm, b_sm, alpha=alpha, beta=beta)
c_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, dtypes.float32)
a_grad, b_grad = gradients_impl.gradients([c_dense], [a_mats, b_mats],
[grad_vals])
self.assertEqual(a_grad.dtype, dtypes.float32)
self.assertEqual(b_grad.dtype, dtypes.float32)
self.assertEqual(a_grad.shape, dense_shape)
self.assertEqual(b_grad.shape, dense_shape)
a_grad_value, b_grad_value = sess.run((a_grad, b_grad))
tf_logging.info("testLargeBatchConversionGrad: Testing shape %s" %
dense_shape)
self.assertAllEqual(expected_a_grad, a_grad_value)
self.assertAllEqual(expected_b_grad, b_grad_value)
if __name__ == "__main__":
test.main()
|
chrisndodge/edx-platform | refs/heads/master | common/djangoapps/embargo/admin.py | 154 | """
Django admin page for embargo models
"""
from django.contrib import admin
import textwrap
from config_models.admin import ConfigurationModelAdmin
from embargo.models import IPFilter, CountryAccessRule, RestrictedCourse
from embargo.forms import IPFilterForm, RestrictedCourseForm
class IPFilterAdmin(ConfigurationModelAdmin):
"""Admin for blacklisting/whitelisting specific IP addresses"""
form = IPFilterForm
fieldsets = (
(None, {
'fields': ('enabled', 'whitelist', 'blacklist'),
'description': textwrap.dedent("""Enter specific IP addresses to explicitly
whitelist (not block) or blacklist (block) in the appropriate box below.
Separate IP addresses with a comma. Do not surround with quotes.
""")
}),
)
class CountryAccessRuleInline(admin.StackedInline):
"""Inline editor for country access rules. """
model = CountryAccessRule
extra = 1
def has_delete_permission(self, request, obj=None):
return True
class RestrictedCourseAdmin(admin.ModelAdmin):
"""Admin for configuring course restrictions. """
inlines = [CountryAccessRuleInline]
form = RestrictedCourseForm
admin.site.register(IPFilter, IPFilterAdmin)
admin.site.register(RestrictedCourse, RestrictedCourseAdmin)
|
saghul/uvent | refs/heads/master | setup.py | 1 | # -*- coding: utf-8 -*-
from setuptools import setup
from uvent import __version__
setup(
name = 'uvent',
version = __version__,
url = 'https://github.com/saghul/uvent',
author = 'Saúl Ibarra Corretgé',
author_email = '[email protected]',
description = 'A Gevent core implemented using libuv',
long_description = open('README.rst', 'r').read(),
#install_requires = ['pyuv>=0.10.0', 'gevent>=1.0'],
packages = ['uvent'],
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
#"Programming Language :: Python :: 3",
#"Programming Language :: Python :: 3.2"
]
)
|
davidnmurray/iris | refs/heads/master | lib/iris/tests/integration/format_interop/__init__.py | 17 | # (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Integration tests for format interoperability."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
|
Zanzibar82/plugin.video.pelisalacarta_ui.pureita | refs/heads/master | servers/sendvid.py | 6 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for sendvid
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("pelisalacarta.servers.sendvid get_video_url(page_url='%s')" % page_url)
video_urls = []
data = scrapertools.cache_page(page_url)
#var video_source = "//cache-2.sendvid.com/1v0chsus.mp4";
media_url = "http:"+scrapertools.find_single_match(data,'var\s+video_source\s+\=\s+"([^"]+)"')
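    # The file is mirrored on cache-1 and cache-2; when one of them appears in
    # the URL, offer both mirrors as alternative video URLs.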
if "cache-1" in media_url:
video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" (cache1) [sendvid]",media_url])
video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" (cache2) [sendvid]",media_url.replace("cache-1","cache-2")])
elif "cache-2" in media_url:
video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" (cache1) [sendvid]",media_url.replace("cache-2","cache-1")])
video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" (cache2) [sendvid]",media_url])
else:
video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [sendvid]",media_url])
for video_url in video_urls:
logger.info("pelisalacarta.servers.sendvid %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Finds videos from this server in the given text
def find_videos(data):
encontrados = set()
devuelve = []
#sendvid.com/embed/1v0chsus
patronvideos = 'sendvid.com/embed/([a-zA-Z0-9]+)'
logger.info("pelisalacarta.servers.sendvid find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[sendvid]"
url = "http://sendvid.com/embed/"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'sendvid' ] )
encontrados.add(url)
else:
            logger.info("    duplicate url="+url)
return devuelve
|
WillisXChen/django-oscar | refs/heads/master | oscar/lib/python2.7/site-packages/sorl/thumbnail/compat.py | 3 | from __future__ import unicode_literals
import sys
import django
__all__ = [
'json',
'BufferIO',
'urlopen',
'urlparse',
'quote',
'quote_plus',
'URLError',
'force_unicode', 'text_type'
]
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# Django version
if django.VERSION < (1, 5):
from django.utils import simplejson as json
from django.utils.encoding import force_unicode
else:
import json
from django.utils.encoding import force_text as force_unicode
try:
from django.utils.encoding import smart_text
except ImportError:
from django.utils.encoding import smart_unicode as smart_text
# Python 2 and 3
if PY3:
from urllib.error import URLError
from urllib.request import urlopen
from urllib.parse import quote, quote_plus
import urllib.parse as urlparse
from io import BytesIO as BufferIO
text_type = str
string_type = str
def encode(value, charset='utf-8', errors='ignore'):
if isinstance(value, bytes):
return value
return value.encode(charset, errors)
def urlsplit(url):
return urlparse.urlsplit(url.decode('ascii', 'ignore'))
elif PY2:
from urllib2 import URLError
from urllib2 import urlopen
from urllib import quote, quote_plus
import urlparse
from cStringIO import StringIO as BufferIO
text_type = unicode
string_type = basestring
urlsplit = urlparse.urlsplit
def encode(value, charset='utf-8', errors='ignore'):
if isinstance(value, unicode):
return value.encode(charset, errors)
return unicode(value, errors=errors).encode(charset)
|
pombredanne/similarityPy | refs/heads/master | tests/measure_tests/string_data_tests/__init__.py | 98 | __author__ = 'cenk'
|
YOTOV-LIMITED/kuma | refs/heads/master | kuma/authkeys/views.py | 31 | from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, render, redirect
from django.contrib.auth.decorators import login_required, permission_required
from kuma.core.utils import paginate
from .models import Key
from .forms import KeyForm
ITEMS_PER_PAGE = 15
@login_required
@permission_required('authkeys.add_key', raise_exception=True)
def new(request):
context = {"key": None}
if request.method != "POST":
context['form'] = KeyForm()
else:
context['form'] = KeyForm(request.POST)
if context['form'].is_valid():
new_key = context['form'].save(commit=False)
new_key.user = request.user
context['secret'] = new_key.generate_secret()
new_key.save()
context['key'] = new_key
return render(request, 'authkeys/new.html', context)
@login_required
def list(request):
keys = Key.objects.filter(user=request.user)
return render(request, 'authkeys/list.html', dict(keys=keys))
@login_required
def history(request, pk):
key = get_object_or_404(Key, pk=pk)
if key.user != request.user:
raise PermissionDenied
items = key.history.all().order_by('-pk')
items = paginate(request, items, per_page=ITEMS_PER_PAGE)
context = {
'key': key,
'items': items,
}
return render(request, 'authkeys/history.html', context)
@login_required
@permission_required('authkeys.delete_key', raise_exception=True)
def delete(request, pk):
key = get_object_or_404(Key, pk=pk)
if key.user != request.user:
raise PermissionDenied
if request.method == "POST":
key.delete()
return redirect('authkeys.list')
return render(request, 'authkeys/delete.html', {'key': key})
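# Illustrative URLconf wiring for these views (pattern strings are assumptions;
# the view names match the redirect('authkeys.list') call above):
#
#   from django.conf.urls import url
#   from . import views
#
#   urlpatterns = [
#       url(r'^$', views.list, name='authkeys.list'),
#       url(r'^new$', views.new, name='authkeys.new'),
#       url(r'^(?P<pk>\d+)/history$', views.history, name='authkeys.history'),
#       url(r'^(?P<pk>\d+)/delete$', views.delete, name='authkeys.delete'),
#   ]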
|
dbertha/odoo | refs/heads/8.0 | addons/purchase_requisition/wizard/bid_line_qty.py | 374 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class bid_line_qty(osv.osv_memory):
_name = "bid.line.qty"
_description = "Change Bid line quantity"
_columns = {
'qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
}
def change_qty(self, cr, uid, ids, context=None):
active_ids = context and context.get('active_ids', [])
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.order.line').write(cr, uid, active_ids, {'quantity_bid': data.qty})
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
richardfergie/googleads-python-lib | refs/heads/master | examples/dfp/v201508/base_rate_service/__init__.py | 618 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
mr-led/stock_carrier | refs/heads/master | __openerp__.py | 1 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Stock Carrier',
'version': '0.1',
'category': 'Stock',
'description': "",
'author': 'Moldeo Interactive',
'website': 'http://business.moldeo.coop/',
'images': [],
'depends': ['stock','delivery'],
'demo': [],
'data': ['stock_view.xml'],
# 'data': [],
'test': [],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
evamwangi/bc-7-Todo_List | refs/heads/master | venv/Lib/site-packages/flask_login.py | 26 | # -*- coding: utf-8 -*-
'''
flask.ext.login
---------------
This module provides user session management for Flask. It lets you log
your users in and out in a database-independent manner.
:copyright: (c) 2011 by Matthew Frazier.
:license: MIT/X11, see LICENSE for more details.
'''
__version_info__ = ('0', '3', '2')
__version__ = '.'.join(__version_info__)
__author__ = 'Matthew Frazier'
__maintainer__ = 'Max Countryman'
__license__ = 'MIT/X11'
__copyright__ = '(c) 2011 by Matthew Frazier'
__all__ = ['LoginManager']
from flask import (_request_ctx_stack, abort, current_app, flash, redirect,
request, session, url_for, has_request_context)
from flask.signals import Namespace
from werkzeug.local import LocalProxy
from werkzeug.security import safe_str_cmp
from werkzeug.urls import url_decode, url_encode
from datetime import datetime, timedelta
from functools import wraps
from hashlib import sha512
import hmac
import warnings
import sys
if sys.version < '3': # pragma: no cover
from urlparse import urlparse, urlunparse
else: # pragma: no cover
from urllib.parse import urlparse, urlunparse
unicode = str
_signals = Namespace()
#: A proxy for the current user. If no user is logged in, this will be an
#: anonymous user
current_user = LocalProxy(lambda: _get_user())
#: The default name of the "remember me" cookie (``remember_token``)
COOKIE_NAME = 'remember_token'
#: The default time before the "remember me" cookie expires (365 days).
COOKIE_DURATION = timedelta(days=365)
#: Whether the "remember me" cookie requires Secure; defaults to ``None``
COOKIE_SECURE = None
#: Whether the "remember me" cookie uses HttpOnly or not; defaults to ``False``
COOKIE_HTTPONLY = False
#: The default flash message to display when users need to log in.
LOGIN_MESSAGE = u'Please log in to access this page.'
#: The default flash message category to display when users need to log in.
LOGIN_MESSAGE_CATEGORY = 'message'
#: The default flash message to display when users need to reauthenticate.
REFRESH_MESSAGE = u'Please reauthenticate to access this page.'
#: The default flash message category to display when users need to
#: reauthenticate.
REFRESH_MESSAGE_CATEGORY = 'message'
#: The default attribute to retrieve the unicode id of the user
ID_ATTRIBUTE = 'get_id'
#: Default name of the auth header (``Authorization``)
AUTH_HEADER_NAME = 'Authorization'
# A set of session keys that are populated by Flask-Login. Use this set to
# purge keys safely and accurately.
SESSION_KEYS = set(['user_id', 'remember', '_id', '_fresh'])
class LoginManager(object):
'''
This object is used to hold the settings used for logging in. Instances of
:class:`LoginManager` are *not* bound to specific apps, so you can create
one in the main body of your code and then bind it to your
app in a factory function.
'''
def __init__(self, app=None, add_context_processor=True):
#: A class or factory function that produces an anonymous user, which
#: is used when no one is logged in.
self.anonymous_user = AnonymousUserMixin
#: The name of the view to redirect to when the user needs to log in.
#: (This can be an absolute URL as well, if your authentication
#: machinery is external to your application.)
self.login_view = None
#: Names of views to redirect to when the user needs to log in,
#: per blueprint. If the key value is set to None the value of
#: :attr:`login_view` will be used instead.
self.blueprint_login_views = {}
#: The message to flash when a user is redirected to the login page.
self.login_message = LOGIN_MESSAGE
#: The message category to flash when a user is redirected to the login
#: page.
self.login_message_category = LOGIN_MESSAGE_CATEGORY
#: The name of the view to redirect to when the user needs to
#: reauthenticate.
self.refresh_view = None
#: The message to flash when a user is redirected to the 'needs
#: refresh' page.
self.needs_refresh_message = REFRESH_MESSAGE
#: The message category to flash when a user is redirected to the
#: 'needs refresh' page.
self.needs_refresh_message_category = REFRESH_MESSAGE_CATEGORY
#: The mode to use session protection in. This can be either
#: ``'basic'`` (the default) or ``'strong'``, or ``None`` to disable
#: it.
self.session_protection = 'basic'
#: If present, used to translate flash messages ``self.login_message``
#: and ``self.needs_refresh_message``
self.localize_callback = None
self.token_callback = None
self.user_callback = None
self.unauthorized_callback = None
self.needs_refresh_callback = None
self.id_attribute = ID_ATTRIBUTE
self.header_callback = None
self.request_callback = None
if app is not None:
self.init_app(app, add_context_processor)
def setup_app(self, app, add_context_processor=True): # pragma: no cover
'''
This method has been deprecated. Please use
:meth:`LoginManager.init_app` instead.
'''
warnings.warn('Warning setup_app is deprecated. Please use init_app.',
DeprecationWarning)
self.init_app(app, add_context_processor)
def init_app(self, app, add_context_processor=True):
'''
Configures an application. This registers an `after_request` call, and
attaches this `LoginManager` to it as `app.login_manager`.
:param app: The :class:`flask.Flask` object to configure.
:type app: :class:`flask.Flask`
:param add_context_processor: Whether to add a context processor to
the app that adds a `current_user` variable to the template.
Defaults to ``True``.
:type add_context_processor: bool
'''
app.login_manager = self
app.after_request(self._update_remember_cookie)
self._login_disabled = app.config.get('LOGIN_DISABLED', False)
if add_context_processor:
app.context_processor(_user_context_processor)
def unauthorized(self):
'''
This is called when the user is required to log in. If you register a
callback with :meth:`LoginManager.unauthorized_handler`, then it will
be called. Otherwise, it will take the following actions:
- Flash :attr:`LoginManager.login_message` to the user.
- If the app is using blueprints find the login view for
the current blueprint using `blueprint_login_views`. If the app
is not using blueprints or the login view for the current
blueprint is not specified use the value of `login_view`.
Redirect the user to the login view. (The page they were
attempting to access will be passed in the ``next`` query
string variable, so you can redirect there if present instead
of the homepage.)
If :attr:`LoginManager.login_view` is not defined, then it will simply
raise a HTTP 401 (Unauthorized) error instead.
This should be returned from a view or before/after_request function,
otherwise the redirect will have no effect.
'''
user_unauthorized.send(current_app._get_current_object())
if self.unauthorized_callback:
return self.unauthorized_callback()
if request.blueprint in self.blueprint_login_views:
login_view = self.blueprint_login_views[request.blueprint]
else:
login_view = self.login_view
if not login_view:
abort(401)
if self.login_message:
if self.localize_callback is not None:
flash(self.localize_callback(self.login_message),
category=self.login_message_category)
else:
flash(self.login_message, category=self.login_message_category)
return redirect(login_url(login_view, request.url))
def user_loader(self, callback):
'''
This sets the callback for reloading a user from the session. The
function you set should take a user ID (a ``unicode``) and return a
user object, or ``None`` if the user does not exist.
:param callback: The callback for retrieving a user object.
:type callback: callable
'''
self.user_callback = callback
return callback
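    # Typical user_loader callback (the User model and query are illustrative,
    # not part of this module):
    #
    #   @login_manager.user_loader
    #   def load_user(user_id):
    #       return User.query.get(user_id)  # return None if the id is unknown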
def header_loader(self, callback):
'''
This sets the callback for loading a user from a header value.
The function you set should take an authentication token and
return a user object, or `None` if the user does not exist.
:param callback: The callback for retrieving a user object.
:type callback: callable
'''
self.header_callback = callback
return callback
def request_loader(self, callback):
'''
This sets the callback for loading a user from a Flask request.
The function you set should take Flask request object and
return a user object, or `None` if the user does not exist.
:param callback: The callback for retrieving a user object.
:type callback: callable
'''
self.request_callback = callback
return callback
def token_loader(self, callback):
'''
This sets the callback for loading a user from an authentication
token. The function you set should take an authentication token
(a ``unicode``, as returned by a user's `get_auth_token` method) and
return a user object, or ``None`` if the user does not exist.
:param callback: The callback for retrieving a user object.
:type callback: callable
'''
self.token_callback = callback
return callback
def unauthorized_handler(self, callback):
'''
This will set the callback for the `unauthorized` method, which among
other things is used by `login_required`. It takes no arguments, and
should return a response to be sent to the user instead of their
normal view.
:param callback: The callback for unauthorized users.
:type callback: callable
'''
self.unauthorized_callback = callback
return callback
def needs_refresh_handler(self, callback):
'''
This will set the callback for the `needs_refresh` method, which among
other things is used by `fresh_login_required`. It takes no arguments,
and should return a response to be sent to the user instead of their
normal view.
:param callback: The callback for unauthorized users.
:type callback: callable
'''
self.needs_refresh_callback = callback
return callback
def needs_refresh(self):
'''
This is called when the user is logged in, but they need to be
reauthenticated because their session is stale. If you register a
callback with `needs_refresh_handler`, then it will be called.
Otherwise, it will take the following actions:
- Flash :attr:`LoginManager.needs_refresh_message` to the user.
- Redirect the user to :attr:`LoginManager.refresh_view`. (The page
they were attempting to access will be passed in the ``next``
query string variable, so you can redirect there if present
instead of the homepage.)
If :attr:`LoginManager.refresh_view` is not defined, then it will
simply raise a HTTP 401 (Unauthorized) error instead.
This should be returned from a view or before/after_request function,
otherwise the redirect will have no effect.
'''
user_needs_refresh.send(current_app._get_current_object())
if self.needs_refresh_callback:
return self.needs_refresh_callback()
if not self.refresh_view:
abort(401)
if self.localize_callback is not None:
flash(self.localize_callback(self.needs_refresh_message),
category=self.needs_refresh_message_category)
else:
flash(self.needs_refresh_message,
category=self.needs_refresh_message_category)
return redirect(login_url(self.refresh_view, request.url))
def reload_user(self, user=None):
ctx = _request_ctx_stack.top
if user is None:
user_id = session.get('user_id')
if user_id is None:
ctx.user = self.anonymous_user()
else:
if self.user_callback is None:
raise Exception(
"No user_loader has been installed for this "
"LoginManager. Add one with the "
"'LoginManager.user_loader' decorator.")
user = self.user_callback(user_id)
if user is None:
ctx.user = self.anonymous_user()
else:
ctx.user = user
else:
ctx.user = user
def _load_user(self):
'''Loads user from session or remember_me cookie as applicable'''
user_accessed.send(current_app._get_current_object())
# first check SESSION_PROTECTION
config = current_app.config
if config.get('SESSION_PROTECTION', self.session_protection):
deleted = self._session_protection()
if deleted:
return self.reload_user()
# If a remember cookie is set, and the session is not, move the
# cookie user ID to the session.
#
# However, the session may have been set if the user has been
# logged out on this request, 'remember' would be set to clear,
# so we should check for that and not restore the session.
is_missing_user_id = 'user_id' not in session
if is_missing_user_id:
cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
header_name = config.get('AUTH_HEADER_NAME', AUTH_HEADER_NAME)
has_cookie = (cookie_name in request.cookies and
session.get('remember') != 'clear')
if has_cookie:
return self._load_from_cookie(request.cookies[cookie_name])
elif self.request_callback:
return self._load_from_request(request)
elif header_name in request.headers:
return self._load_from_header(request.headers[header_name])
return self.reload_user()
def _session_protection(self):
sess = session._get_current_object()
ident = _create_identifier()
app = current_app._get_current_object()
mode = app.config.get('SESSION_PROTECTION', self.session_protection)
# if the sess is empty, it's an anonymous user or just logged out
# so we can skip this
if sess and ident != sess.get('_id', None):
if mode == 'basic' or sess.permanent:
sess['_fresh'] = False
session_protected.send(app)
return False
elif mode == 'strong':
for k in SESSION_KEYS:
sess.pop(k, None)
sess['remember'] = 'clear'
session_protected.send(app)
return True
return False
def _load_from_cookie(self, cookie):
if self.token_callback:
user = self.token_callback(cookie)
if user is not None:
session['user_id'] = getattr(user, self.id_attribute)()
session['_fresh'] = False
_request_ctx_stack.top.user = user
else:
self.reload_user()
else:
user_id = decode_cookie(cookie)
if user_id is not None:
session['user_id'] = user_id
session['_fresh'] = False
self.reload_user()
if _request_ctx_stack.top.user is not None:
app = current_app._get_current_object()
user_loaded_from_cookie.send(app, user=_get_user())
def _load_from_header(self, header):
user = None
if self.header_callback:
user = self.header_callback(header)
if user is not None:
self.reload_user(user=user)
app = current_app._get_current_object()
user_loaded_from_header.send(app, user=_get_user())
else:
self.reload_user()
def _load_from_request(self, request):
user = None
if self.request_callback:
user = self.request_callback(request)
if user is not None:
self.reload_user(user=user)
app = current_app._get_current_object()
user_loaded_from_request.send(app, user=_get_user())
else:
self.reload_user()
def _update_remember_cookie(self, response):
# Don't modify the session unless there's something to do.
if 'remember' in session:
operation = session.pop('remember', None)
if operation == 'set' and 'user_id' in session:
self._set_cookie(response)
elif operation == 'clear':
self._clear_cookie(response)
return response
def _set_cookie(self, response):
# cookie settings
config = current_app.config
cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
duration = config.get('REMEMBER_COOKIE_DURATION', COOKIE_DURATION)
domain = config.get('REMEMBER_COOKIE_DOMAIN')
path = config.get('REMEMBER_COOKIE_PATH', '/')
secure = config.get('REMEMBER_COOKIE_SECURE', COOKIE_SECURE)
httponly = config.get('REMEMBER_COOKIE_HTTPONLY', COOKIE_HTTPONLY)
# prepare data
if self.token_callback:
data = current_user.get_auth_token()
else:
data = encode_cookie(unicode(session['user_id']))
expires = datetime.utcnow() + duration
# actually set it
response.set_cookie(cookie_name,
value=data,
expires=expires,
domain=domain,
path=path,
secure=secure,
httponly=httponly)
def _clear_cookie(self, response):
config = current_app.config
cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
domain = config.get('REMEMBER_COOKIE_DOMAIN')
path = config.get('REMEMBER_COOKIE_PATH', '/')
response.delete_cookie(cookie_name, domain=domain, path=path)
class UserMixin(object):
'''
This provides default implementations for the methods that Flask-Login
expects user objects to have.
'''
@property
def is_active(self):
return True
@property
def is_authenticated(self):
return True
@property
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.id)
except AttributeError:
raise NotImplementedError('No `id` attribute - override `get_id`')
def __eq__(self, other):
'''
Checks the equality of two `UserMixin` objects using `get_id`.
'''
if isinstance(other, UserMixin):
return self.get_id() == other.get_id()
return NotImplemented
def __ne__(self, other):
'''
Checks the inequality of two `UserMixin` objects using `get_id`.
'''
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
if sys.version_info[0] != 2: # pragma: no cover
# Python 3 implicitly set __hash__ to None if we override __eq__
# We set it back to its default implementation
__hash__ = object.__hash__
class AnonymousUserMixin(object):
'''
This is the default object for representing an anonymous user.
'''
@property
def is_authenticated(self):
return False
@property
def is_active(self):
return False
@property
def is_anonymous(self):
return True
def get_id(self):
return
def encode_cookie(payload):
'''
This will encode a ``unicode`` value into a cookie, and sign that cookie
with the app's secret key.
:param payload: The value to encode, as `unicode`.
:type payload: unicode
'''
return u'{0}|{1}'.format(payload, _cookie_digest(payload))
def decode_cookie(cookie):
'''
This decodes a cookie given by `encode_cookie`. If verification of the
cookie fails, ``None`` will be implicitly returned.
:param cookie: An encoded cookie.
:type cookie: str
'''
try:
payload, digest = cookie.rsplit(u'|', 1)
if hasattr(digest, 'decode'):
digest = digest.decode('ascii') # pragma: no cover
except ValueError:
return
if safe_str_cmp(_cookie_digest(payload), digest):
return payload
def make_next_param(login_url, current_url):
'''
Reduces the scheme and host from a given URL so it can be passed to
the given `login` URL more efficiently.
:param login_url: The login URL being redirected to.
:type login_url: str
:param current_url: The URL to reduce.
:type current_url: str
'''
l = urlparse(login_url)
c = urlparse(current_url)
if (not l.scheme or l.scheme == c.scheme) and \
(not l.netloc or l.netloc == c.netloc):
return urlunparse(('', '', c.path, c.params, c.query, ''))
return current_url
def login_url(login_view, next_url=None, next_field='next'):
'''
Creates a URL for redirecting to a login page. If only `login_view` is
provided, this will just return the URL for it. If `next_url` is provided,
however, this will append a ``next=URL`` parameter to the query string
so that the login view can redirect back to that URL.
:param login_view: The name of the login view. (Alternately, the actual
URL to the login view.)
:type login_view: str
:param next_url: The URL to give the login view for redirection.
:type next_url: str
:param next_field: What field to store the next URL in. (It defaults to
``next``.)
:type next_field: str
'''
if login_view.startswith(('https://', 'http://', '/')):
base = login_view
else:
base = url_for(login_view)
if next_url is None:
return base
parts = list(urlparse(base))
md = url_decode(parts[4])
md[next_field] = make_next_param(base, next_url)
parts[4] = url_encode(md, sort=True)
return urlunparse(parts)
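# Example (assuming the endpoint 'login' resolves to '/login'):
#
#   login_url('login', next_url='/secret')  ->  '/login?next=%2Fsecret'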
def make_secure_token(*args, **options):
'''
This will create a secure token that you can use as an authentication
token for your users. It uses heavy-duty HMAC encryption to prevent people
from guessing the information. (To make it even more effective, if you
will never need to regenerate the token, you can pass some random data
as one of the arguments.)
:param \*args: The data to include in the token.
:type args: args
:param \*\*options: To manually specify a secret key, pass ``key=THE_KEY``.
Otherwise, the ``current_app`` secret key will be used.
:type \*\*options: kwargs
'''
key = options.get('key')
key = _secret_key(key)
l = [s if isinstance(s, bytes) else s.encode('utf-8') for s in args]
payload = b'\0'.join(l)
token_value = hmac.new(key, payload, sha512).hexdigest()
if hasattr(token_value, 'decode'): # pragma: no cover
        token_value = token_value.decode('utf-8')  # ensure text, not bytes
return token_value
def login_fresh():
'''
This returns ``True`` if the current login is fresh.
'''
return session.get('_fresh', False)
def login_user(user, remember=False, force=False, fresh=True):
'''
Logs a user in. You should pass the actual user object to this. If the
user's `is_active` property is ``False``, they will not be logged in
unless `force` is ``True``.
This will return ``True`` if the log in attempt succeeds, and ``False`` if
it fails (i.e. because the user is inactive).
:param user: The user object to log in.
:type user: object
:param remember: Whether to remember the user after their session expires.
Defaults to ``False``.
:type remember: bool
:param force: If the user is inactive, setting this to ``True`` will log
them in regardless. Defaults to ``False``.
:type force: bool
:param fresh: setting this to ``False`` will log in the user with a session
marked as not "fresh". Defaults to ``True``.
:type fresh: bool
'''
if not force and not user.is_active:
return False
user_id = getattr(user, current_app.login_manager.id_attribute)()
session['user_id'] = user_id
session['_fresh'] = fresh
session['_id'] = _create_identifier()
if remember:
session['remember'] = 'set'
_request_ctx_stack.top.user = user
user_logged_in.send(current_app._get_current_object(), user=_get_user())
return True
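# Sketch of a login view built on login_user (the User model, form fields and
# password check are illustrative, not part of this module):
#
#   @app.route('/login', methods=['POST'])
#   def login():
#       user = User.query.filter_by(email=request.form['email']).first()
#       if user is not None and user.check_password(request.form['password']):
#           login_user(user, remember=True)
#           return redirect(url_for('index'))
#       return 'Bad credentials', 401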
def logout_user():
'''
Logs a user out. (You do not need to pass the actual user.) This will
also clean up the remember me cookie if it exists.
'''
user = _get_user()
if 'user_id' in session:
session.pop('user_id')
if '_fresh' in session:
session.pop('_fresh')
cookie_name = current_app.config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
if cookie_name in request.cookies:
session['remember'] = 'clear'
user_logged_out.send(current_app._get_current_object(), user=user)
current_app.login_manager.reload_user()
return True
def confirm_login():
'''
This sets the current session as fresh. Sessions become stale when they
are reloaded from a cookie.
'''
session['_fresh'] = True
session['_id'] = _create_identifier()
user_login_confirmed.send(current_app._get_current_object())
def login_required(func):
'''
If you decorate a view with this, it will ensure that the current user is
logged in and authenticated before calling the actual view. (If they are
not, it calls the :attr:`LoginManager.unauthorized` callback.) For
example::
@app.route('/post')
@login_required
def post():
pass
If there are only certain times you need to require that your user is
logged in, you can do so with::
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
...which is essentially the code that this function adds to your views.
It can be convenient to globally turn off authentication when unit testing.
To enable this, if the application configuration variable `LOGIN_DISABLED`
is set to `True`, this decorator will be ignored.
:param func: The view function to decorate.
:type func: function
'''
@wraps(func)
def decorated_view(*args, **kwargs):
if current_app.login_manager._login_disabled:
return func(*args, **kwargs)
elif not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
return func(*args, **kwargs)
return decorated_view
def fresh_login_required(func):
'''
If you decorate a view with this, it will ensure that the current user's
    login is fresh - i.e. their session was not restored from a 'remember me'
cookie. Sensitive operations, like changing a password or e-mail, should
be protected with this, to impede the efforts of cookie thieves.
If the user is not authenticated, :meth:`LoginManager.unauthorized` is
called as normal. If they are authenticated, but their session is not
fresh, it will call :meth:`LoginManager.needs_refresh` instead. (In that
case, you will need to provide a :attr:`LoginManager.refresh_view`.)
Behaves identically to the :func:`login_required` decorator with respect
    to configuration variables.
:param func: The view function to decorate.
:type func: function
'''
@wraps(func)
def decorated_view(*args, **kwargs):
if current_app.login_manager._login_disabled:
return func(*args, **kwargs)
elif not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
elif not login_fresh():
return current_app.login_manager.needs_refresh()
return func(*args, **kwargs)
return decorated_view
def set_login_view(login_view, blueprint=None):
'''
Sets the login view for the app or blueprint. If a blueprint is passed,
the login view is set for this blueprint on ``blueprint_login_views``.
    :param login_view: The name of the login view to set (or an actual URL).
:type login_view: str
:param blueprint: The blueprint which this login view should be set on.
Defaults to ``None``.
:type blueprint: object
'''
num_login_views = len(current_app.login_manager.blueprint_login_views)
if blueprint is not None or num_login_views != 0:
(current_app.login_manager
.blueprint_login_views[blueprint.name]) = login_view
if (current_app.login_manager.login_view is not None and
None not in current_app.login_manager.blueprint_login_views):
(current_app.login_manager
.blueprint_login_views[None]) = (current_app.login_manager
.login_view)
current_app.login_manager.login_view = None
else:
current_app.login_manager.login_view = login_view
def _get_user():
if has_request_context() and not hasattr(_request_ctx_stack.top, 'user'):
current_app.login_manager._load_user()
return getattr(_request_ctx_stack.top, 'user', None)
def _cookie_digest(payload, key=None):
key = _secret_key(key)
return hmac.new(key, payload.encode('utf-8'), sha512).hexdigest()
def _get_remote_addr():
address = request.headers.get('X-Forwarded-For', request.remote_addr)
if address is not None:
# An 'X-Forwarded-For' header includes a comma separated list of the
# addresses, the first address being the actual remote address.
address = address.encode('utf-8').split(b',')[0].strip()
return address
def _create_identifier():
user_agent = request.headers.get('User-Agent')
if user_agent is not None:
user_agent = user_agent.encode('utf-8')
base = '{0}|{1}'.format(_get_remote_addr(), user_agent)
if str is bytes:
base = unicode(base, 'utf-8', errors='replace') # pragma: no cover
h = sha512()
h.update(base.encode('utf8'))
return h.hexdigest()
def _user_context_processor():
return dict(current_user=_get_user())
def _secret_key(key=None):
if key is None:
key = current_app.config['SECRET_KEY']
if isinstance(key, unicode): # pragma: no cover
key = key.encode('latin1') # ensure bytes
return key
# Signals
#: Sent when a user is logged in. In addition to the app (which is the
#: sender), it is passed `user`, which is the user being logged in.
user_logged_in = _signals.signal('logged-in')
#: Sent when a user is logged out. In addition to the app (which is the
#: sender), it is passed `user`, which is the user being logged out.
user_logged_out = _signals.signal('logged-out')
#: Sent when the user is loaded from the cookie. In addition to the app (which
#: is the sender), it is passed `user`, which is the user being reloaded.
user_loaded_from_cookie = _signals.signal('loaded-from-cookie')
#: Sent when the user is loaded from the header. In addition to the app (which
#: is the sender), it is passed `user`, which is the user being reloaded.
user_loaded_from_header = _signals.signal('loaded-from-header')
#: Sent when the user is loaded from the request. In addition to the app (which
#: is the sender), it is passed `user`, which is the user being reloaded.
user_loaded_from_request = _signals.signal('loaded-from-request')
#: Sent when a user's login is confirmed, marking it as fresh. (It is not
#: called for a normal login.)
#: It receives no additional arguments besides the app.
user_login_confirmed = _signals.signal('login-confirmed')
#: Sent when the `unauthorized` method is called on a `LoginManager`. It
#: receives no additional arguments besides the app.
user_unauthorized = _signals.signal('unauthorized')
#: Sent when the `needs_refresh` method is called on a `LoginManager`. It
#: receives no additional arguments besides the app.
user_needs_refresh = _signals.signal('needs-refresh')
#: Sent whenever the user is accessed/loaded
#: receives no additional arguments besides the app.
user_accessed = _signals.signal('accessed')
#: Sent whenever session protection takes effect, and a session is either
#: marked non-fresh or deleted. It receives no additional arguments besides
#: the app.
session_protected = _signals.signal('session-protected')
|
da1z/intellij-community | refs/heads/master | python/testData/inspections/PyUnboundLocalVariableInspection/ControlFlowInTryExceptFinally.py | 83 | def foo1():
a = 1
try:
for i in range(10):
pass
except Exception:
pass
finally:
b = a #pass
def foo2():
a = 1
try:
for i in range(10):
pass
except Exception:
c = a #pass
finally:
b = a #pass
|
robertwb/incubator-beam | refs/heads/master | sdks/python/apache_beam/io/gcp/tests/pubsub_matcher_test.py | 5 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit test for PubSub verifier."""
# pytype: skip-file
import logging
import unittest
import mock
from hamcrest import assert_that as hc_assert_that
from apache_beam.io.gcp.pubsub import PubsubMessage
from apache_beam.io.gcp.tests.pubsub_matcher import PubSubMessageMatcher
from apache_beam.testing.test_utils import PullResponseMessage
from apache_beam.testing.test_utils import create_pull_response
try:
from google.cloud import pubsub
except ImportError:
pubsub = None
@unittest.skipIf(pubsub is None, 'PubSub dependencies are not installed.')
@mock.patch('time.sleep', return_value=None)
@mock.patch('google.cloud.pubsub.SubscriberClient')
class PubSubMatcherTest(unittest.TestCase):
def setUp(self):
self.mock_presult = mock.MagicMock()
def init_matcher(
self, expected_msg=None, with_attributes=False, strip_attributes=None):
self.pubsub_matcher = PubSubMessageMatcher(
'mock_project',
'mock_sub_name',
expected_msg,
with_attributes=with_attributes,
strip_attributes=strip_attributes)
def init_counter_matcher(self, expected_msg_len=1):
self.pubsub_matcher = PubSubMessageMatcher(
'mock_project', 'mock_sub_name', expected_msg_len=expected_msg_len)
  def test_message_matcher_success(self, mock_get_sub, unused_mock):
self.init_matcher(expected_msg=[b'a', b'b'])
mock_sub = mock_get_sub.return_value
mock_sub.pull.side_effect = [
create_pull_response([PullResponseMessage(b'a', {})]),
create_pull_response([PullResponseMessage(b'b', {})]),
]
hc_assert_that(self.mock_presult, self.pubsub_matcher)
self.assertEqual(mock_sub.pull.call_count, 2)
self.assertEqual(mock_sub.acknowledge.call_count, 2)
  def test_message_matcher_attributes_success(self, mock_get_sub, unused_mock):
self.init_matcher(
expected_msg=[PubsubMessage(b'a', {'k': 'v'})], with_attributes=True)
mock_sub = mock_get_sub.return_value
mock_sub.pull.side_effect = [
create_pull_response([PullResponseMessage(b'a', {'k': 'v'})])
]
hc_assert_that(self.mock_presult, self.pubsub_matcher)
self.assertEqual(mock_sub.pull.call_count, 1)
self.assertEqual(mock_sub.acknowledge.call_count, 1)
  def test_message_matcher_attributes_fail(self, mock_get_sub, unused_mock):
self.init_matcher(
expected_msg=[PubsubMessage(b'a', {})], with_attributes=True)
mock_sub = mock_get_sub.return_value
# Unexpected attribute 'k'.
mock_sub.pull.side_effect = [
create_pull_response([PullResponseMessage(b'a', {'k': 'v'})])
]
with self.assertRaisesRegex(AssertionError, r'Unexpected'):
hc_assert_that(self.mock_presult, self.pubsub_matcher)
self.assertEqual(mock_sub.pull.call_count, 1)
self.assertEqual(mock_sub.acknowledge.call_count, 1)
  def test_message_matcher_strip_success(self, mock_get_sub, unused_mock):
self.init_matcher(
expected_msg=[PubsubMessage(b'a', {'k': 'v'})],
with_attributes=True,
strip_attributes=['id', 'timestamp'])
mock_sub = mock_get_sub.return_value
mock_sub.pull.side_effect = [
create_pull_response([
PullResponseMessage(
b'a', {
'id': 'foo', 'timestamp': 'bar', 'k': 'v'
})
])
]
hc_assert_that(self.mock_presult, self.pubsub_matcher)
self.assertEqual(mock_sub.pull.call_count, 1)
self.assertEqual(mock_sub.acknowledge.call_count, 1)
  def test_message_matcher_strip_fail(self, mock_get_sub, unused_mock):
self.init_matcher(
expected_msg=[PubsubMessage(b'a', {'k': 'v'})],
with_attributes=True,
strip_attributes=['id', 'timestamp'])
mock_sub = mock_get_sub.return_value
# Message is missing attribute 'timestamp'.
mock_sub.pull.side_effect = [
create_pull_response(
[PullResponseMessage(b'a', {
'id': 'foo', 'k': 'v'
})])
]
with self.assertRaisesRegex(AssertionError, r'Stripped attributes'):
hc_assert_that(self.mock_presult, self.pubsub_matcher)
self.assertEqual(mock_sub.pull.call_count, 1)
self.assertEqual(mock_sub.acknowledge.call_count, 1)
def test_message_matcher_mismatch(self, mock_get_sub, unused_mock):
self.init_matcher(expected_msg=[b'a'])
mock_sub = mock_get_sub.return_value
mock_sub.pull.side_effect = [
create_pull_response(
[PullResponseMessage(b'c', {}), PullResponseMessage(b'd', {})]),
]
with self.assertRaises(AssertionError) as error:
hc_assert_that(self.mock_presult, self.pubsub_matcher)
self.assertEqual(mock_sub.pull.call_count, 1)
self.assertCountEqual([b'c', b'd'], self.pubsub_matcher.messages)
self.assertIn(
'\nExpected: Expected 1 messages.\n but: Got 2 messages.',
str(error.exception.args[0]))
self.assertEqual(mock_sub.pull.call_count, 1)
self.assertEqual(mock_sub.acknowledge.call_count, 1)
def test_message_matcher_timeout(self, mock_get_sub, unused_mock):
self.init_matcher(expected_msg=[b'a'])
mock_sub = mock_get_sub.return_value
mock_sub.return_value.full_name.return_value = 'mock_sub'
self.pubsub_matcher.timeout = 0.1
with self.assertRaisesRegex(AssertionError, r'Expected 1.*\n.*Got 0'):
hc_assert_that(self.mock_presult, self.pubsub_matcher)
self.assertTrue(mock_sub.pull.called)
self.assertEqual(mock_sub.acknowledge.call_count, 0)
def test_message_count_matcher_below_fail(self, mock_get_sub, unused_mock):
self.init_counter_matcher(expected_msg_len=1)
mock_sub = mock_get_sub.return_value
mock_sub.pull.side_effect = [
create_pull_response(
[PullResponseMessage(b'c', {}), PullResponseMessage(b'd', {})]),
]
with self.assertRaises(AssertionError) as error:
hc_assert_that(self.mock_presult, self.pubsub_matcher)
self.assertEqual(mock_sub.pull.call_count, 1)
self.assertIn(
'\nExpected: Expected 1 messages.\n but: Got 2 messages.',
str(error.exception.args[0]))
def test_message_count_matcher_above_fail(self, mock_get_sub, unused_mock):
self.init_counter_matcher(expected_msg_len=1)
mock_sub = mock_get_sub.return_value
self.pubsub_matcher.timeout = 0.1
with self.assertRaisesRegex(AssertionError, r'Expected 1.*\n.*Got 0'):
hc_assert_that(self.mock_presult, self.pubsub_matcher)
self.assertTrue(mock_sub.pull.called)
self.assertEqual(mock_sub.acknowledge.call_count, 0)
def test_message_count_matcher_success(self, mock_get_sub, unused_mock):
self.init_counter_matcher(expected_msg_len=15)
mock_sub = mock_get_sub.return_value
mock_sub.pull.side_effect = [
create_pull_response(
[PullResponseMessage(b'a', {'foo': 'bar'}) for _ in range(15)])
]
hc_assert_that(self.mock_presult, self.pubsub_matcher)
self.assertEqual(mock_sub.pull.call_count, 1)
self.assertEqual(mock_sub.acknowledge.call_count, 1)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
j-marjanovic/myhdl | refs/heads/master | myhdl/_join.py | 6 | # This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module that provides join class """
class join(object):
""" Join trigger objects to form a single trigger object. """
def __init__(self, *args):
""" Construct join object
*args -- list of trigger object arguments.
"""
self._args = args
def _generator(self):
yield join(*self._args)
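# Usage sketch (signal names are illustrative): inside a MyHDL generator,
# yielding a join resumes execution only after *all* of its triggers fire.
#
#   def proc(clk, ack):            # clk, ack: myhdl Signal instances
#       while True:
#           yield join(clk.posedge, ack.posedge)   # wait for both edges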
|
alajara/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/py/py/_path/cacheutil.py | 278 | """
This module contains multithread-safe cache implementations.
All Caches have
getorbuild(key, builder)
delentry(key)
methods and allow configuration when instantiating the cache class.
"""
from time import time as gettime
class BasicCache(object):
def __init__(self, maxentries=128):
self.maxentries = maxentries
self.prunenum = int(maxentries - maxentries/8)
self._dict = {}
def clear(self):
self._dict.clear()
def _getentry(self, key):
return self._dict[key]
def _putentry(self, key, entry):
self._prunelowestweight()
self._dict[key] = entry
def delentry(self, key, raising=False):
try:
del self._dict[key]
except KeyError:
if raising:
raise
def getorbuild(self, key, builder):
try:
entry = self._getentry(key)
except KeyError:
entry = self._build(key, builder)
self._putentry(key, entry)
return entry.value
def _prunelowestweight(self):
""" prune out entries with lowest weight. """
numentries = len(self._dict)
if numentries >= self.maxentries:
# evict according to entry's weight
items = [(entry.weight, key)
for key, entry in self._dict.items()]
items.sort()
index = numentries - self.prunenum
if index > 0:
for weight, key in items[:index]:
# in MT situations the element might be gone
self.delentry(key, raising=False)
class BuildcostAccessCache(BasicCache):
""" A BuildTime/Access-counting cache implementation.
    The weight of a value is computed as the product of
    num-accesses-of-a-value * time-to-build-the-value.
    The values with the least such weights are evicted
    if the cache maxentries threshold is exceeded.
    For implementation flexibility, more than one object
might be evicted at a time.
"""
# time function to use for measuring build-times
def _build(self, key, builder):
start = gettime()
val = builder()
end = gettime()
return WeightedCountingEntry(val, end-start)
class WeightedCountingEntry(object):
def __init__(self, value, oneweight):
self._value = value
self.weight = self._oneweight = oneweight
def value(self):
self.weight += self._oneweight
return self._value
value = property(value)
class AgingCache(BasicCache):
""" This cache prunes out cache entries that are too old.
"""
def __init__(self, maxentries=128, maxseconds=10.0):
super(AgingCache, self).__init__(maxentries)
self.maxseconds = maxseconds
def _getentry(self, key):
entry = self._dict[key]
if entry.isexpired():
self.delentry(key)
raise KeyError(key)
return entry
def _build(self, key, builder):
val = builder()
entry = AgingEntry(val, gettime() + self.maxseconds)
return entry
class AgingEntry(object):
def __init__(self, value, expirationtime):
self.value = value
self.weight = expirationtime
def isexpired(self):
t = gettime()
return t >= self.weight
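# Minimal demo of the cache API (key and builder are illustrative); run this
# module directly to exercise it. getorbuild() only invokes the builder on a
# cache miss.
if __name__ == "__main__":
    cache = BuildcostAccessCache(maxentries=4)
    calls = []
    def build_value():
        calls.append(1)
        return sum(range(1000))
    print(cache.getorbuild("answer", build_value))  # builds: len(calls) == 1
    print(cache.getorbuild("answer", build_value))  # cached: len(calls) == 1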
|
badloop/SickRage | refs/heads/master | lib/unidecode/x0bc.py | 253 | data = (
'mil', # 0x00
'milg', # 0x01
'milm', # 0x02
'milb', # 0x03
'mils', # 0x04
'milt', # 0x05
'milp', # 0x06
'milh', # 0x07
'mim', # 0x08
'mib', # 0x09
'mibs', # 0x0a
'mis', # 0x0b
'miss', # 0x0c
'ming', # 0x0d
'mij', # 0x0e
'mic', # 0x0f
'mik', # 0x10
'mit', # 0x11
'mip', # 0x12
'mih', # 0x13
'ba', # 0x14
'bag', # 0x15
'bagg', # 0x16
'bags', # 0x17
'ban', # 0x18
'banj', # 0x19
'banh', # 0x1a
'bad', # 0x1b
'bal', # 0x1c
'balg', # 0x1d
'balm', # 0x1e
'balb', # 0x1f
'bals', # 0x20
'balt', # 0x21
'balp', # 0x22
'balh', # 0x23
'bam', # 0x24
'bab', # 0x25
'babs', # 0x26
'bas', # 0x27
'bass', # 0x28
'bang', # 0x29
'baj', # 0x2a
'bac', # 0x2b
'bak', # 0x2c
'bat', # 0x2d
'bap', # 0x2e
'bah', # 0x2f
'bae', # 0x30
'baeg', # 0x31
'baegg', # 0x32
'baegs', # 0x33
'baen', # 0x34
'baenj', # 0x35
'baenh', # 0x36
'baed', # 0x37
'bael', # 0x38
'baelg', # 0x39
'baelm', # 0x3a
'baelb', # 0x3b
'baels', # 0x3c
'baelt', # 0x3d
'baelp', # 0x3e
'baelh', # 0x3f
'baem', # 0x40
'baeb', # 0x41
'baebs', # 0x42
'baes', # 0x43
'baess', # 0x44
'baeng', # 0x45
'baej', # 0x46
'baec', # 0x47
'baek', # 0x48
'baet', # 0x49
'baep', # 0x4a
'baeh', # 0x4b
'bya', # 0x4c
'byag', # 0x4d
'byagg', # 0x4e
'byags', # 0x4f
'byan', # 0x50
'byanj', # 0x51
'byanh', # 0x52
'byad', # 0x53
'byal', # 0x54
'byalg', # 0x55
'byalm', # 0x56
'byalb', # 0x57
'byals', # 0x58
'byalt', # 0x59
'byalp', # 0x5a
'byalh', # 0x5b
'byam', # 0x5c
'byab', # 0x5d
'byabs', # 0x5e
'byas', # 0x5f
'byass', # 0x60
'byang', # 0x61
'byaj', # 0x62
'byac', # 0x63
'byak', # 0x64
'byat', # 0x65
'byap', # 0x66
'byah', # 0x67
'byae', # 0x68
'byaeg', # 0x69
'byaegg', # 0x6a
'byaegs', # 0x6b
'byaen', # 0x6c
'byaenj', # 0x6d
'byaenh', # 0x6e
'byaed', # 0x6f
'byael', # 0x70
'byaelg', # 0x71
'byaelm', # 0x72
'byaelb', # 0x73
'byaels', # 0x74
'byaelt', # 0x75
'byaelp', # 0x76
'byaelh', # 0x77
'byaem', # 0x78
'byaeb', # 0x79
'byaebs', # 0x7a
'byaes', # 0x7b
'byaess', # 0x7c
'byaeng', # 0x7d
'byaej', # 0x7e
'byaec', # 0x7f
'byaek', # 0x80
'byaet', # 0x81
'byaep', # 0x82
'byaeh', # 0x83
'beo', # 0x84
'beog', # 0x85
'beogg', # 0x86
'beogs', # 0x87
'beon', # 0x88
'beonj', # 0x89
'beonh', # 0x8a
'beod', # 0x8b
'beol', # 0x8c
'beolg', # 0x8d
'beolm', # 0x8e
'beolb', # 0x8f
'beols', # 0x90
'beolt', # 0x91
'beolp', # 0x92
'beolh', # 0x93
'beom', # 0x94
'beob', # 0x95
'beobs', # 0x96
'beos', # 0x97
'beoss', # 0x98
'beong', # 0x99
'beoj', # 0x9a
'beoc', # 0x9b
'beok', # 0x9c
'beot', # 0x9d
'beop', # 0x9e
'beoh', # 0x9f
'be', # 0xa0
'beg', # 0xa1
'begg', # 0xa2
'begs', # 0xa3
'ben', # 0xa4
'benj', # 0xa5
'benh', # 0xa6
'bed', # 0xa7
'bel', # 0xa8
'belg', # 0xa9
'belm', # 0xaa
'belb', # 0xab
'bels', # 0xac
'belt', # 0xad
'belp', # 0xae
'belh', # 0xaf
'bem', # 0xb0
'beb', # 0xb1
'bebs', # 0xb2
'bes', # 0xb3
'bess', # 0xb4
'beng', # 0xb5
'bej', # 0xb6
'bec', # 0xb7
'bek', # 0xb8
'bet', # 0xb9
'bep', # 0xba
'beh', # 0xbb
'byeo', # 0xbc
'byeog', # 0xbd
'byeogg', # 0xbe
'byeogs', # 0xbf
'byeon', # 0xc0
'byeonj', # 0xc1
'byeonh', # 0xc2
'byeod', # 0xc3
'byeol', # 0xc4
'byeolg', # 0xc5
'byeolm', # 0xc6
'byeolb', # 0xc7
'byeols', # 0xc8
'byeolt', # 0xc9
'byeolp', # 0xca
'byeolh', # 0xcb
'byeom', # 0xcc
'byeob', # 0xcd
'byeobs', # 0xce
'byeos', # 0xcf
'byeoss', # 0xd0
'byeong', # 0xd1
'byeoj', # 0xd2
'byeoc', # 0xd3
'byeok', # 0xd4
'byeot', # 0xd5
'byeop', # 0xd6
'byeoh', # 0xd7
'bye', # 0xd8
'byeg', # 0xd9
'byegg', # 0xda
'byegs', # 0xdb
'byen', # 0xdc
'byenj', # 0xdd
'byenh', # 0xde
'byed', # 0xdf
'byel', # 0xe0
'byelg', # 0xe1
'byelm', # 0xe2
'byelb', # 0xe3
'byels', # 0xe4
'byelt', # 0xe5
'byelp', # 0xe6
'byelh', # 0xe7
'byem', # 0xe8
'byeb', # 0xe9
'byebs', # 0xea
'byes', # 0xeb
'byess', # 0xec
'byeng', # 0xed
'byej', # 0xee
'byec', # 0xef
'byek', # 0xf0
'byet', # 0xf1
'byep', # 0xf2
'byeh', # 0xf3
'bo', # 0xf4
'bog', # 0xf5
'bogg', # 0xf6
'bogs', # 0xf7
'bon', # 0xf8
'bonj', # 0xf9
'bonh', # 0xfa
'bod', # 0xfb
'bol', # 0xfc
'bolg', # 0xfd
'bolm', # 0xfe
'bolb', # 0xff
)
|
vitan/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/i18n/urls.py | 79 | from __future__ import unicode_literals
from django.conf.urls.i18n import i18n_patterns
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.translation import ugettext_lazy as _
urlpatterns = i18n_patterns('',
(r'^simple/$', lambda r: HttpResponse()),
(r'^streaming/$', lambda r: StreamingHttpResponse([_("Yes"), "/", _("No")])),
)
|
sposh-science/pycode-browser | refs/heads/master | Code/Physics/series_sin.py | 1 | from pylab import *
from scipy import *
x = linspace(-pi,pi,40)
a = zeros(40)
plot(x,sin(x))
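# Partial sums of the Taylor series: sin(x) = sum_{n>=1} (-1)**(n+1) * x**(2*n-1)/(2*n-1)!
# Each loop iteration below adds one term to `a` and also plots that term alone;
# the '+' markers show the accumulated sum converging to sin(x) on [-pi, pi].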
for n in range(1,5):
sign = (-1)**(n+1)
term = x**(2*n-1) / factorial(2*n-1)
a = a + sign * term
print( n,sign)
plot(x,term)
plot(x,a,'+')
show()
|
geekroot/erpnext | refs/heads/develop | erpnext/patches/v6_4/repost_gle_for_journal_entries_where_reference_name_missing.py | 30 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
def execute():
je_list = frappe.db.sql_list("""select distinct parent from `tabJournal Entry Account` je
where docstatus=1 and ifnull(reference_name, '') !='' and creation > '2015-03-01'
and not exists(select name from `tabGL Entry`
where voucher_type='Journal Entry' and voucher_no=je.parent
and against_voucher_type=je.reference_type
and against_voucher=je.reference_name)""")
for d in je_list:
print(d)
# delete existing gle
frappe.db.sql("delete from `tabGL Entry` where voucher_type='Journal Entry' and voucher_no=%s", d)
# repost gl entries
je = frappe.get_doc("Journal Entry", d)
je.make_gl_entries() |
linktlh/Toontown-journey | refs/heads/master | toontown/coghq/BarrelBase.py | 5 | import random
class BarrelBase:
def getRng(self):
return random.Random(self.entId * self.level.doId)
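    # Note: seeding with entId * level.doId instead of a fresh random seed is
    # presumably so a given barrel always yields the same reward/gag choices,
    # letting the values be recomputed deterministically wherever this mixin runs.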
def getRewardPerGrab(self):
if not hasattr(self, '_reward'):
if self.rewardPerGrabMax > self.rewardPerGrab:
self._reward = self.getRng().randrange(self.rewardPerGrab, self.rewardPerGrabMax + 1)
else:
self._reward = self.rewardPerGrab
return self._reward
def getGagLevel(self):
if not hasattr(self, '_gagLevel'):
if self.gagLevelMax > self.gagLevel:
self._gagLevel = self.getRng().randrange(self.gagLevel, self.gagLevelMax + 1)
else:
self._gagLevel = self.gagLevel
return self._gagLevel
def getGagTrack(self):
if not hasattr(self, '_gagTrack'):
if self.gagTrack == 'random':
tracks = (0, 1, 2, 3, 4, 4, 5, 5, 6)
self._gagTrack = self.getRng().choice(tracks)
else:
self._gagTrack = self.gagTrack
return self._gagTrack
if __dev__:
def setRewardPerGrab(self, rewardPerGrab):
if hasattr(self, '_reward'):
del self._reward
self.rewardPerGrab = rewardPerGrab
def setRewardPerGrabMax(self, rewardPerGrabMax):
if hasattr(self, '_reward'):
del self._reward
self.rewardPerGrabMax = rewardPerGrabMax
def setGagLevel(self, gagLevel):
if hasattr(self, '_gagLevel'):
del self._gagLevel
self.gagLevel = gagLevel
def setGagLevelMax(self, gagLevelMax):
if hasattr(self, '_gagLevel'):
del self._gagLevel
self.gagLevelMax = gagLevelMax
def setGagTrack(self, gagTrack):
if hasattr(self, '_gagTrack'):
del self._gagTrack
self.gagTrack = gagTrack
|
senderista/s3-multipart | refs/heads/master | s3-mp-upload.py | 2 | #!/usr/bin/env python
import argparse
from cStringIO import StringIO
import logging
from math import ceil
from multiprocessing import Pool
import time
import urlparse
import boto
from boto.s3.connection import OrdinaryCallingFormat
parser = argparse.ArgumentParser(description="Transfer large files to S3",
prog="s3-mp-upload")
parser.add_argument("src", type=file, help="The file to transfer")
parser.add_argument("dest", help="The S3 destination object")
parser.add_argument("-np", "--num-processes", help="Number of processors to use",
type=int, default=2)
parser.add_argument("-f", "--force", help="Overwrite an existing S3 key",
action="store_true")
parser.add_argument("-s", "--split", help="Split size, in Mb", type=int, default=50)
parser.add_argument("-rrs", "--reduced-redundancy", help="Use reduced redundancy storage. Default is standard.", default=False, action="store_true")
parser.add_argument("--insecure", dest='secure', help="Use HTTP for connection",
default=True, action="store_false")
parser.add_argument("-t", "--max-tries", help="Max allowed retries for http timeout", type=int, default=5)
parser.add_argument("-v", "--verbose", help="Be more verbose", default=False, action="store_true")
parser.add_argument("-q", "--quiet", help="Be less verbose (for use in cron jobs)", default=False, action="store_true")
logger = logging.getLogger("s3-mp-upload")
def do_part_upload(args):
"""
Upload a part of a MultiPartUpload
Open the target file and read in a chunk. Since we can't pickle
S3Connection or MultiPartUpload objects, we have to reconnect and lookup
the MPU object with each part upload.
:type args: tuple of (string, string, string, int, int, int)
:param args: The actual arguments of this method. Due to lameness of
multiprocessing, we have to extract these outside of the
function definition.
The arguments are: S3 Bucket name, MultiPartUpload id, file
name, the part number, part offset, part size
"""
# Multiprocessing args lameness
bucket_name, mpu_id, fname, i, start, size, secure, max_tries, current_tries = args
logger.debug("do_part_upload got args: %s" % (args,))
# Connect to S3, get the MultiPartUpload
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
s3.is_secure = secure
bucket = s3.lookup(bucket_name)
mpu = None
for mp in bucket.list_multipart_uploads():
if mp.id == mpu_id:
mpu = mp
break
if mpu is None:
raise Exception("Could not find MultiPartUpload %s" % mpu_id)
# Read the chunk from the file
fp = open(fname, 'rb')
fp.seek(start)
data = fp.read(size)
fp.close()
if not data:
raise Exception("Unexpectedly tried to read an empty chunk")
def progress(x,y):
logger.debug("Part %d: %0.2f%%" % (i+1, 100.*x/y))
try:
# Do the upload
t1 = time.time()
mpu.upload_part_from_file(StringIO(data), i+1, cb=progress)
# Print some timings
t2 = time.time() - t1
s = len(data)/1024./1024.
logger.info("Uploaded part %s (%0.2fM) in %0.2fs at %0.2fMBps" % (i+1, s, t2, s/t2))
except Exception, err:
logger.debug("Retry request %d of max %d times" % (current_tries, max_tries))
if (current_tries > max_tries):
logger.error(err)
else:
time.sleep(3)
current_tries += 1
            # Retry this part's upload; the original called a non-existent
            # do_part_download() here (copied from the companion download
            # script), which would raise a NameError. do_part_upload expects
            # a single args tuple.
            do_part_upload((bucket_name, mpu_id, fname, i, start, size, secure, max_tries, current_tries))
def main(src, dest, num_processes=2, split=50, force=False, reduced_redundancy=False, verbose=False, quiet=False, secure=True, max_tries=5):
# Check that dest is a valid S3 url
split_rs = urlparse.urlsplit(dest)
if split_rs.scheme != "s3":
raise ValueError("'%s' is not an S3 url" % dest)
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
s3.is_secure = secure
bucket = s3.lookup(split_rs.netloc)
if bucket == None:
raise ValueError("'%s' is not a valid bucket" % split_rs.netloc)
key = bucket.get_key(split_rs.path)
# See if we're overwriting an existing key
if key is not None:
if not force:
raise ValueError("'%s' already exists. Specify -f to overwrite it" % dest)
# Determine the splits
part_size = max(5*1024*1024, 1024*1024*split)
src.seek(0,2)
size = src.tell()
num_parts = int(ceil(size / part_size))
# If file is less than 5M, just upload it directly
if size < 5*1024*1024:
src.seek(0)
t1 = time.time()
k = boto.s3.key.Key(bucket,split_rs.path)
k.set_contents_from_file(src)
t2 = time.time() - t1
s = size/1024./1024.
logger.info("Finished uploading %0.2fM in %0.2fs (%0.2fMBps)" % (s, t2, s/t2))
return
# Create the multi-part upload object
mpu = bucket.initiate_multipart_upload(split_rs.path, reduced_redundancy=reduced_redundancy)
logger.info("Initialized upload: %s" % mpu.id)
# Generate arguments for invocations of do_part_upload
def gen_args(num_parts, fold_last):
for i in range(num_parts+1):
part_start = part_size*i
if i == (num_parts-1) and fold_last is True:
yield (bucket.name, mpu.id, src.name, i, part_start, part_size*2, secure, max_tries, 0)
break
else:
yield (bucket.name, mpu.id, src.name, i, part_start, part_size, secure, max_tries, 0)
# If the last part is less than 5M, just fold it into the previous part
fold_last = ((size % part_size) < 5*1024*1024)
# Do the thing
try:
# Create a pool of workers
pool = Pool(processes=num_processes)
t1 = time.time()
pool.map_async(do_part_upload, gen_args(num_parts, fold_last)).get(9999999)
# Print out some timings
t2 = time.time() - t1
s = size/1024./1024.
# Finalize
src.close()
mpu.complete_upload()
logger.info("Finished uploading %0.2fM in %0.2fs (%0.2fMBps)" % (s, t2, s/t2))
except KeyboardInterrupt:
logger.warn("Received KeyboardInterrupt, canceling upload")
pool.terminate()
mpu.cancel_upload()
except Exception, err:
logger.error("Encountered an error, canceling upload")
logger.error(err)
mpu.cancel_upload()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
args = parser.parse_args()
arg_dict = vars(args)
if arg_dict['quiet'] == True:
logger.setLevel(logging.WARNING)
if arg_dict['verbose'] == True:
logger.setLevel(logging.DEBUG)
logger.debug("CLI args: %s" % args)
main(**arg_dict)
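
# --- Illustrative sketch (not part of the original script) ---
# Minimal, standalone model of the part-splitting logic used by main() above,
# assuming the same 5 MB S3 minimum part size and the "fold an undersized last
# part into the previous one" behaviour. plan_parts is a hypothetical helper
# for illustration only.
def plan_parts(size, split_mb=50, min_part=5 * 1024 * 1024):
    """Return a list of (offset, length) tuples covering `size` bytes."""
    part_size = max(min_part, split_mb * 1024 * 1024)
    if size < min_part:
        return [(0, size)]  # small files are uploaded in a single PUT
    parts = [(off, min(part_size, size - off))
             for off in range(0, size, part_size)]
    if len(parts) > 1 and parts[-1][1] < min_part:
        # Fold the undersized tail into the previous part.
        tail_off, tail_len = parts.pop()
        prev_off, prev_len = parts.pop()
        parts.append((prev_off, prev_len + tail_len))
    return parts
# Example: plan_parts(103 * 1024 * 1024) -> [(0, 50 MB), (50 MB, 53 MB)]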
|
syhw/contextual_word_segmentation | refs/heads/master | mark_scripts/topicsandcollocations/eval-words.py | 2 | #! /bin/env python
description = """
eval-words.py version of 20th August, 2011
Evaluates the accuracy of specific words in word segmentation output
(e.g., the *.sws or *.avprs files produced by py-cfg).
When the corpus is read, each word is first transformed by matching
it with --match-re, and generating the pattern specified by
--match-template. Then the resulting word is looked up in
--renamer-file to generate the actual key scored.
Example: to aggregate word scores by frequency:
eval-words.py --renamer-file br-phono.wc.txt <filenames.sws>
Example: if phono-text.txt maps pronunciations to their orthographic forms
this will aggregate scores by orthographic forms.
eval-words.py --renamer-file phono-text.txt <filenames.sws>
To compute the average word f-score, run
eval-words.py --match-template Word <filenames.sws>
"""
import argparse, collections, csv, re, sys
def readparses(inf, simplify_rex, simplify_template):
def simplify(word):
mo = simplify_rex.match(word)
if mo:
return mo.expand(simplify_template)
else:
return word
parses = []
for line in inf:
line = line.strip()
if len(line) == 0:
if len(parses) > 0:
yield parses
parses = []
else:
parses.append(' '.join(simplify(word) for word in line.split()))
if len(parses) > 0:
yield parses
def argmax(keyvals):
return max(keyvals, key=lambda keyval: keyval[1])[0]
def most_frequent_parse(*parses):
"""Counts the number of times each parse appears, and returns the
one that appears most frequently"""
parsecounts = collections.defaultdict(int)
for parse in parses:
parsecounts[parse] += 1
return argmax(parsecounts.iteritems())
def read_data(inf, match_rex, match_template,
simplify_rex, simplify_template,
renamer,
goldlines=None):
parses = list(readparses(inf, simplify_rex, simplify_template))
if len(parses) == 0:
sys.stderr.write("**Error: file %s has no lines\n"%inf.name)
assert(len(parses) > 0)
data = map(most_frequent_parse, *parses)
if goldlines:
if len(goldlines) != len(data):
sys.stderr.write("** Error: %s has %s lines, while gold data has %s lines, so results are bogus\n"
(inf.name, len(data), len(goldlines)))
sys.exit(1)
for lineno, (goldline,line) in enumerate(zip(goldlines,data)):
if goldline != ''.join(line.split()):
sys.stderr.write("** Error: line %s in %s differs from gold line, so results are bogus\n** data: %s\n** gold: %s\n\n" %
(lineno+1, inf.name, line, goldline))
sys.exit(1)
else:
goldlines = [''.join(line.split()) for line in data]
tuples = collections.defaultdict(set)
pos = 0
if renamer:
for line in data:
for word in line.split():
oldpos = pos
pos += len(word)
mo = match_rex.match(word)
if mo:
word = mo.expand(match_template)
if word in renamer:
tuples[renamer[word]].add((oldpos,pos))
else:
for line in data:
for word in line.split():
oldpos = pos
pos += len(word)
mo = match_rex.match(word)
if mo:
word = mo.expand(match_template)
tuples[word].add((oldpos,pos))
return goldlines, tuples
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=description)
parser.add_argument("testfiles", nargs='+',
help="test files to evaluate for word segmentation accuracy")
parser.add_argument("--grammar-re", dest="grammar_re", default=r"_G([^_]+)_",
help="regex mapping test filename to grammar")
parser.add_argument("--scale-re", dest="scale_re", default=r"_s([.0-9]+)_",
help="regex mapping test filename to scale")
parser.add_argument("--fullname-re", "-f", dest="fullname_re",
default=r"([^/]+)\.[a-z]+$",
help="regex mapping test filename to full identifier")
parser.add_argument("--gold", "-g", dest="gold",
default="fm_corpus_tier_gold.seg",
help="file containing gold data")
parser.add_argument("--match-re", "-m", dest="match_re",
# default=r"^(yu|D6|D&t|hIr|pIg|dOgi|bUk|brAS|trAk|kar|tEl6fon|spEnsR)$",
default=r"^(.*)$",
help="regex matching words to score")
parser.add_argument("--match-template", "-t", dest="match_template",
default=r"\1",
help="template used to generate scoring key from match")
parser.add_argument("--output-field-separator",
dest="output_field_separator", default=",",
help="separator between output fields")
parser.add_argument("--renamer-file", "-r", dest="renamer_file",
help="file containing <word> <key> pairs (one per line) mapping words to scoring keys")
parser.add_argument("--simplify-re", "-s", dest="simplify_re",
default=r"^([^_]*)(?:_.*)?$",
help="regex used to simplify tokens during reading")
parser.add_argument("--simplify-template", "-S", dest="simplify_template",
default=r"\1",
help="template used to simplify tokens during reading")
args = parser.parse_args()
grammar_rex = re.compile(args.grammar_re)
scale_rex = re.compile(args.scale_re)
fullname_rex = re.compile(args.fullname_re)
match_rex = re.compile(args.match_re)
simplify_rex = re.compile(args.simplify_re)
renamer = None
if args.renamer_file:
renamer = dict(line.split() for line in file(args.renamer_file, "rU"))
goldlines, gold = read_data(file(args.gold, "rU"),
match_rex, args.match_template,
simplify_rex, args.simplify_template,
renamer)
# words is set of all words that match match_re in gold and test data
words = set(gold.iterkeys())
# eolpositions is set of eol positions
eolpositions = set((0,))
pos = 0
for goldline in goldlines:
pos += len(goldline)
eolpositions.add(pos)
assert(len(eolpositions) == len(goldlines)+1)
fname_data = dict()
for fname in args.testfiles:
inf = file(fname)
goldlines, data = read_data(inf, match_rex, args.match_template,
simplify_rex, args.simplify_template,
renamer,
goldlines)
fname_data[fname] = data
words.update(data.iterkeys())
os = csv.writer(sys.stdout,
delimiter=args.output_field_separator,
lineterminator='\n')
os.writerow(('filename','grammar','scale','word',
'token_gold','token_test','token_correct','token_fscore',
'boundary_gold','boundary_test','boundary_correct','boundary_fscore'))
for fname,data in fname_data.iteritems():
fullname = fname
mo = fullname_rex.search(fname)
if mo:
fullname = mo.group(1)
grammar = None
mo = grammar_rex.search(fname)
if mo:
grammar = mo.group(1)
scale = None
mo = scale_rex.search(fname)
if mo:
scale = mo.group(1)
for word in words:
testdata = data[word]
golddata = gold[word]
token_correct = testdata & golddata
token_ngold = len(golddata)
token_ntest = len(testdata)
token_ncorrect = len(token_correct)
token_fscore = 2*token_ncorrect/(token_ngold+token_ntest+1e-100)
testboundaries = (set(b[0] for b in testdata) | set(b[1] for b in testdata))-eolpositions
goldboundaries = (set(b[0] for b in golddata) | set(b[1] for b in golddata))-eolpositions
boundary_correct = testboundaries & goldboundaries
boundary_ngold = len(goldboundaries)
boundary_ntest = len(testboundaries)
boundary_ncorrect = len(boundary_correct)
boundary_fscore = 2*boundary_ncorrect/(boundary_ngold+boundary_ntest+1e-100)
os.writerow((fullname,grammar,scale,word,
token_ngold, token_ntest, token_ncorrect, token_fscore,
boundary_ngold, boundary_ntest, boundary_ncorrect, boundary_fscore
))
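
# --- Illustrative sketch (not part of the original script) ---
# Minimal model of the span-based scoring used in read_data() above: each word
# becomes a (start, end) character-offset pair, so token f-score reduces to a
# set intersection. word_spans and token_fscore are hypothetical helpers for
# illustration only.
def word_spans(segmented_line):
    spans, pos = set(), 0
    for w in segmented_line.split():
        spans.add((pos, pos + len(w)))
        pos += len(w)
    return spans

def token_fscore(test_line, gold_line):
    test, gold = word_spans(test_line), word_spans(gold_line)
    ncorrect = len(test & gold)
    return 2.0 * ncorrect / (len(test) + len(gold) + 1e-100)

# Example: token_fscore("the dog ran", "thedog ran") == 2*1/(3+2) == 0.4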
|
patrickleotardif/airflow | refs/heads/master | airflow/operators/postgres_operator.py | 14 | import logging
from airflow.hooks import PostgresHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class PostgresOperator(BaseOperator):
"""
Executes sql code in a specific Postgres database
:param postgres_conn_id: reference to a specific postgres database
:type postgres_conn_id: string
:param sql: the sql code to be executed
:type sql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
Template reference are recognized by str ending in '.sql'
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#ededed'
@apply_defaults
def __init__(
self, sql,
postgres_conn_id='postgres_default', autocommit=False,
*args, **kwargs):
super(PostgresOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.postgres_conn_id = postgres_conn_id
self.autocommit = autocommit
def execute(self, context):
logging.info('Executing: ' + str(self.sql))
self.hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
self.hook.run(self.sql, self.autocommit)
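
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of wiring this operator into a DAG; the dag_id, start
# date, connection id and SQL below are assumptions for illustration only.
if __name__ == '__main__':
    from datetime import datetime
    from airflow.models import DAG

    dag = DAG('example_postgres', start_date=datetime(2015, 1, 1))
    create_table = PostgresOperator(
        task_id='create_table',
        postgres_conn_id='postgres_default',
        sql="CREATE TABLE IF NOT EXISTS demo (id SERIAL PRIMARY KEY, note TEXT)",
        dag=dag)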
|
mirkobrombin/Bottles | refs/heads/master | src/pages/dialog.py | 1 | # dialog.py
#
# Copyright 2020 brombinmirko <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
class BottlesMessageDialog(Gtk.MessageDialog):
def __init__(self,
parent,
title=_("Warning"),
message=_("An error has occurred."),
log=False):
Gtk.MessageDialog.__init__(self,
parent=parent,
flags=Gtk.DialogFlags.USE_HEADER_BAR,
type=Gtk.MessageType.WARNING,
buttons=Gtk.ButtonsType.OK_CANCEL,
message_format=message)
'''Display log as output if defined'''
if log:
message_scroll = Gtk.ScrolledWindow()
message_scroll.set_hexpand(True)
message_scroll.set_vexpand(True)
message_view = Gtk.TextView()
message_buffer = message_view.get_buffer()
message_buffer.set_text(log)
message_scroll.add(message_view)
content = self.get_content_area()
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
box.set_border_width(20)
if log: box.add(message_scroll)
content.add(box)
self.show_all()
class BottlesDialog(Gtk.Dialog):
def __init__(self,
parent,
title=_("Warning"),
message=_("An error has occurred."),
log=False):
Gtk.Dialog.__init__(self,
title=title,
parent=parent,
flags=Gtk.DialogFlags.USE_HEADER_BAR)
'''Display log as output if defined'''
if log:
self.resize(600, 700)
if parent.settings.get_boolean("dark-theme"):
color = "#d4036d"
else:
color = "#3e0622"
message_scroll = Gtk.ScrolledWindow()
message_scroll.set_hexpand(True)
message_scroll.set_vexpand(True)
message_view = Gtk.TextView()
message_buffer = message_view.get_buffer()
buffer_iter = message_buffer.get_end_iter()
message_buffer.insert_markup(
buffer_iter, "<span foreground='%s'>%s</span>" % (color, log), -1)
message_scroll.add(message_view)
content = self.get_content_area()
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
box.set_border_width(20)
if log: box.add(message_scroll)
content.add(box)
self.show_all()
@Gtk.Template(resource_path='/com/usebottles/bottles/about.ui')
class BottlesAboutDialog(Gtk.AboutDialog):
__gtype_name__ = 'BottlesAboutDialog'
def __init__(self, **kwargs):
super().__init__(**kwargs)
'''Init template'''
try:
self.init_template()
except TypeError:
self.init_template("")
|
JingJunYin/tensorflow | refs/heads/master | tensorflow/tools/graph_transforms/__init__.py | 152 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exposes the Python wrapper for graph transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import errors
from tensorflow.python.pywrap_tensorflow import TransformGraphWithStringInputs
from tensorflow.python.util import compat
def TransformGraph(input_graph_def, inputs, outputs, transforms):
"""Python wrapper for the Graph Transform Tool.
Gives access to all graph transforms available through the command line tool.
See documentation at https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/graph_transforms/README.md
for full details of the options available.
Args:
input_graph_def: GraphDef object containing a model to be transformed.
inputs: List of node names for the model inputs.
outputs: List of node names for the model outputs.
transforms: List of strings containing transform names and parameters.
Returns:
New GraphDef with transforms applied.
"""
input_graph_def_string = input_graph_def.SerializeToString()
inputs_string = compat.as_bytes(",".join(inputs))
outputs_string = compat.as_bytes(",".join(outputs))
transforms_string = compat.as_bytes(" ".join(transforms))
with errors.raise_exception_on_not_ok_status() as status:
output_graph_def_string = TransformGraphWithStringInputs(
input_graph_def_string, inputs_string, outputs_string,
transforms_string, status)
output_graph_def = graph_pb2.GraphDef()
output_graph_def.ParseFromString(output_graph_def_string)
return output_graph_def
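
# --- Illustrative usage sketch (not part of the original module) ---
# Typical call pattern for TransformGraph: load a frozen GraphDef and apply a
# couple of transforms from the Graph Transform Tool. The file path and node
# names below are placeholders for illustration only.
if __name__ == "__main__":
  frozen_graph = graph_pb2.GraphDef()
  with open("/tmp/frozen_graph.pb", "rb") as f:  # placeholder path
    frozen_graph.ParseFromString(f.read())

  optimized_graph = TransformGraph(
      frozen_graph,
      inputs=["input"],     # placeholder input node name
      outputs=["output"],   # placeholder output node name
      transforms=["strip_unused_nodes", "fold_constants(ignore_errors=true)"])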
|
ChristinaZografou/sympy | refs/heads/parabola2 | sympy/polys/domains/finitefield.py | 117 | """Implementation of :class:`FiniteField` class. """
from __future__ import print_function, division
from sympy.polys.domains.field import Field
from sympy.polys.domains.simpledomain import SimpleDomain
from sympy.polys.domains.groundtypes import SymPyInteger
from sympy.polys.domains.modularinteger import ModularIntegerFactory
from sympy.polys.polyerrors import CoercionFailed
from sympy.utilities import public
@public
class FiniteField(Field, SimpleDomain):
"""General class for finite fields. """
rep = 'FF'
is_FiniteField = is_FF = True
is_Numerical = True
has_assoc_Ring = False
has_assoc_Field = True
dom = None
mod = None
def __init__(self, mod, dom=None, symmetric=True):
if mod <= 0:
raise ValueError('modulus must be a positive integer, got %s' % mod)
if dom is None:
from sympy.polys.domains import ZZ
dom = ZZ
self.dtype = ModularIntegerFactory(mod, dom, symmetric, self)
self.zero = self.dtype(0)
self.one = self.dtype(1)
self.dom = dom
self.mod = mod
def __str__(self):
return 'GF(%s)' % self.mod
def __hash__(self):
return hash((self.__class__.__name__, self.dtype, self.mod, self.dom))
def __eq__(self, other):
"""Returns ``True`` if two domains are equivalent. """
return isinstance(other, FiniteField) and \
self.mod == other.mod and self.dom == other.dom
def characteristic(self):
"""Return the characteristic of this domain. """
return self.mod
def get_field(self):
"""Returns a field associated with ``self``. """
return self
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return SymPyInteger(int(a))
def from_sympy(self, a):
"""Convert SymPy's Integer to SymPy's ``Integer``. """
if a.is_Integer:
return self.dtype(self.dom.dtype(int(a)))
elif a.is_Float and int(a) == a:
return self.dtype(self.dom.dtype(int(a)))
else:
raise CoercionFailed("expected an integer, got %s" % a)
def from_FF_python(K1, a, K0=None):
"""Convert ``ModularInteger(int)`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_python(a.val, K0.dom))
def from_ZZ_python(K1, a, K0=None):
"""Convert Python's ``int`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_python(a, K0))
def from_QQ_python(K1, a, K0=None):
"""Convert Python's ``Fraction`` to ``dtype``. """
if a.denominator == 1:
return K1.from_ZZ_python(a.numerator)
def from_FF_gmpy(K1, a, K0=None):
"""Convert ``ModularInteger(mpz)`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_gmpy(a.val, K0.dom))
def from_ZZ_gmpy(K1, a, K0=None):
"""Convert GMPY's ``mpz`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_gmpy(a, K0))
def from_QQ_gmpy(K1, a, K0=None):
"""Convert GMPY's ``mpq`` to ``dtype``. """
if a.denominator == 1:
return K1.from_ZZ_gmpy(a.numerator)
def from_RealField(K1, a, K0):
"""Convert mpmath's ``mpf`` to ``dtype``. """
p, q = K0.to_rational(a)
if q == 1:
            return K1.dtype(K1.dom.dtype(p))
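
# --- Illustrative usage sketch (not part of the original module) ---
# FiniteField is normally reached through ``sympy.polys.domains.GF``; the
# assertions below are a small worked example of arithmetic modulo 7.
if __name__ == '__main__':
    from sympy.polys.domains import GF

    F7 = GF(7)
    a, b = F7(3), F7(5)
    assert a + b == F7(1)   # 3 + 5 = 8, and 8 mod 7 = 1
    assert a * b == F7(1)   # 3 * 5 = 15, and 15 mod 7 = 1
    assert F7.characteristic() == 7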
|
LUTAN/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/reader_ops_test.py | 1 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Reader ops from io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gzip
import math
import os
import threading
import zlib
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
# pylint: disable=invalid-name
TFRecordCompressionType = tf_record.TFRecordCompressionType
# pylint: enable=invalid-name
# Edgar Allan Poe's 'Eldorado'
_TEXT = b"""Gaily bedight,
A gallant knight,
In sunshine and in shadow,
Had journeyed long,
Singing a song,
In search of Eldorado.
But he grew old
This knight so bold
And o'er his heart a shadow
Fell as he found
No spot of ground
That looked like Eldorado.
And, as his strength
Failed him at length,
He met a pilgrim shadow
'Shadow,' said he,
'Where can it be
This land of Eldorado?'
'Over the Mountains
Of the Moon'
Down the Valley of the Shadow,
Ride, boldly ride,'
The shade replied,
'If you seek for Eldorado!'
"""
class IdentityReaderTest(test.TestCase):
def _ExpectRead(self, sess, key, value, expected):
k, v = sess.run([key, value])
self.assertAllEqual(expected, k)
self.assertAllEqual(expected, v)
def testOneEpoch(self):
with self.test_session() as sess:
reader = io_ops.IdentityReader("test_reader")
work_completed = reader.num_work_units_completed()
produced = reader.num_records_produced()
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queued_length = queue.size()
key, value = reader.read(queue)
self.assertAllEqual(0, work_completed.eval())
self.assertAllEqual(0, produced.eval())
self.assertAllEqual(0, queued_length.eval())
queue.enqueue_many([["A", "B", "C"]]).run()
queue.close().run()
self.assertAllEqual(3, queued_length.eval())
self._ExpectRead(sess, key, value, b"A")
self.assertAllEqual(1, produced.eval())
self._ExpectRead(sess, key, value, b"B")
self._ExpectRead(sess, key, value, b"C")
self.assertAllEqual(3, produced.eval())
self.assertAllEqual(0, queued_length.eval())
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
self.assertAllEqual(3, work_completed.eval())
self.assertAllEqual(3, produced.eval())
self.assertAllEqual(0, queued_length.eval())
def testMultipleEpochs(self):
with self.test_session() as sess:
reader = io_ops.IdentityReader("test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
enqueue = queue.enqueue_many([["DD", "EE"]])
key, value = reader.read(queue)
enqueue.run()
self._ExpectRead(sess, key, value, b"DD")
self._ExpectRead(sess, key, value, b"EE")
enqueue.run()
self._ExpectRead(sess, key, value, b"DD")
self._ExpectRead(sess, key, value, b"EE")
enqueue.run()
self._ExpectRead(sess, key, value, b"DD")
self._ExpectRead(sess, key, value, b"EE")
queue.close().run()
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
def testSerializeRestore(self):
with self.test_session() as sess:
reader = io_ops.IdentityReader("test_reader")
produced = reader.num_records_produced()
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queue.enqueue_many([["X", "Y", "Z"]]).run()
key, value = reader.read(queue)
self._ExpectRead(sess, key, value, b"X")
self.assertAllEqual(1, produced.eval())
state = reader.serialize_state().eval()
self._ExpectRead(sess, key, value, b"Y")
self._ExpectRead(sess, key, value, b"Z")
self.assertAllEqual(3, produced.eval())
queue.enqueue_many([["Y", "Z"]]).run()
queue.close().run()
reader.restore_state(state).run()
self.assertAllEqual(1, produced.eval())
self._ExpectRead(sess, key, value, b"Y")
self._ExpectRead(sess, key, value, b"Z")
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
self.assertAllEqual(3, produced.eval())
self.assertEqual(bytes, type(state))
with self.assertRaises(ValueError):
reader.restore_state([])
with self.assertRaises(ValueError):
reader.restore_state([state, state])
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(state[1:]).run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(state[:-1]).run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(state + b"ExtraJunk").run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(b"PREFIX" + state).run()
with self.assertRaisesOpError(
"Could not parse state for IdentityReader 'test_reader'"):
reader.restore_state(b"BOGUS" + state[5:]).run()
def testReset(self):
with self.test_session() as sess:
reader = io_ops.IdentityReader("test_reader")
work_completed = reader.num_work_units_completed()
produced = reader.num_records_produced()
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queued_length = queue.size()
key, value = reader.read(queue)
queue.enqueue_many([["X", "Y", "Z"]]).run()
self._ExpectRead(sess, key, value, b"X")
self.assertLess(0, queued_length.eval())
self.assertAllEqual(1, produced.eval())
self._ExpectRead(sess, key, value, b"Y")
self.assertLess(0, work_completed.eval())
self.assertAllEqual(2, produced.eval())
reader.reset().run()
self.assertAllEqual(0, work_completed.eval())
self.assertAllEqual(0, produced.eval())
self.assertAllEqual(1, queued_length.eval())
self._ExpectRead(sess, key, value, b"Z")
queue.enqueue_many([["K", "L"]]).run()
self._ExpectRead(sess, key, value, b"K")
class WholeFileReaderTest(test.TestCase):
def setUp(self):
super(WholeFileReaderTest, self).setUp()
self._filenames = [
os.path.join(self.get_temp_dir(), "whole_file.%d.txt" % i)
for i in range(3)
]
self._content = [b"One\na\nb\n", b"Two\nC\nD", b"Three x, y, z"]
for fn, c in zip(self._filenames, self._content):
with open(fn, "wb") as h:
h.write(c)
def tearDown(self):
for fn in self._filenames:
os.remove(fn)
super(WholeFileReaderTest, self).tearDown()
def _ExpectRead(self, sess, key, value, index):
k, v = sess.run([key, value])
self.assertAllEqual(compat.as_bytes(self._filenames[index]), k)
self.assertAllEqual(self._content[index], v)
def testOneEpoch(self):
with self.test_session() as sess:
reader = io_ops.WholeFileReader("test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
queue.enqueue_many([self._filenames]).run()
queue.close().run()
key, value = reader.read(queue)
self._ExpectRead(sess, key, value, 0)
self._ExpectRead(sess, key, value, 1)
self._ExpectRead(sess, key, value, 2)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
sess.run([key, value])
def testInfiniteEpochs(self):
with self.test_session() as sess:
reader = io_ops.WholeFileReader("test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
enqueue = queue.enqueue_many([self._filenames])
key, value = reader.read(queue)
enqueue.run()
self._ExpectRead(sess, key, value, 0)
self._ExpectRead(sess, key, value, 1)
enqueue.run()
self._ExpectRead(sess, key, value, 2)
self._ExpectRead(sess, key, value, 0)
self._ExpectRead(sess, key, value, 1)
enqueue.run()
self._ExpectRead(sess, key, value, 2)
self._ExpectRead(sess, key, value, 0)
class TextLineReaderTest(test.TestCase):
def setUp(self):
super(TextLineReaderTest, self).setUp()
self._num_files = 2
self._num_lines = 5
def _LineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _CreateFiles(self, crlf=False):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
for j in range(self._num_lines):
f.write(self._LineText(i, j))
# Always include a newline after the record unless it is
# at the end of the file, in which case we include it sometimes.
if j + 1 != self._num_lines or i == 0:
f.write(b"\r\n" if crlf else b"\n")
return filenames
def _testOneEpoch(self, files):
with self.test_session() as sess:
reader = io_ops.TextLineReader(name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_lines):
k, v = sess.run([key, value])
self.assertAllEqual("%s:%d" % (files[i], j + 1), compat.as_text(k))
self.assertAllEqual(self._LineText(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def testOneEpochLF(self):
self._testOneEpoch(self._CreateFiles(crlf=False))
def testOneEpochCRLF(self):
self._testOneEpoch(self._CreateFiles(crlf=True))
def testSkipHeaderLines(self):
files = self._CreateFiles()
with self.test_session() as sess:
reader = io_ops.TextLineReader(skip_header_lines=1, name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_lines - 1):
k, v = sess.run([key, value])
self.assertAllEqual("%s:%d" % (files[i], j + 2), compat.as_text(k))
self.assertAllEqual(self._LineText(i, j + 1), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
class FixedLengthRecordReaderTest(test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTest, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
self._hop_bytes = 2
self._num_overlapped_records = 3
def _Record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _OverlappedRecord(self, f, r):
record_str = "".join(
[str(i)[0] for i in range(
r * self._hop_bytes, r * self._hop_bytes + self._record_bytes)])
return compat.as_bytes(record_str)
def _CreateFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._Record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
def _CreateOverlappedRecordFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_overlapped_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
all_records_str = "".join(
[str(i)[0] for i in range(
self._record_bytes + self._hop_bytes * (self._num_overlapped_records - 1))])
f.write(compat.as_bytes(all_records_str))
f.write(b"F" * self._footer_bytes)
return filenames
def testOneEpoch(self):
files = self._CreateFiles()
with self.test_session() as sess:
reader = io_ops.FixedLengthRecordReader(
header_bytes=self._header_bytes,
record_bytes=self._record_bytes,
footer_bytes=self._footer_bytes,
hop_bytes=0,
name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def testOneEpochWithHopBytes(self):
files = self._CreateOverlappedRecordFiles()
with self.test_session() as sess:
reader = io_ops.FixedLengthRecordReader(
header_bytes=self._header_bytes,
record_bytes=self._record_bytes,
footer_bytes=self._footer_bytes,
hop_bytes=self._hop_bytes,
name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_overlapped_records):
k, v = sess.run([key, value])
print(v)
self.assertAllEqual("%s:%d" % (files[i], j), compat.as_text(k))
self.assertAllEqual(self._OverlappedRecord(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
class TFRecordReaderTest(test.TestCase):
def setUp(self):
super(TFRecordReaderTest, self).setUp()
self._num_files = 2
self._num_records = 7
def _Record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _CreateFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = tf_record.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._Record(i, j))
return filenames
def testOneEpoch(self):
files = self._CreateFiles()
with self.test_session() as sess:
reader = io_ops.TFRecordReader(name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def testReadUpTo(self):
files = self._CreateFiles()
with self.test_session() as sess:
reader = io_ops.TFRecordReader(name="test_reader")
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
batch_size = 3
key, value = reader.read_up_to(queue, batch_size)
queue.enqueue_many([files]).run()
queue.close().run()
num_k = 0
num_v = 0
while True:
try:
k, v = sess.run([key, value])
# Test reading *up to* batch_size records
self.assertLessEqual(len(k), batch_size)
self.assertLessEqual(len(v), batch_size)
num_k += len(k)
num_v += len(v)
except errors_impl.OutOfRangeError:
break
# Test that we have read everything
self.assertEqual(self._num_files * self._num_records, num_k)
self.assertEqual(self._num_files * self._num_records, num_v)
def testReadZlibFiles(self):
files = self._CreateFiles()
zlib_files = []
for i, fn in enumerate(files):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
with self.test_session() as sess:
options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
reader = io_ops.TFRecordReader(name="test_reader", options=options)
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([zlib_files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % zlib_files[i]))
self.assertAllEqual(self._Record(i, j), v)
def testReadGzipFiles(self):
files = self._CreateFiles()
gzip_files = []
for i, fn in enumerate(files):
with open(fn, "rb") as f:
cdata = f.read()
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(zfn, "wb") as f:
f.write(cdata)
gzip_files.append(zfn)
with self.test_session() as sess:
options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
reader = io_ops.TFRecordReader(name="test_reader", options=options)
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([gzip_files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % gzip_files[i]))
self.assertAllEqual(self._Record(i, j), v)
class TFRecordWriterZlibTest(test.TestCase):
def setUp(self):
super(TFRecordWriterZlibTest, self).setUp()
self._num_files = 2
self._num_records = 7
def _Record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _CreateFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
writer = tf_record.TFRecordWriter(fn, options=options)
for j in range(self._num_records):
writer.write(self._Record(i, j))
writer.close()
del writer
return filenames
def _WriteRecordsToFile(self, records, name="tf_record"):
fn = os.path.join(self.get_temp_dir(), name)
writer = tf_record.TFRecordWriter(fn, options=None)
for r in records:
writer.write(r)
writer.close()
del writer
return fn
def _ZlibCompressFile(self, infile, name="tfrecord.z"):
# zlib compress the file and write compressed contents to file.
with open(infile, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), name)
with open(zfn, "wb") as f:
f.write(cdata)
return zfn
def testOneEpoch(self):
files = self._CreateFiles()
with self.test_session() as sess:
options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
reader = io_ops.TFRecordReader(name="test_reader", options=options)
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue_many([files]).run()
queue.close().run()
for i in range(self._num_files):
for j in range(self._num_records):
k, v = sess.run([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % files[i]))
self.assertAllEqual(self._Record(i, j), v)
with self.assertRaisesOpError("is closed and has insufficient elements "
"\\(requested 1, current size 0\\)"):
k, v = sess.run([key, value])
def testZLibFlushRecord(self):
fn = self._WriteRecordsToFile([b"small record"], "small_record")
with open(fn, "rb") as h:
buff = h.read()
# creating more blocks and trailing blocks shouldn't break reads
compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS)
output = b""
for c in buff:
if isinstance(c, int):
c = six.int2byte(c)
output += compressor.compress(c)
output += compressor.flush(zlib.Z_FULL_FLUSH)
output += compressor.flush(zlib.Z_FULL_FLUSH)
output += compressor.flush(zlib.Z_FULL_FLUSH)
output += compressor.flush(zlib.Z_FINISH)
# overwrite the original file with the compressed data
with open(fn, "wb") as h:
h.write(output)
with self.test_session() as sess:
options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
reader = io_ops.TFRecordReader(name="test_reader", options=options)
queue = data_flow_ops.FIFOQueue(1, [dtypes.string], shapes=())
key, value = reader.read(queue)
queue.enqueue(fn).run()
queue.close().run()
k, v = sess.run([key, value])
self.assertTrue(compat.as_text(k).startswith("%s:" % fn))
self.assertAllEqual(b"small record", v)
def testZlibReadWrite(self):
"""Verify that files produced are zlib compatible."""
original = [b"foo", b"bar"]
fn = self._WriteRecordsToFile(original, "zlib_read_write.tfrecord")
zfn = self._ZlibCompressFile(fn, "zlib_read_write.tfrecord.z")
# read the compressed contents and verify.
actual = []
for r in tf_record.tf_record_iterator(
zfn,
options=tf_record.TFRecordOptions(
tf_record.TFRecordCompressionType.ZLIB)):
actual.append(r)
self.assertEqual(actual, original)
def testZlibReadWriteLarge(self):
"""Verify that writing large contents also works."""
# Make it large (about 5MB)
original = [_TEXT * 10240]
fn = self._WriteRecordsToFile(original, "zlib_read_write_large.tfrecord")
zfn = self._ZlibCompressFile(fn, "zlib_read_write_large.tfrecord.z")
# read the compressed contents and verify.
actual = []
for r in tf_record.tf_record_iterator(
zfn,
options=tf_record.TFRecordOptions(
tf_record.TFRecordCompressionType.ZLIB)):
actual.append(r)
self.assertEqual(actual, original)
def testGzipReadWrite(self):
"""Verify that files produced are gzip compatible."""
original = [b"foo", b"bar"]
fn = self._WriteRecordsToFile(original, "gzip_read_write.tfrecord")
# gzip compress the file and write compressed contents to file.
with open(fn, "rb") as f:
cdata = f.read()
gzfn = os.path.join(self.get_temp_dir(), "tf_record.gz")
with gzip.GzipFile(gzfn, "wb") as f:
f.write(cdata)
actual = []
for r in tf_record.tf_record_iterator(
gzfn, options=tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)):
actual.append(r)
self.assertEqual(actual, original)
class TFRecordIteratorTest(test.TestCase):
def setUp(self):
super(TFRecordIteratorTest, self).setUp()
self._num_records = 7
def _Record(self, r):
return compat.as_bytes("Record %d" % r)
def _WriteCompressedRecordsToFile(
self,
records,
name="tfrecord.z",
compression_type=tf_record.TFRecordCompressionType.ZLIB):
fn = os.path.join(self.get_temp_dir(), name)
options = tf_record.TFRecordOptions(compression_type=compression_type)
writer = tf_record.TFRecordWriter(fn, options=options)
for r in records:
writer.write(r)
writer.close()
del writer
return fn
def _ZlibDecompressFile(self, infile, name="tfrecord", wbits=zlib.MAX_WBITS):
with open(infile, "rb") as f:
cdata = zlib.decompress(f.read(), wbits)
zfn = os.path.join(self.get_temp_dir(), name)
with open(zfn, "wb") as f:
f.write(cdata)
return zfn
def testIterator(self):
fn = self._WriteCompressedRecordsToFile(
[self._Record(i) for i in range(self._num_records)],
"compressed_records")
options = tf_record.TFRecordOptions(
compression_type=TFRecordCompressionType.ZLIB)
reader = tf_record.tf_record_iterator(fn, options)
for i in range(self._num_records):
record = next(reader)
self.assertAllEqual(self._Record(i), record)
with self.assertRaises(StopIteration):
record = next(reader)
def testWriteZlibRead(self):
"""Verify compression with TFRecordWriter is zlib library compatible."""
original = [b"foo", b"bar"]
fn = self._WriteCompressedRecordsToFile(original,
"write_zlib_read.tfrecord.z")
zfn = self._ZlibDecompressFile(fn, "write_zlib_read.tfrecord")
actual = []
for r in tf_record.tf_record_iterator(zfn):
actual.append(r)
self.assertEqual(actual, original)
def testWriteZlibReadLarge(self):
"""Verify compression for large records is zlib library compatible."""
# Make it large (about 5MB)
original = [_TEXT * 10240]
fn = self._WriteCompressedRecordsToFile(original,
"write_zlib_read_large.tfrecord.z")
zfn = self._ZlibDecompressFile(fn, "write_zlib_read_large.tf_record")
actual = []
for r in tf_record.tf_record_iterator(zfn):
actual.append(r)
self.assertEqual(actual, original)
def testWriteGzipRead(self):
original = [b"foo", b"bar"]
fn = self._WriteCompressedRecordsToFile(
original,
"write_gzip_read.tfrecord.gz",
compression_type=TFRecordCompressionType.GZIP)
with gzip.GzipFile(fn, "rb") as f:
cdata = f.read()
zfn = os.path.join(self.get_temp_dir(), "tf_record")
with open(zfn, "wb") as f:
f.write(cdata)
actual = []
for r in tf_record.tf_record_iterator(zfn):
actual.append(r)
self.assertEqual(actual, original)
def testBadFile(self):
"""Verify that tf_record_iterator throws an exception on bad TFRecords."""
fn = os.path.join(self.get_temp_dir(), "bad_file")
with tf_record.TFRecordWriter(fn) as writer:
writer.write(b"123")
fn_truncated = os.path.join(self.get_temp_dir(), "bad_file_truncated")
with open(fn, "rb") as f:
with open(fn_truncated, "wb") as f2:
# DataLossError requires that we've written the header, so this must
# be at least 12 bytes.
f2.write(f.read(14))
with self.assertRaises(errors_impl.DataLossError):
for _ in tf_record.tf_record_iterator(fn_truncated):
pass
class AsyncReaderTest(test.TestCase):
def testNoDeadlockFromQueue(self):
"""Tests that reading does not block main execution threads."""
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
thread_data_t = collections.namedtuple("thread_data_t",
["thread", "queue", "output"])
thread_data = []
# Create different readers, each with its own queue.
for i in range(3):
queue = data_flow_ops.FIFOQueue(99, [dtypes.string], shapes=())
reader = io_ops.TextLineReader()
_, line = reader.read(queue)
output = []
t = threading.Thread(
target=AsyncReaderTest._RunSessionAndSave,
args=(sess, [line], output))
thread_data.append(thread_data_t(t, queue, output))
# Start all readers. They are all blocked waiting for queue entries.
sess.run(variables.global_variables_initializer())
for d in thread_data:
d.thread.start()
# Unblock the readers.
for i, d in enumerate(reversed(thread_data)):
fname = os.path.join(self.get_temp_dir(), "deadlock.%s.txt" % i)
with open(fname, "wb") as f:
f.write(("file-%s" % i).encode())
d.queue.enqueue_many([[fname]]).run()
d.thread.join()
self.assertEqual([[("file-%s" % i).encode()]], d.output)
@staticmethod
def _RunSessionAndSave(sess, args, output):
output.append(sess.run(args))
if __name__ == "__main__":
test.main()
|
chienlieu2017/it_management | refs/heads/master | odoo/addons/mrp/models/mrp_production.py | 1 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from collections import defaultdict
import math
from odoo import api, fields, models, _
from odoo.addons import decimal_precision as dp
from odoo.exceptions import UserError
class MrpProduction(models.Model):
""" Manufacturing Orders """
_name = 'mrp.production'
_description = 'Manufacturing Order'
_date_name = 'date_planned_start'
_inherit = ['mail.thread', 'ir.needaction_mixin']
_order = 'date_planned_start asc,id'
@api.model
def _get_default_picking_type(self):
return self.env['stock.picking.type'].search([
('code', '=', 'mrp_operation'),
('warehouse_id.company_id', 'in', [self.env.context.get('company_id', self.env.user.company_id.id), False])],
limit=1).id
@api.model
def _get_default_location_src_id(self):
location = False
if self._context.get('default_picking_type_id'):
location = self.env['stock.picking.type'].browse(self.env.context['default_picking_type_id']).default_location_src_id
if not location:
location = self.env.ref('stock.stock_location_stock', raise_if_not_found=False)
return location and location.id or False
@api.model
def _get_default_location_dest_id(self):
location = False
if self._context.get('default_picking_type_id'):
location = self.env['stock.picking.type'].browse(self.env.context['default_picking_type_id']).default_location_dest_id
if not location:
location = self.env.ref('stock.stock_location_stock', raise_if_not_found=False)
return location and location.id or False
name = fields.Char(
'Reference', copy=False, readonly=True, default=lambda x: _('New'))
origin = fields.Char(
'Source', copy=False,
help="Reference of the document that generated this production order request.")
product_id = fields.Many2one(
'product.product', 'Product',
domain=[('type', 'in', ['product', 'consu'])],
readonly=True, required=True,
states={'confirmed': [('readonly', False)]})
product_tmpl_id = fields.Many2one('product.template', 'Product Template', related='product_id.product_tmpl_id')
product_qty = fields.Float(
'Quantity To Produce',
default=1.0, digits=dp.get_precision('Product Unit of Measure'),
readonly=True, required=True,
states={'confirmed': [('readonly', False)]})
product_uom_id = fields.Many2one(
'product.uom', 'Product Unit of Measure',
oldname='product_uom', readonly=True, required=True,
states={'confirmed': [('readonly', False)]})
picking_type_id = fields.Many2one(
'stock.picking.type', 'Picking Type',
default=_get_default_picking_type, required=True)
location_src_id = fields.Many2one(
'stock.location', 'Raw Materials Location',
default=_get_default_location_src_id,
readonly=True, required=True,
states={'confirmed': [('readonly', False)]},
help="Location where the system will look for components.")
location_dest_id = fields.Many2one(
'stock.location', 'Finished Products Location',
default=_get_default_location_dest_id,
readonly=True, required=True,
states={'confirmed': [('readonly', False)]},
help="Location where the system will stock the finished products.")
date_planned_start = fields.Datetime(
'Deadline Start', copy=False, default=fields.Datetime.now,
index=True, required=True,
states={'confirmed': [('readonly', False)]}, oldname="date_planned")
date_planned_finished = fields.Datetime(
'Deadline End', copy=False, default=fields.Datetime.now,
index=True,
states={'confirmed': [('readonly', False)]})
date_start = fields.Datetime('Start Date', copy=False, index=True, readonly=True)
date_finished = fields.Datetime('End Date', copy=False, index=True, readonly=True)
bom_id = fields.Many2one(
'mrp.bom', 'Bill of Material',
readonly=True, states={'confirmed': [('readonly', False)]},
help="Bill of Materials allow you to define the list of required raw materials to make a finished product.")
routing_id = fields.Many2one(
'mrp.routing', 'Routing',
readonly=True, related='bom_id.routing_id', store=True,
help="The list of operations (list of work centers) to produce the finished product. The routing "
"is mainly used to compute work center costs during operations and to plan future loads on "
"work centers based on production planning.")
move_raw_ids = fields.One2many(
'stock.move', 'raw_material_production_id', 'Raw Materials', oldname='move_lines',
copy=False, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
domain=[('scrapped', '=', False)])
move_finished_ids = fields.One2many(
'stock.move', 'production_id', 'Finished Products',
copy=False, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]},
domain=[('scrapped', '=', False)])
workorder_ids = fields.One2many(
'mrp.workorder', 'production_id', 'Work Orders',
copy=False, oldname='workcenter_lines', readonly=True)
workorder_count = fields.Integer('# Work Orders', compute='_compute_workorder_count')
workorder_done_count = fields.Integer('# Done Work Orders', compute='_compute_workorder_done_count')
state = fields.Selection([
('confirmed', 'Confirmed'),
('planned', 'Planned'),
('progress', 'In Progress'),
('done', 'Done'),
('cancel', 'Cancelled')], string='State',
copy=False, default='confirmed', track_visibility='onchange')
availability = fields.Selection([
('assigned', 'Available'),
('partially_available', 'Partially Available'),
('waiting', 'Waiting'),
('none', 'None')], string='Availability',
compute='_compute_availability', store=True)
post_visible = fields.Boolean(
'Inventory Post Visible', compute='_compute_post_visible',
help='Technical field to check when we can post')
user_id = fields.Many2one('res.users', 'Responsible', default=lambda self: self._uid)
company_id = fields.Many2one(
'res.company', 'Company',
default=lambda self: self.env['res.company']._company_default_get('mrp.production'),
required=True)
check_to_done = fields.Boolean(compute="_get_produced_qty", string="Check Produced Qty",
help="Technical Field to see if we can show 'Mark as Done' button")
qty_produced = fields.Float(compute="_get_produced_qty", string="Quantity Produced")
procurement_group_id = fields.Many2one(
'procurement.group', 'Procurement Group',
copy=False)
procurement_ids = fields.One2many('procurement.order', 'production_id', 'Related Procurements')
propagate = fields.Boolean(
'Propagate cancel and split',
        help='If checked, when the previous move of this move (which was generated by a next procurement) is cancelled or split, the move generated by this move will be cancelled or split as well')
has_moves = fields.Boolean(compute='_has_moves')
scrap_ids = fields.One2many('stock.scrap', 'production_id', 'Scraps')
scrap_count = fields.Integer(compute='_compute_scrap_move_count', string='Scrap Move')
priority = fields.Selection([('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')], 'Priority',
readonly=True, states={'confirmed': [('readonly', False)]}, default='1')
@api.multi
@api.depends('workorder_ids')
def _compute_workorder_count(self):
data = self.env['mrp.workorder'].read_group([('production_id', 'in', self.ids)], ['production_id'], ['production_id'])
count_data = dict((item['production_id'][0], item['production_id_count']) for item in data)
for production in self:
production.workorder_count = count_data.get(production.id, 0)
@api.multi
@api.depends('workorder_ids.state')
def _compute_workorder_done_count(self):
data = self.env['mrp.workorder'].read_group([
('production_id', 'in', self.ids),
('state', '=', 'done')], ['production_id'], ['production_id'])
count_data = dict((item['production_id'][0], item['production_id_count']) for item in data)
for production in self:
production.workorder_done_count = count_data.get(production.id, 0)
@api.multi
@api.depends('move_raw_ids.state', 'move_raw_ids.partially_available', 'workorder_ids.move_raw_ids', 'bom_id.ready_to_produce')
def _compute_availability(self):
for order in self:
if not order.move_raw_ids:
order.availability = 'none'
continue
if order.bom_id.ready_to_produce == 'all_available':
order.availability = any(move.state not in ('assigned', 'done', 'cancel') for move in order.move_raw_ids) and 'waiting' or 'assigned'
else:
partial_list = [x.partially_available and x.state in ('waiting', 'confirmed', 'assigned') for x in order.move_raw_ids]
assigned_list = [x.state in ('assigned', 'done', 'cancel') for x in order.move_raw_ids]
order.availability = (all(assigned_list) and 'assigned') or (any(partial_list) and 'partially_available') or 'waiting'
@api.multi
@api.depends('move_raw_ids.quantity_done', 'move_finished_ids.quantity_done')
def _compute_post_visible(self):
for order in self:
order.post_visible = any(order.move_raw_ids.filtered(lambda x: (x.quantity_done) > 0 and (x.state not in ['done', 'cancel']))) or \
any(order.move_finished_ids.filtered(lambda x: (x.quantity_done) > 0 and (x.state not in ['done', 'cancel'])))
@api.multi
@api.depends('workorder_ids.state', 'move_finished_ids')
def _get_produced_qty(self):
for production in self:
done_moves = production.move_finished_ids.filtered(lambda x: x.state != 'cancel' and x.product_id.id == production.product_id.id)
qty_produced = sum(done_moves.mapped('quantity_done'))
wo_done = True
if any([x.state not in ('done', 'cancel') for x in production.workorder_ids]):
wo_done = False
production.check_to_done = done_moves and (qty_produced >= production.product_qty) and (production.state not in ('done', 'cancel')) and wo_done
production.qty_produced = qty_produced
return True
@api.multi
@api.depends('move_raw_ids')
def _has_moves(self):
for mo in self:
mo.has_moves = any(mo.move_raw_ids)
@api.multi
def _compute_scrap_move_count(self):
data = self.env['stock.scrap'].read_group([('production_id', 'in', self.ids)], ['production_id'], ['production_id'])
count_data = dict((item['production_id'][0], item['production_id_count']) for item in data)
for production in self:
production.scrap_count = count_data.get(production.id, 0)
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
('qty_positive', 'check (product_qty > 0)', 'The quantity to produce must be positive!'),
]
@api.onchange('product_id', 'picking_type_id', 'company_id')
def onchange_product_id(self):
""" Finds UoM of changed product. """
if not self.product_id:
self.bom_id = False
else:
bom = self.env['mrp.bom']._bom_find(product=self.product_id, picking_type=self.picking_type_id, company_id=self.company_id.id)
if bom.type == 'normal':
self.bom_id = bom.id
else:
self.bom_id = False
self.product_uom_id = self.product_id.uom_id.id
return {'domain': {'product_uom_id': [('category_id', '=', self.product_id.uom_id.category_id.id)]}}
@api.onchange('picking_type_id')
def onchange_picking_type(self):
location = self.env.ref('stock.stock_location_stock')
self.location_src_id = self.picking_type_id.default_location_src_id.id or location.id
self.location_dest_id = self.picking_type_id.default_location_dest_id.id or location.id
@api.onchange('bom_id')
def onchange_bom_id(self):
self.routing_id = self.bom_id.routing_id.id
@api.model
def create(self, values):
if not values.get('name', False) or values['name'] == _('New'):
values['name'] = self.env['ir.sequence'].next_by_code('mrp.production') or _('New')
if not values.get('procurement_group_id'):
values['procurement_group_id'] = self.env["procurement.group"].create({'name': values['name']}).id
production = super(MrpProduction, self).create(values)
production._generate_moves()
return production
@api.multi
def unlink(self):
if any(production.state != 'cancel' for production in self):
raise UserError(_('Cannot delete a manufacturing order not in cancel state'))
return super(MrpProduction, self).unlink()
@api.multi
def _generate_moves(self):
for production in self:
production._generate_finished_moves()
factor = production.product_uom_id._compute_quantity(production.product_qty, production.bom_id.product_uom_id) / production.bom_id.product_qty
boms, lines = production.bom_id.explode(production.product_id, factor, picking_type=production.bom_id.picking_type_id)
production._generate_raw_moves(lines)
# Check for all draft moves whether they are mto or not
self._adjust_procure_method()
self.move_raw_ids.action_confirm()
return True
def _generate_finished_moves(self):
move = self.env['stock.move'].create({
'name': self.name,
'date': self.date_planned_start,
'date_expected': self.date_planned_start,
'product_id': self.product_id.id,
'product_uom': self.product_uom_id.id,
'product_uom_qty': self.product_qty,
'location_id': self.product_id.property_stock_production.id,
'location_dest_id': self.location_dest_id.id,
'move_dest_id': self.procurement_ids and self.procurement_ids[0].move_dest_id.id or False,
'procurement_id': self.procurement_ids and self.procurement_ids[0].id or False,
'company_id': self.company_id.id,
'production_id': self.id,
'origin': self.name,
'group_id': self.procurement_group_id.id,
})
move.action_confirm()
return move
def _generate_raw_moves(self, exploded_lines):
self.ensure_one()
moves = self.env['stock.move']
for bom_line, line_data in exploded_lines:
moves += self._generate_raw_move(bom_line, line_data)
return moves
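# Helper below: create one raw component move per exploded BoM line. Lines pointing to a
# phantom child BoM are skipped (their components are exploded separately) and only stockable
# or consumable products generate moves; 'unit_factor' keeps the consumed quantity
# proportional to the quantity still to be produced.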
def _generate_raw_move(self, bom_line, line_data):
quantity = line_data['qty']
# alt_op needed for the case when you explode phantom bom and all the lines will be consumed in the operation given by the parent bom line
alt_op = line_data['parent_line'] and line_data['parent_line'].operation_id.id or False
if bom_line.child_bom_id and bom_line.child_bom_id.type == 'phantom':
return self.env['stock.move']
if bom_line.product_id.type not in ['product', 'consu']:
return self.env['stock.move']
if self.bom_id.routing_id and self.bom_id.routing_id.location_id:
source_location = self.bom_id.routing_id.location_id
else:
source_location = self.location_src_id
original_quantity = self.product_qty - self.qty_produced
data = {
'name': self.name,
'date': self.date_planned_start,
'bom_line_id': bom_line.id,
'product_id': bom_line.product_id.id,
'product_uom_qty': quantity,
'product_uom': bom_line.product_uom_id.id,
'location_id': source_location.id,
'location_dest_id': self.product_id.property_stock_production.id,
'raw_material_production_id': self.id,
'company_id': self.company_id.id,
'operation_id': bom_line.operation_id.id or alt_op,
'price_unit': bom_line.product_id.standard_price,
'procure_method': 'make_to_stock',
'origin': self.name,
'warehouse_id': source_location.get_warehouse().id,
'group_id': self.procurement_group_id.id,
'propagate': self.propagate,
'unit_factor': quantity / original_quantity,
}
return self.env['stock.move'].create(data)
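# Decide, for each raw move, whether it should be procured make-to-order or make-to-stock:
# a procurement rule matching the move's route and locations wins; when no rule matches,
# the move falls back to make-to-order only if the MTO route is set on the product or its category.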
@api.multi
def _adjust_procure_method(self):
try:
mto_route = self.env['stock.warehouse']._get_mto_route()
except:
mto_route = False
for move in self.move_raw_ids:
product = move.product_id
routes = product.route_ids + product.categ_id.route_ids
# TODO: optimize with read_group?
pull = self.env['procurement.rule'].search([('route_id', 'in', [x.id for x in routes]), ('location_src_id', '=', move.location_id.id),
('location_id', '=', move.location_dest_id.id)], limit=1)
if pull and (pull.procure_method == 'make_to_order'):
move.procure_method = pull.procure_method
elif not pull: # If there is no make_to_stock rule either
if mto_route and mto_route.id in [x.id for x in routes]:
move.procure_method = 'make_to_order'
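# Update the quantity of the existing raw move matching a BoM line, cancel and unlink it when
# the new quantity drops to zero (unless some quantity was already consumed), or create a new
# raw move if none exists yet for that line.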
@api.multi
def _update_raw_move(self, bom_line, line_data):
quantity = line_data['qty']
self.ensure_one()
move = self.move_raw_ids.filtered(lambda x: x.bom_line_id.id == bom_line.id and x.state not in ('done', 'cancel'))
if move:
if quantity > 0:
move[0].write({'product_uom_qty': quantity})
else:
if move[0].quantity_done > 0:
raise UserError(_('Lines need to be deleted, but cannot be deleted as there are still quantities to consume in them.'))
move[0].action_cancel()
move[0].unlink()
return move
else:
self._generate_raw_move(bom_line, line_data)
@api.multi
def action_assign(self):
for production in self:
move_to_assign = production.move_raw_ids.filtered(lambda x: x.state in ('confirmed', 'waiting', 'assigned'))
move_to_assign.action_assign()
return True
@api.multi
def open_produce_product(self):
self.ensure_one()
action = self.env.ref('mrp.act_mrp_product_produce').read()[0]
return action
@api.multi
def button_plan(self):
""" Create work orders. And probably do stuff, like things. """
orders_to_plan = self.filtered(lambda order: order.routing_id and order.state == 'confirmed')
for order in orders_to_plan:
quantity = order.product_uom_id._compute_quantity(order.product_qty, order.bom_id.product_uom_id) / order.bom_id.product_qty
boms, lines = order.bom_id.explode(order.product_id, quantity, picking_type=order.bom_id.picking_type_id)
order._generate_workorders(boms)
orders_to_plan.write({'state': 'planned'})
@api.multi
def _generate_workorders(self, exploded_boms):
workorders = self.env['mrp.workorder']
for bom, bom_data in exploded_boms:
# If the routing of the parent BoM and phantom BoM are the same, don't recreate work orders, but use one master routing
if bom.routing_id.id and (not bom_data['parent_line'] or bom_data['parent_line'].bom_id.routing_id.id != bom.routing_id.id):
workorders += self._workorders_create(bom, bom_data)
return workorders
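# For each routing operation, the expected duration below is the work center's setup
# (time_start) plus cleanup (time_stop) time, plus the cycle time scaled by the number of
# capacity batches and corrected by the work center efficiency (time_cycle * 100 / efficiency).
# Work orders are chained through 'next_work_order_id'; only the first one starts as 'ready'.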
def _workorders_create(self, bom, bom_data):
"""
:param bom: in case of recursive boms: we could create work orders for child
BoMs
"""
workorders = self.env['mrp.workorder']
bom_qty = bom_data['qty']
# Initial qty producing
if self.product_id.tracking == 'serial':
quantity = 1.0
else:
quantity = self.product_qty - sum(self.move_finished_ids.mapped('quantity_done'))
quantity = quantity if (quantity > 0) else 0
for operation in bom.routing_id.operation_ids:
# create workorder
cycle_number = math.ceil(bom_qty / operation.workcenter_id.capacity) # TODO: float_round UP
duration_expected = (operation.workcenter_id.time_start +
operation.workcenter_id.time_stop +
cycle_number * operation.time_cycle * 100.0 / operation.workcenter_id.time_efficiency)
workorder = workorders.create({
'name': operation.name,
'production_id': self.id,
'workcenter_id': operation.workcenter_id.id,
'operation_id': operation.id,
'duration_expected': duration_expected,
'state': len(workorders) == 0 and 'ready' or 'pending',
'qty_producing': quantity,
'capacity': operation.workcenter_id.capacity,
})
if workorders:
workorders[-1].next_work_order_id = workorder.id
workorders += workorder
# assign moves; the last operation receives all moves that are not linked to a specific operation
moves_raw = self.move_raw_ids.filtered(lambda move: move.operation_id == operation)
if len(workorders) == len(bom.routing_id.operation_ids):
moves_raw |= self.move_raw_ids.filtered(lambda move: not move.operation_id)
moves_finished = self.move_finished_ids.filtered(lambda move: move.operation_id == operation) #TODO: code does nothing, unless maybe by_products?
moves_raw.mapped('move_lot_ids').write({'workorder_id': workorder.id})
(moves_finished + moves_raw).write({'workorder_id': workorder.id})
workorder._generate_lot_ids()
return workorders
@api.multi
def action_cancel(self):
""" Cancels production order, unfinished stock moves and set procurement
orders in exception """
if any(workorder.state == 'progress' for workorder in self.mapped('workorder_ids')):
raise UserError(_('You cannot cancel a production order while a work order is still in progress.'))
ProcurementOrder = self.env['procurement.order']
for production in self:
production.workorder_ids.filtered(lambda x: x.state != 'cancel').action_cancel()
finish_moves = production.move_finished_ids.filtered(lambda x: x.state not in ('done', 'cancel'))
production.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')).action_cancel()
finish_moves.action_cancel()
procurements = ProcurementOrder.search([('move_dest_id', 'in', finish_moves.ids)])
if procurements:
procurements.cancel()
# Put the procurements related to the cancelled finished moves in exception
ProcurementOrder.search([('production_id', 'in', self.ids)]).write({'state': 'exception'})
self.write({'state': 'cancel'})
return True
@api.multi
def _cal_price(self, consumed_moves):
return True
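# post_inventory() validates the raw moves first and then the finished moves; afterwards, for
# traceability, the quants produced by each finished move are linked (per produced lot when the
# product is tracked) to the quants that were consumed by the raw moves.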
@api.multi
def post_inventory(self):
for order in self:
moves_to_do = order.move_raw_ids
moves_to_do.action_done()
#order.move_finished_ids.filtered(lambda x: x.state not in ('done','cancel')).move_validate()
order._cal_price(moves_to_do)
moves_to_finish = order.move_finished_ids
moves_to_finish.action_done()
for move in moves_to_finish:
#Group quants by lots
lot_quants = {}
raw_lot_quants = {}
quants = self.env['stock.quant']
if move.has_tracking != 'none':
for quant in move.quant_ids:
lot_quants.setdefault(quant.lot_id.id, self.env['stock.quant'])
raw_lot_quants.setdefault(quant.lot_id.id, self.env['stock.quant'])
lot_quants[quant.lot_id.id] |= quant
for move_raw in moves_to_do:
if (move.has_tracking != 'none') and (move_raw.has_tracking != 'none'):
for lot in lot_quants:
lots = move_raw.move_lot_ids.filtered(lambda x: x.lot_produced_id.id == lot).mapped('lot_id')
raw_lot_quants[lot] |= move_raw.quant_ids.filtered(lambda x: (x.lot_id in lots) and (x.qty > 0.0))
else:
quants |= move_raw.quant_ids.filtered(lambda x: x.qty > 0.0)
if move.has_tracking != 'none':
for lot in lot_quants:
lot_quants[lot].sudo().write({'consumed_quant_ids': [(6, 0, [x.id for x in raw_lot_quants[lot] | quants])]})
else:
move.quant_ids.sudo().write({'consumed_quant_ids': [(6, 0, [x.id for x in quants])]})
order.action_assign()
return True
@api.multi
def button_mark_done(self):
self.ensure_one()
for wo in self.workorder_ids:
if wo.time_ids.filtered(lambda x: (not x.date_end) and (x.loss_type in ('productive', 'performance'))):
raise UserError(_('Work order %s is still running') % wo.name)
self.post_inventory()
moves_to_cancel = (self.move_raw_ids | self.move_finished_ids).filtered(lambda x: x.state not in ('done', 'cancel'))
moves_to_cancel.action_cancel()
self.write({'state': 'done', 'date_finished': fields.Datetime.now()})
self.env["procurement.order"].search([('production_id', 'in', self.ids)]).check()
self.write({'state': 'done'})
@api.multi
def do_unreserve(self):
for production in self:
production.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')).do_unreserve()
@api.multi
def button_unreserve(self):
self.ensure_one()
self.do_unreserve()
@api.multi
def button_scrap(self):
self.ensure_one()
return {
'name': _('Scrap'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.scrap',
'view_id': self.env.ref('stock.stock_scrap_form_view2').id,
'type': 'ir.actions.act_window',
'context': {'default_production_id': self.id,
'product_ids': (self.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) | self.move_finished_ids.filtered(lambda x: x.state == 'done')).mapped('product_id').ids,
},
'target': 'new',
}
@api.multi
def action_see_move_scrap(self):
self.ensure_one()
action = self.env.ref('stock.action_stock_scrap').read()[0]
action['domain'] = [('production_id', '=', self.id)]
return action
|
simongoffin/my_odoo_tutorial | refs/heads/master | addons/purchase_requisition/__openerp__.py | 61 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Purchase Requisitions',
'version': '0.1',
'author': 'OpenERP SA',
'category': 'Purchase Management',
'images': ['images/purchase_requisitions.jpeg'],
'website': 'http://www.openerp.com',
'description': """
This module allows you to manage your Purchase Requisition.
===========================================================
When a purchase order is created, you now have the opportunity to save the
related requisition. This new object regroups the related purchase orders and
allows you to easily keep track of and order them.
""",
'depends' : ['purchase'],
'demo': ['purchase_requisition_demo.xml'],
'data': ['views/purchase_requisition.xml',
'security/purchase_tender.xml',
'wizard/purchase_requisition_partner_view.xml',
'wizard/bid_line_qty_view.xml',
'purchase_requisition_data.xml',
'purchase_requisition_view.xml',
'purchase_requisition_report.xml',
'purchase_requisition_workflow.xml',
'security/ir.model.access.csv','purchase_requisition_sequence.xml',
'views/report_purchaserequisition.xml',
],
'auto_install': False,
'test': [
'test/purchase_requisition_users.yml',
'test/purchase_requisition_demo.yml',
'test/cancel_purchase_requisition.yml',
'test/purchase_requisition.yml',
],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
iam-TJ/node-gyp | refs/heads/master | gyp/test/mac/gyptest-xcode-env-order.py | 86 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that dependent Xcode settings are processed correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'xcode-env-order'
INFO_PLIST_PATH = 'Test.app/Contents/Info.plist'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
# Env vars in 'copies' filenames.
test.built_file_must_exist('Test-copy-brace/main.c', chdir=CHDIR)
test.built_file_must_exist('Test-copy-paren/main.c', chdir=CHDIR)
test.built_file_must_exist('Test-copy-bare/main.c', chdir=CHDIR)
# Env vars in 'actions' filenames and inline actions
test.built_file_must_exist('action-copy-brace.txt', chdir=CHDIR)
test.built_file_must_exist('action-copy-paren.txt', chdir=CHDIR)
test.built_file_must_exist('action-copy-bare.txt', chdir=CHDIR)
# Env vars in 'rules' filenames and inline actions
test.built_file_must_exist('rule-copy-brace.txt', chdir=CHDIR)
test.built_file_must_exist('rule-copy-paren.txt', chdir=CHDIR)
# TODO: see comment in test.gyp for this file.
#test.built_file_must_exist('rule-copy-bare.txt', chdir=CHDIR)
# Env vars in Info.plist.
info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR)
test.must_exist(info_plist)
test.must_contain(info_plist, '''\
\t<key>BraceProcessedKey1</key>
\t<string>D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>BraceProcessedKey2</key>
\t<string>/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>BraceProcessedKey3</key>
\t<string>com.apple.product-type.application:D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>ParenProcessedKey1</key>
\t<string>D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>ParenProcessedKey2</key>
\t<string>/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>ParenProcessedKey3</key>
\t<string>com.apple.product-type.application:D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>BareProcessedKey1</key>
\t<string>D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>BareProcessedKey2</key>
\t<string>/Source/Project/Test</string>''')
# NOTE: For bare variables, $PRODUCT_TYPE is not replaced! It _is_ replaced
# if it's not right at the start of the string (e.g. ':$PRODUCT_TYPE'), so
# this looks like an Xcode bug. This bug isn't emulated (yet?), so check this
# only for Xcode.
if test.format == 'xcode':
test.must_contain(info_plist, '''\
\t<key>BareProcessedKey3</key>
\t<string>$PRODUCT_TYPE:D:/Source/Project/Test</string>''')
test.must_contain(info_plist, '''\
\t<key>MixedProcessedKey</key>
\t<string>/Source/Project:Test:mh_execute</string>''')
test.pass_test()
|
Simran-B/arangodb | refs/heads/docs_3.0 | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/Demos/dyndlg.py | 40 | # dyndlg.py
# contributed by Curt Hagenlocher <[email protected]>
# Dialog Template params:
# Parameter 0 - Window caption
# Parameter 1 - Bounds (rect tuple)
# Parameter 2 - Window style
# Parameter 3 - Extended style
# Parameter 4 - Font tuple
# Parameter 5 - Menu name
# Parameter 6 - Window class
# Dialog item params:
# Parameter 0 - Window class
# Parameter 1 - Text
# Parameter 2 - ID
# Parameter 3 - Bounds
# Parameter 4 - Style
# Parameter 5 - Extended style
# Parameter 6 - Extra data
import win32ui
import win32con
from pywin.mfc import dialog, window
def MakeDlgTemplate():
style = win32con.DS_MODALFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT
cs = win32con.WS_CHILD | win32con.WS_VISIBLE
dlg = [ ["Select Warehouse", (0, 0, 177, 93), style, None, (8, "MS Sans Serif")], ]
dlg.append([130, "Current Warehouse:", -1, (7, 7, 69, 9), cs | win32con.SS_LEFT])
dlg.append([130, "ASTORIA", 128, (16, 17, 99, 7), cs | win32con.SS_LEFT])
dlg.append([130, "New &Warehouse:", -1, (7, 29, 69, 9), cs | win32con.SS_LEFT])
s = win32con.WS_TABSTOP | cs
# dlg.append([131, None, 130, (5, 40, 110, 48),
# s | win32con.LBS_NOTIFY | win32con.LBS_SORT | win32con.LBS_NOINTEGRALHEIGHT | win32con.WS_VSCROLL | win32con.WS_BORDER])
dlg.append(["{8E27C92B-1264-101C-8A2F-040224009C02}", None, 131, (5, 40, 110, 48),win32con.WS_TABSTOP])
dlg.append([128, "OK", win32con.IDOK, (124, 5, 50, 14), s | win32con.BS_DEFPUSHBUTTON])
s = win32con.BS_PUSHBUTTON | s
dlg.append([128, "Cancel", win32con.IDCANCEL, (124, 22, 50, 14), s])
dlg.append([128, "&Help", 100, (124, 74, 50, 14), s])
return dlg
def test1():
win32ui.CreateDialogIndirect( MakeDlgTemplate() ).DoModal()
def test2():
dialog.Dialog( MakeDlgTemplate() ).DoModal()
def test3():
dlg = win32ui.LoadDialogResource(win32ui.IDD_SET_TABSTOPS)
dlg[0][0] = 'New Dialog Title'
dlg[0][1] = (80, 20, 161, 60)
dlg[1][1] = '&Confusion:'
cs = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_TABSTOP | win32con.BS_PUSHBUTTON
dlg.append([128, "&Help", 100, (111, 41, 40, 14), cs])
dialog.Dialog( dlg ).DoModal()
def test4():
page1=dialog.PropertyPage(win32ui.LoadDialogResource(win32ui.IDD_PROPDEMO1))
page2=dialog.PropertyPage(win32ui.LoadDialogResource(win32ui.IDD_PROPDEMO2))
ps=dialog.PropertySheet('Property Sheet/Page Demo', None, [page1, page2])
ps.DoModal()
def testall():
test1()
test2()
test3()
test4()
if __name__=='__main__':
testall() |
tedder/ansible | refs/heads/devel | lib/ansible/plugins/action/debug.py | 34 | # Copyright 2012, Dag Wieers <[email protected]>
# Copyright 2016, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
''' Print statements during execution '''
TRANSFERS_FILES = False
_VALID_ARGS = frozenset(('msg', 'var', 'verbosity'))
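# When 'var' is passed, run() templates it twice: first as a bare expression and, if the result
# comes back unchanged (i.e. it was a plain variable name), once more wrapped in "{{ }}" so the
# variable is actually resolved; undefined variables degrade to a readable message instead of failing.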
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
if 'msg' in self._task.args and 'var' in self._task.args:
return {"failed": True, "msg": "'msg' and 'var' are incompatible options"}
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
# get task verbosity
verbosity = int(self._task.args.get('verbosity', 0))
if verbosity <= self._display.verbosity:
if 'msg' in self._task.args:
result['msg'] = self._task.args['msg']
elif 'var' in self._task.args:
try:
results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True)
if results == self._task.args['var']:
# if results is not str/unicode type, raise an exception
if not isinstance(results, string_types):
raise AnsibleUndefinedVariable
# If var name is same as result, try to template it
results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True)
except AnsibleUndefinedVariable as e:
results = u"VARIABLE IS NOT DEFINED!"
if self._display.verbosity > 0:
results += u": %s" % to_text(e)
if isinstance(self._task.args['var'], (list, dict)):
# If var is a list or dict, use the type as key to display
result[to_text(type(self._task.args['var']))] = results
else:
result[self._task.args['var']] = results
else:
result['msg'] = 'Hello world!'
# force flag to make debug output module always verbose
result['_ansible_verbose_always'] = True
else:
result['skipped_reason'] = "Verbosity threshold not met."
result['skipped'] = True
return result
|
shivanikhosa/browserscope | refs/heads/master | categories/richtext2/tests/applyCSS.py | 16 |
APPLY_TESTS_CSS = {
'id': 'AC',
'caption': 'Apply Formatting Tests, using styleWithCSS',
'checkAttrs': True,
'checkStyle': True,
'styleWithCSS': True,
'Proposed': [
{ 'desc': '',
'command': '',
'tests': [
]
},
{ 'desc': '[HTML5] bold',
'command': 'bold',
'tests': [
{ 'id': 'B_TEXT-1_SI',
'rte1-id': 'a-bold-1',
'desc': 'Bold selection',
'pad': 'foo[bar]baz',
'expected': 'foo<span style="font-weight: bold">[bar]</span>baz' }
]
},
{ 'desc': '[HTML5] italic',
'command': 'italic',
'tests': [
{ 'id': 'I_TEXT-1_SI',
'rte1-id': 'a-italic-1',
'desc': 'Italicize selection',
'pad': 'foo[bar]baz',
'expected': 'foo<span style="font-style: italic">[bar]</span>baz' }
]
},
{ 'desc': '[HTML5] underline',
'command': 'underline',
'tests': [
{ 'id': 'U_TEXT-1_SI',
'rte1-id': 'a-underline-1',
'desc': 'Underline selection',
'pad': 'foo[bar]baz',
'expected': 'foo<span style="text-decoration: underline">[bar]</span>baz' }
]
},
{ 'desc': '[HTML5] strikethrough',
'command': 'strikethrough',
'tests': [
{ 'id': 'S_TEXT-1_SI',
'rte1-id': 'a-strikethrough-1',
'desc': 'Strike-through selection',
'pad': 'foo[bar]baz',
'expected': 'foo<span style="text-decoration: line-through">[bar]</span>baz' }
]
},
{ 'desc': '[HTML5] subscript',
'command': 'subscript',
'tests': [
{ 'id': 'SUB_TEXT-1_SI',
'rte1-id': 'a-subscript-1',
'desc': 'Change selection to subscript',
'pad': 'foo[bar]baz',
'expected': 'foo<span style="vertical-align: sub">[bar]</span>baz' }
]
},
{ 'desc': '[HTML5] superscript',
'command': 'superscript',
'tests': [
{ 'id': 'SUP_TEXT-1_SI',
'rte1-id': 'a-superscript-1',
'desc': 'Change selection to superscript',
'pad': 'foo[bar]baz',
'expected': 'foo<span style="vertical-align: super">[bar]</span>baz' }
]
},
{ 'desc': '[MIDAS] backcolor',
'command': 'backcolor',
'tests': [
{ 'id': 'BC:blue_TEXT-1_SI',
'rte1-id': 'a-backcolor-1',
'desc': 'Change background color',
'value': 'blue',
'pad': 'foo[bar]baz',
'expected': [ 'foo<span style="background-color: blue">[bar]</span>baz',
'foo<font style="background-color: blue">[bar]</font>baz' ] }
]
},
{ 'desc': '[MIDAS] forecolor',
'command': 'forecolor',
'tests': [
{ 'id': 'FC:blue_TEXT-1_SI',
'rte1-id': 'a-forecolor-1',
'desc': 'Change the text color',
'value': 'blue',
'pad': 'foo[bar]baz',
'expected': [ 'foo<span style="color: blue">[bar]</span>baz',
'foo<font style="color: blue">[bar]</font>baz' ] }
]
},
{ 'desc': '[MIDAS] hilitecolor',
'command': 'hilitecolor',
'tests': [
{ 'id': 'HC:blue_TEXT-1_SI',
'rte1-id': 'a-hilitecolor-1',
'desc': 'Change the hilite color',
'value': 'blue',
'pad': 'foo[bar]baz',
'expected': [ 'foo<span style="background-color: blue">[bar]</span>baz',
'foo<font style="background-color: blue">[bar]</font>baz' ] }
]
},
{ 'desc': '[MIDAS] fontname',
'command': 'fontname',
'tests': [
{ 'id': 'FN:a_TEXT-1_SI',
'rte1-id': 'a-fontname-1',
'desc': 'Change the font name',
'value': 'arial',
'pad': 'foo[bar]baz',
'expected': [ 'foo<span style="font-family: arial">[bar]</span>baz',
'foo<font style="font-family: blue">[bar]</font>baz' ] }
]
},
{ 'desc': '[MIDAS] fontsize',
'command': 'fontsize',
'tests': [
{ 'id': 'FS:2_TEXT-1_SI',
'rte1-id': 'a-fontsize-1',
'desc': 'Change the font size to "2"',
'value': '2',
'pad': 'foo[bar]baz',
'expected': [ 'foo<span style="font-size: small">[bar]</span>baz',
'foo<font style="font-size: small">[bar]</font>baz' ] },
{ 'id': 'FS:18px_TEXT-1_SI',
'desc': 'Change the font size to "18px"',
'value': '18px',
'pad': 'foo[bar]baz',
'expected': [ 'foo<span style="font-size: 18px">[bar]</span>baz',
'foo<font style="font-size: 18px">[bar]</font>baz' ] },
{ 'id': 'FS:large_TEXT-1_SI',
'desc': 'Change the font size to "large"',
'value': 'large',
'pad': 'foo[bar]baz',
'expected': [ 'foo<span style="font-size: large">[bar]</span>baz',
'foo<font style="font-size: large">[bar]</font>baz' ] }
]
},
{ 'desc': '[MIDAS] indent',
'command': 'indent',
'tests': [
{ 'id': 'IND_TEXT-1_SI',
'rte1-id': 'a-indent-1',
'desc': 'Indent the text (assume "standard" 40px)',
'pad': 'foo[bar]baz',
'expected': [ '<div style="margin-left: 40px">foo[bar]baz</div>',
'<div style="margin: 0 0 0 40px">foo[bar]baz</div>',
'<blockquote style="margin-left: 40px">foo[bar]baz</blockquote>',
'<blockquote style="margin: 0 0 0 40px">foo[bar]baz</blockquote>' ],
'div': {
'accOuter': [ '<div contenteditable="true" style="margin-left: 40px">foo[bar]baz</div>',
'<div contenteditable="true" style="margin: 0 0 0 40px">foo[bar]baz</div>' ] } }
]
},
{ 'desc': '[MIDAS] outdent (-> unapply tests)',
'command': 'outdent',
'tests': [
]
},
{ 'desc': '[MIDAS] justifycenter',
'command': 'justifycenter',
'tests': [
{ 'id': 'JC_TEXT-1_SC',
'rte1-id': 'a-justifycenter-1',
'desc': 'justify the text centrally',
'pad': 'foo^bar',
'expected': [ '<p style="text-align: center">foo^bar</p>',
'<div style="text-align: center">foo^bar</div>' ],
'div': {
'accOuter': '<div contenteditable="true" style="text-align: center">foo^bar</div>' } }
]
},
{ 'desc': '[MIDAS] justifyfull',
'command': 'justifyfull',
'tests': [
{ 'id': 'JF_TEXT-1_SC',
'rte1-id': 'a-justifyfull-1',
'desc': 'justify the text fully',
'pad': 'foo^bar',
'expected': [ '<p style="text-align: justify">foo^bar</p>',
'<div style="text-align: justify">foo^bar</div>' ],
'div': {
'accOuter': '<div contenteditable="true" style="text-align: justify">foo^bar</div>' } }
]
},
{ 'desc': '[MIDAS] justifyleft',
'command': 'justifyleft',
'tests': [
{ 'id': 'JL_TEXT-1_SC',
'rte1-id': 'a-justifyleft-1',
'desc': 'justify the text left',
'pad': 'foo^bar',
'expected': [ '<p style="text-align: left">foo^bar</p>',
'<div style="text-align: left">foo^bar</div>' ],
'div': {
'accOuter': '<div contenteditable="true" style="text-align: left">foo^bar</div>' } }
]
},
{ 'desc': '[MIDAS] justifyright',
'command': 'justifyright',
'tests': [
{ 'id': 'JR_TEXT-1_SC',
'rte1-id': 'a-justifyright-1',
'desc': 'justify the text right',
'pad': 'foo^bar',
'expected': [ '<p style="text-align: right">foo^bar</p>',
'<div style="text-align: right">foo^bar</div>' ],
'div': {
'accOuter': '<div contenteditable="true" style="text-align: right">foo^bar</div>' } }
]
}
]
}
|
EduPepperPDTesting/pepper2013-testing | refs/heads/www0 | lms/djangoapps/django_comment_client/tests/test_helpers.py | 20 | from django.test import TestCase
from django_comment_client.helpers import pluralize
class PluralizeTestCase(TestCase):
def testPluralize(self):
self.term = "cat"
self.assertEqual(pluralize(self.term, 0), "cats")
self.assertEqual(pluralize(self.term, 1), "cat")
self.assertEqual(pluralize(self.term, 2), "cats")
|
digideskio/st2contrib | refs/heads/master | packs/newrelic/actions/get_alerts.py | 2 | import requests
from st2actions.runners.pythonrunner import Action
class GetAppHealthStatusAction(Action):
"""
Get health status of new relic application(s).
"""
def __init__(self, *args, **kwargs):
super(GetAppHealthStatusAction, self).__init__(*args, **kwargs)
self.url = 'https://api.newrelic.com/v2/applications.json'
self.headers = {
'User-Agent': 'StackStorm-New-Relic-Sensor/1.0.0 python-requests/2.7.0',
'content-type': 'application/x-www-form-urlencoded',
}
self.headers['X-Api-Key'] = self.config['api_key']
def run(self, app_name=None):
params = None
if app_name:
params = {'filter[name]': app_name}
resp = requests.get(self.url, headers=self.headers, params=params).json()
app_status_map = {}
for application in resp['applications']:
app_status_map[application['name']] = application['health_status']
return app_status_map
|
topicusonderwijs/zxing-ios | refs/heads/master | cpp/scons/scons-local-2.0.0.final.0/SCons/compat/_scons_collections.py | 34 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
collections compatibility module for older (pre-2.4) Python versions
This does not not NOT (repeat, *NOT*) provide complete collections
functionality. It only wraps the portions of collections functionality
used by SCons, in an interface that looks enough like collections for
our purposes.
"""
__revision__ = "src/engine/SCons/compat/_scons_collections.py 5023 2010/06/14 22:05:46 scons"
# Use exec to hide old names from fixers.
exec("""if True:
from UserDict import UserDict
from UserList import UserList
from UserString import UserString""")
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mikel-egana-aranguren/SADI-Galaxy-Docker | refs/heads/master | galaxy-dist/eggs/requests-2.2.1-py2.7.egg/requests/api.py | 637 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) Float describing the timeout of the request.
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
Usage::
>>> import requests
>>> req = requests.request('GET', 'http://httpbin.org/get')
<Response [200]>
"""
session = sessions.Session()
return session.request(method=method, url=url, **kwargs)
def get(url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('post', url, data=data, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('delete', url, **kwargs)
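# A minimal usage sketch (illustrative only; httpbin.org is just a placeholder endpoint):
#
# >>> import requests
# >>> r = requests.get('http://httpbin.org/get', params={'q': 'value'})
# >>> r = requests.post('http://httpbin.org/post', data={'key': 'value'})
# >>> r.status_code, r.headers['content-type']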
|
vadimtk/chrome4sdp | refs/heads/master | third_party/cython/src/Cython/Debugger/__init__.py | 1472 | # empty file
|
michaelgallacher/intellij-community | refs/heads/master | python/testData/editing/enterDocstringStubWhenFunctionDocstringBelow.after.py | 44 | def f():
"""
Returns:
"""
def g():
"""
bar
""" |
aayushidwivedi01/spark-tk | refs/heads/master | python/sparktk/models/clustering/__init__.py | 137 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparktk.loggers import log_load; log_load(__name__); del log_load
|
slobberchops/rop | refs/heads/master | art/life.py | 1 | __author__ = 'rafe'
from opc.hue import hue
import random
import numpy
HSCALE = 0.1
RESET_INTERVAL = 20
class Art(object):
description = "Conway's Game of Life"
def __init__(self, matrix, config):
self.iterations = config.get('ITERATIONS', 1000)
self.hscale = config.get('HSCALE', HSCALE)
self._interval = config.get('INTERVAL', 300)
def start(self, matrix):
self.lifes = numpy.empty([matrix.width, matrix.height])
self.prior = numpy.copy(self.lifes)
for y in range(matrix.height):
for x in range(matrix.width):
self.lifes[x, y] = random.randint(0, 1)
self.reset_counter = 0
self.global_countdown = self.iterations
self.hue = random.random()
def _hue(self, offset):
return hue(self.hue+self.hscale*offset)
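# refresh() advances the Game of Life by one generation on a torus: neighbour coordinates wrap
# around the matrix edges via the modulo operations below, live cells survive with exactly two
# or three neighbours, and dead cells come to life with exactly three.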
def refresh(self, matrix):
matrix.shift(dh=0.9, dv=0.8)
width = matrix.width
height = matrix.height
lifes = self.lifes
next = numpy.copy(lifes)
for y in range(height):
for x in range(width):
minus_x = (x - 1) % width
minus_y = (y - 1) % height
plus_x = (x + 1) % width
plus_y = (y + 1) % height
neighbors = sum([
lifes[minus_x, minus_y],
lifes[x, minus_y],
lifes[plus_x, minus_y],
lifes[minus_x, y],
lifes[plus_x, y],
lifes[minus_x, plus_y],
lifes[x, plus_y],
lifes[plus_x, plus_y],
])
if lifes[x, y]:
if neighbors == 2:
next[x, y] = 1
color = self._hue(0)
elif neighbors == 3:
color = self._hue(1)
else:
next[x, y] = 0
color = None
else:
if neighbors == 3:
next[x, y] = 1
color = self._hue(2)
else:
next[x, y] = 0
color = None
if color:
matrix.drawPixel(x, y, color)
if (next == self.prior).all() or (next == self.lifes).all():
self.reset_counter += 1
else:
self.reset_counter = 0
self.prior = lifes
self.lifes = next
self.global_countdown -= 1
if self.reset_counter == RESET_INTERVAL or not self.global_countdown:
self.start(matrix)
def interval(self):
return self._interval
|
sonata-nfv/son-cli | refs/heads/master | src/son/monitor/utils.py | 5 | """
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import yaml
import logging
## helper functions
def parse_vnf_name( vnf_name_str):
vnf_name = vnf_name_str.split(':')[0]
return vnf_name
def parse_vnf_interface( vnf_name_str):
try:
vnf_interface = vnf_name_str.split(':')[1]
except:
#vnf_interface = None
vnf_interface = vnf_name_str
return vnf_interface
def create_dict(**kwargs):
return kwargs
def parse_network(network_str):
'''
parse the options for all network interfaces of the vnf
:param network_str: (id=x,ip=x.x.x.x/x), ...
:return: list of dicts [{"id":x,"ip":"x.x.x.x/x"}, ...]
'''
nw_list = list()
networks = network_str[1:-1].split('),(')
for nw in networks:
nw_dict = dict(tuple(e.split('=')) for e in nw.split(','))
nw_list.append(nw_dict)
return nw_list
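# For example (illustrative values), parse_network("(id=0,ip=10.0.0.1/24),(id=1,ip=10.0.1.1/24)")
# returns [{'id': '0', 'ip': '10.0.0.1/24'}, {'id': '1', 'ip': '10.0.1.1/24'}].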
def valid_arguments(*args):
ret = True
for arg in args:
if arg is None or arg == "":
#log.error("Argument not valid: {0}".format(arg))
ret = False
return ret
def construct_url(base, prefix, *args):
url = '/'.join([base, prefix])
for arg in args:
if valid_arguments(arg):
url += "/" + str(arg)
return url
def load_yaml(path):
with open(path, "r") as f:
try:
r = yaml.load(f)
except yaml.YAMLError as exc:
logging.exception("YAML parse error")
r = dict()
return r
def switch_tx_rx(metric=''):
# in link monitoring, tx at source means rx at destination and vice-versa
if 'tx' in metric:
metric = metric.replace('tx', 'rx')
elif 'rx' in metric:
metric = metric.replace('rx', 'tx')
return metric |
halberom/ansible-modules-core | refs/heads/devel | cloud/docker/docker_image_facts.py | 8 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: docker_image_facts
short_description: Inspect docker images
version_added: "2.1.0"
description:
- Provide one or more image names, and the module will inspect each, returning an array of inspection results.
options:
name:
description:
- An image name or a list of image names. Name format will be name[:tag] or repository/name[:tag], where tag is
optional. If a tag is not provided, 'latest' will be used.
default: null
required: true
extends_documentation_fragment:
- docker
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
authors:
- Chris Houseknecht (@chouseknecht)
- James Tanner (@jctanner)
'''
EXAMPLES = '''
- name: Inspect a single image
docker_image_facts:
name: pacur/centos-7
- name: Inspect multiple images
docker_image_facts:
name:
- pacur/centos-7
- sinatra
'''
RETURN = '''
images:
description: Facts for the selected images.
returned: always
type: dict
sample: [
{
"Architecture": "amd64",
"Author": "",
"Comment": "",
"Config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/etc/docker/registry/config.yml"
],
"Domainname": "",
"Entrypoint": [
"/bin/registry"
],
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"5000/tcp": {}
},
"Hostname": "e5c68db50333",
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
"Labels": {},
"OnBuild": [],
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/var/lib/registry": {}
},
"WorkingDir": ""
},
"Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
"ContainerConfig": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/bin/sh",
"-c",
'#(nop) CMD ["/etc/docker/registry/config.yml"]'
],
"Domainname": "",
"Entrypoint": [
"/bin/registry"
],
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"5000/tcp": {}
},
"Hostname": "e5c68db50333",
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
"Labels": {},
"OnBuild": [],
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/var/lib/registry": {}
},
"WorkingDir": ""
},
"Created": "2016-03-08T21:08:15.399680378Z",
"DockerVersion": "1.9.1",
"GraphDriver": {
"Data": null,
"Name": "aufs"
},
"Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
"Name": "registry:2",
"Os": "linux",
"Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
"RepoDigests": [],
"RepoTags": [
"registry:2"
],
"Size": 0,
"VirtualSize": 165808884
}
]
'''
from ansible.module_utils.docker_common import *
try:
from docker import auth
from docker import utils
except ImportError:
# missing docker-py handled in docker_common
pass
class ImageManager(DockerBaseClass):
def __init__(self, client, results):
super(ImageManager, self).__init__()
self.client = client
self.results = results
self.name = self.client.module.params.get('name')
self.log("Gathering facts for images: %s" % (str(self.name)))
if self.name:
self.results['images'] = self.get_facts()
else:
self.results['images'] = self.get_all_images()
def fail(self, msg):
self.client.fail(msg)
def get_facts(self):
'''
Lookup and inspect each image name found in the names parameter.
:returns array of image dictionaries
'''
results = []
names = self.name
if not isinstance(names, list):
names = [names]
for name in names:
repository, tag = utils.parse_repository_tag(name)
if not tag:
tag = 'latest'
self.log('Fetching image %s:%s' % (repository, tag))
image = self.client.find_image(name=repository, tag=tag)
if image:
results.append(image)
return results
def get_all_images(self):
results = []
images = self.client.images()
for image in images:
try:
inspection = self.client.inspect_image(image['Id'])
except Exception, exc:
self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
results.append(inspection)
return results
def main():
argument_spec = dict(
name=dict(type='list'),
)
client = AnsibleDockerClient(
argument_spec=argument_spec
)
results = dict(
changed=False,
images=[]
)
ImageManager(client, results)
client.module.exit_json(**results)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
njmube/erpnext | refs/heads/develop | erpnext/projects/doctype/project/project.py | 2 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate, get_url
from frappe import _
from frappe.model.document import Document
from erpnext.controllers.queries import get_filters_cond
from frappe.desk.reportview import get_match_cond
class Project(Document):
def get_feed(self):
return '{0}: {1}'.format(_(self.status), self.project_name)
def onload(self):
"""Load project tasks for quick view"""
if not self.get('__unsaved') and not self.get("tasks"):
self.load_tasks()
self.set_onload('activity_summary', frappe.db.sql('''select activity_type,
sum(hours) as total_hours
from `tabTimesheet Detail` where project=%s and docstatus < 2 group by activity_type
order by total_hours desc''', self.name, as_dict=True))
def __setup__(self):
self.onload()
def load_tasks(self):
"""Load `tasks` from the database"""
self.tasks = []
for task in self.get_tasks():
task_map = {
"title": task.subject,
"status": task.status,
"start_date": task.exp_start_date,
"end_date": task.exp_end_date,
"description": task.description,
"task_id": task.name,
"task_weight": task.task_weight
}
self.map_custom_fields(task, task_map)
self.append("tasks", task_map)
def get_tasks(self):
return frappe.get_all("Task", "*", {"project": self.name}, order_by="exp_start_date asc")
def validate(self):
self.validate_dates()
self.validate_weights()
self.sync_tasks()
self.tasks = []
self.send_welcome_email()
def validate_dates(self):
if self.expected_start_date and self.expected_end_date:
if getdate(self.expected_end_date) < getdate(self.expected_start_date):
frappe.throw(_("Expected End Date can not be less than Expected Start Date"))
def validate_weights(self):
sum = 0
for task in self.tasks:
if task.task_weight > 0:
sum = sum + task.task_weight
if sum > 0 and sum != 1:
frappe.throw(_("Total of all task weights should be 1. Please adjust weights of all Project tasks accordingly"))
def sync_tasks(self):
"""sync tasks and remove table"""
if self.flags.dont_sync_tasks: return
task_names = []
for t in self.tasks:
if t.task_id:
task = frappe.get_doc("Task", t.task_id)
else:
task = frappe.new_doc("Task")
task.project = self.name
task.update({
"subject": t.title,
"status": t.status,
"exp_start_date": t.start_date,
"exp_end_date": t.end_date,
"description": t.description,
"task_weight": t.task_weight
})
self.map_custom_fields(t, task)
task.flags.ignore_links = True
task.flags.from_project = True
task.flags.ignore_feed = True
task.save(ignore_permissions = True)
task_names.append(task.name)
# delete
for t in frappe.get_all("Task", ["name"], {"project": self.name, "name": ("not in", task_names)}):
frappe.delete_doc("Task", t.name)
self.update_percent_complete()
self.update_costing()
def map_custom_fields(self, source, target):
project_task_custom_fields = frappe.get_all("Custom Field", {"dt": "Project Task"}, "fieldname")
for field in project_task_custom_fields:
target.update({
field.fieldname: source.get(field.fieldname)
})
def update_project(self):
self.update_percent_complete()
self.update_costing()
self.flags.dont_sync_tasks = True
self.save(ignore_permissions = True)
def update_percent_complete(self):
total = frappe.db.sql("""select count(name) from tabTask where project=%s""", self.name)[0][0]
if (self.percent_complete_method == "Task Completion" and total > 0) or (not self.percent_complete_method and total > 0):
completed = frappe.db.sql("""select count(name) from tabTask where
project=%s and status in ('Closed', 'Cancelled')""", self.name)[0][0]
self.percent_complete = flt(flt(completed) / total * 100, 2)
if (self.percent_complete_method == "Task Progress" and total > 0):
progress = frappe.db.sql("""select sum(progress) from tabTask where
project=%s""", self.name)[0][0]
self.percent_complete = flt(flt(progress) / total, 2)
if (self.percent_complete_method == "Task Weight" and total > 0):
weight_sum = frappe.db.sql("""select sum(task_weight) from tabTask where
project=%s""", self.name)[0][0]
if weight_sum == 1:
weighted_progress = frappe.db.sql("""select progress,task_weight from tabTask where
project=%s""", self.name,as_dict=1)
pct_complete=0
for row in weighted_progress:
pct_complete += row["progress"] * row["task_weight"]
self.percent_complete = flt(flt(pct_complete), 2)
def update_costing(self):
from_time_sheet = frappe.db.sql("""select
sum(costing_amount) as costing_amount,
sum(billing_amount) as billing_amount,
min(from_time) as start_date,
max(to_time) as end_date,
sum(hours) as time
from `tabTimesheet Detail` where project = %s and docstatus = 1""", self.name, as_dict=1)[0]
from_expense_claim = frappe.db.sql("""select
sum(total_sanctioned_amount) as total_sanctioned_amount
from `tabExpense Claim` where project = %s and approval_status='Approved'
and docstatus = 1""",
self.name, as_dict=1)[0]
self.actual_start_date = from_time_sheet.start_date
self.actual_end_date = from_time_sheet.end_date
self.total_costing_amount = from_time_sheet.costing_amount
self.total_billing_amount = from_time_sheet.billing_amount
self.actual_time = from_time_sheet.time
self.total_expense_claim = from_expense_claim.total_sanctioned_amount
self.gross_margin = flt(self.total_billing_amount) - flt(self.total_costing_amount)
if self.total_billing_amount:
self.per_gross_margin = (self.gross_margin / flt(self.total_billing_amount)) *100
def update_purchase_costing(self):
total_purchase_cost = frappe.db.sql("""select sum(base_net_amount)
from `tabPurchase Invoice Item` where project = %s and docstatus=1""", self.name)
self.total_purchase_cost = total_purchase_cost and total_purchase_cost[0][0] or 0
def send_welcome_email(self):
url = get_url("/project/?name={0}".format(self.name))
messages = (
_("You have been invited to collaborate on the project: {0}".format(self.name)),
url,
_("Join")
)
content = """
<p>{0}.</p>
<p><a href="{1}">{2}</a></p>
"""
for user in self.users:
if user.welcome_email_sent==0:
frappe.sendmail(user.user, subject=_("Project Collaboration Invitation"), content=content.format(*messages))
user.welcome_email_sent=1
def on_update(self):
self.load_tasks()
self.sync_tasks()
def get_timeline_data(doctype, name):
'''Return timeline for attendance'''
return dict(frappe.db.sql('''select unix_timestamp(from_time), count(*)
from `tabTimesheet Detail` where project=%s
and from_time > date_sub(curdate(), interval 1 year)
and docstatus < 2
group by date(from_time)''', name))
def get_project_list(doctype, txt, filters, limit_start, limit_page_length=20):
return frappe.db.sql('''select distinct project.*
from tabProject project, `tabProject User` project_user
where
(project_user.user = %(user)s
and project_user.parent = project.name)
or project.owner = %(user)s
order by project.modified desc
limit {0}, {1}
'''.format(limit_start, limit_page_length),
{'user':frappe.session.user},
as_dict=True,
update={'doctype':'Project'})
def get_list_context(context=None):
return {
"show_sidebar": True,
"show_search": True,
'no_breadcrumbs': True,
"title": _("Projects"),
"get_list": get_project_list,
"row_template": "templates/includes/projects/project_row.html"
}
def get_users_for_project(doctype, txt, searchfield, start, page_len, filters):
conditions = []
return frappe.db.sql("""select name, concat_ws(' ', first_name, middle_name, last_name)
from `tabUser`
where enabled=1
and name not in ("Guest", "Administrator")
and ({key} like %(txt)s
or full_name like %(txt)s)
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, full_name), locate(%(_txt)s, full_name), 99999),
idx desc,
name, full_name
limit %(start)s, %(page_len)s""".format(**{
'key': searchfield,
'fcond': get_filters_cond(doctype, filters, conditions),
'mcond': get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
@frappe.whitelist()
def get_cost_center_name(project):
return frappe.db.get_value("Project", project, "cost_center")
|
mattvick/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/test/main_unittest.py | 124 | # Copyright (C) 2012 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import sys
import unittest2 as unittest
import StringIO
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.test.main import Tester, _Loader
STUBS_CLASS = __name__ + ".TestStubs"
class TestStubs(unittest.TestCase):
def test_empty(self):
pass
def integration_test_empty(self):
pass
def serial_test_empty(self):
pass
def serial_integration_test_empty(self):
pass
class TesterTest(unittest.TestCase):
def test_no_tests_found(self):
tester = Tester()
errors = StringIO.StringIO()
# Here we need to remove any existing log handlers so that they
# don't log the messages webkitpy.test while we're testing it.
root_logger = logging.getLogger()
root_handlers = root_logger.handlers
root_logger.handlers = []
tester.printer.stream = errors
tester.finder.find_names = lambda args, run_all: []
oc = OutputCapture()
try:
oc.capture_output()
self.assertFalse(tester.run())
finally:
_, _, logs = oc.restore_output()
root_logger.handlers = root_handlers
self.assertIn('No tests to run', errors.getvalue())
self.assertIn('No tests to run', logs)
def _find_test_names(self, args):
tester = Tester()
tester._options, args = tester._parse_args(args)
return tester._test_names(_Loader(), args)
def test_individual_names_are_not_run_twice(self):
args = [STUBS_CLASS + '.test_empty']
parallel_tests, serial_tests = self._find_test_names(args)
self.assertEqual(parallel_tests, args)
self.assertEqual(serial_tests, [])
def test_integration_tests_are_not_found_by_default(self):
parallel_tests, serial_tests = self._find_test_names([STUBS_CLASS])
self.assertEqual(parallel_tests, [
STUBS_CLASS + '.test_empty',
])
self.assertEqual(serial_tests, [
STUBS_CLASS + '.serial_test_empty',
])
def test_integration_tests_are_found(self):
parallel_tests, serial_tests = self._find_test_names(['--integration-tests', STUBS_CLASS])
self.assertEqual(parallel_tests, [
STUBS_CLASS + '.integration_test_empty',
STUBS_CLASS + '.test_empty',
])
self.assertEqual(serial_tests, [
STUBS_CLASS + '.serial_integration_test_empty',
STUBS_CLASS + '.serial_test_empty',
])
def integration_test_coverage_works(self):
filesystem = FileSystem()
executive = Executive()
module_path = filesystem.path_to_module(self.__module__)
script_dir = module_path[0:module_path.find('webkitpy') - 1]
proc = executive.popen([sys.executable, filesystem.join(script_dir, 'test-webkitpy'), '-c', STUBS_CLASS + '.test_empty'],
stdout=executive.PIPE, stderr=executive.PIPE)
out, _ = proc.communicate()
retcode = proc.returncode
self.assertEqual(retcode, 0)
self.assertIn('Cover', out)
|
rdo-management/heat | refs/heads/mgt-master | heat/db/sqlalchemy/migrate_repo/versions/045_stack_backup.py | 13 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
backup = sqlalchemy.Column('backup', sqlalchemy.Boolean(), default=False)
backup.create(stack)
# Set backup flag for backup stacks, which are the only ones named "foo*"
not_deleted = None
stmt = sqlalchemy.select([stack.c.id,
stack.c.name]
).where(stack.c.deleted_at == not_deleted)
stacks = migrate_engine.execute(stmt)
for s in stacks:
if s.name.endswith('*'):
values = {'backup': True}
update = stack.update().where(
stack.c.id == s.id).values(values)
migrate_engine.execute(update)
|
mayankcu/Django-social | refs/heads/master | venv/Lib/encodings/euc_jisx0213.py | 816 | #
# euc_jisx0213.py: Python Unicode Codec for EUC_JISX0213
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jisx0213')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_jisx0213',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
Daniel-CA/odoo-addons | refs/heads/8.0 | product_stock_on_hand/post_install.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2017 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import SUPERUSER_ID
def update_stock_on_hand_locations(cr, pool):
slo = pool['stock.location']
location_ids = slo.search(cr, SUPERUSER_ID, [('usage', '=', 'internal')])
if location_ids:
slo.write(cr, SUPERUSER_ID, location_ids, {'stock_on_hand': True})
return
|
idaholab/raven | refs/heads/devel | scripts/TestHarness/testers/RAVENImageDiff.py | 2 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This tests images against an expected image.
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import sys
try:
from scipy.misc import imread
correctImport = True
except ImportError:
import scipy
correctImport = False
from Tester import Differ
class ImageDiff:
"""
ImageDiff is used for comparing two image files.
"""
def __init__(self, outFiles, goldFiles, relativeError=1e-10, zeroThreshold=None):
"""
Create an ImageDiff class
@ In, outFiles, the files to be compared.
@ In, goldFiles, the files to be compared to the outFiles.
@ In, relativeError, float, optional, relative error
@ In, zeroThreshold, float, optional, if a number is less than or equal to
abs(zeroThreshold), it will be considered 0
@ Out, None.
"""
#assert len(outFiles) == len(goldFiles)
self.__out_files = outFiles
self.__gold_files = goldFiles
self.__message = ""
self.__same = True
self.__rel_err = relativeError
self.__zero_threshold = float(zeroThreshold) if zeroThreshold is not None else 0.0
def diff(self):
"""
Run the comparison.
returns (same,messages) where same is true if the
image files are the same, and messages is a string with all the
differences.
In, None
Out, None
"""
# read in files
filesRead = False
for testFilename, goldFilename in zip(self.__out_files, self.__gold_files):
if not os.path.exists(testFilename):
self.__same = False
self.__message += 'Test file does not exist: '+testFilename
elif not os.path.exists(goldFilename):
self.__same = False
self.__message += 'Gold file does not exist: '+goldFilename
else:
filesRead = True
#read in files
if filesRead:
if not correctImport:
self.__message += 'ImageDiff cannot run with scipy version less '+\
'than 0.15.0, and requires the PIL installed; scipy version is '+\
str(scipy.__version__)
self.__same = False
return(self.__same, self.__message)
try:
# RAK - The original line...
# testImage = imread(open(testFilename,'r'))
# ...didn't work on Windows Python because it couldn't sense the file type
testImage = imread(testFilename)
except IOError:
self.__message += 'Unrecognized file type for test image in scipy.imread: '+testFilename
filesRead = False
return (False, self.__message)
try:
# RAK - The original line...
# goldImage = imread(open(goldFilename,'r'))
# ...didn't work on Windows Python because it couldn't sense the file type
goldImage = imread(goldFilename)
except IOError:
filesRead = False
self.__message += 'Unrecognized file type for gold image in scipy.imread: '+goldFilename
return (False, self.__message)
#first check dimensionality
if goldImage.shape != testImage.shape:
self.__message += 'Gold and test image are not the same shape: '+\
str(goldImage.shape)+', '+str(testImage.shape)
self.__same = False
return (self.__same, self.__message)
#pixelwise comparison
#TODO in the future we can add greyscale, normalized coloring, etc.
# For now just do raw comparison of right/wrong pixels
diff = goldImage - testImage
onlyDiffs = diff[abs(diff) > self.__zero_threshold]
pctNumDiff = onlyDiffs.size/float(diff.size)
if pctNumDiff > self.__rel_err:
self.__message += 'Difference between images is too large:'+\
' %2.2f pct (allowable: %2.2f)' %(100*pctNumDiff,\
100*self.__rel_err)
self.__same = False
return (self.__same, self.__message)
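# A minimal usage sketch (file names here are hypothetical, not part of the
# test suite): compare a produced image against its gold copy, allowing up to
# 1% of pixels to differ before the comparison is reported as failed.
#
#   differ = ImageDiff(['output/plot.png'], ['gold/plot.png'], relativeError=0.01)
#   same, message = differ.diff()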
class ImageDiffer(Differ):
"""
This is the class to use for handling the parameters block.
"""
@staticmethod
def get_valid_params():
"""
Returns the valid parameters for this class.
@ In, None
@ Out, params, _ValidParameters, return the parameters.
"""
params = Differ.get_valid_params()
params.add_param('rel_err', '', 'Relative Error for image files')
params.add_param('zero_threshold', sys.float_info.min*4.0,
'it represents the value below which a float is '+
'considered zero in the pixel comparison')
return params
def __init__(self, name, params, testDir):
"""
Initializer for the class. Takes a String name and a dictionary params
@ In, name, string, name of the test.
@ In, params, dictionary, parameters for the class
@ In, testDir, string, path to the test.
@ Out, None.
"""
Differ.__init__(self, name, params, testDir)
self.__zero_threshold = self.specs['zero_threshold']
if len(self.specs['rel_err']) > 0:
self.__rel_err = float(self.specs['rel_err'])
else:
self.__rel_err = 1e-10
def check_output(self):
"""
Checks that the output matches the gold.
returns (same, message) where same is true if the
test passes, or false if the test fails. message should
give a human readable explanation of the differences.
@ In, None
@ Out, (same, message), same is true if the tests passes.
"""
imageFiles = self._get_test_files()
goldFiles = self._get_gold_files()
imageDiff = ImageDiff(imageFiles,
goldFiles,
relativeError=self.__rel_err,
zeroThreshold=self.__zero_threshold)
return imageDiff.diff()
|
fhaoquan/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/lib2to3/fixes/fix_reduce.py | 203 | # Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for reduce().
Makes sure reduce() is imported from the functools module if reduce is
used in that module.
"""
from lib2to3 import fixer_base
from lib2to3.fixer_util import touch_import
class FixReduce(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
PATTERN = """
power< 'reduce'
trailer< '('
arglist< (
(not(argument<any '=' any>) any ','
not(argument<any '=' any>) any) |
(not(argument<any '=' any>) any ','
not(argument<any '=' any>) any ','
not(argument<any '=' any>) any)
) >
')' >
>
"""
def transform(self, node, results):
touch_import('functools', 'reduce', node)
|
pcubillos/MCcubed | refs/heads/master | MCcubed/mc/chain.py | 1 | # Copyright (c) 2015-2019 Patricio Cubillos and contributors.
# MC3 is open-source software under the MIT license (see LICENSE).
import sys
import os
import warnings
import random
import multiprocessing as mp
import numpy as np
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../lib')
import chisq as cs
import dwt as dwt
if sys.version_info.major == 2:
range = xrange
# Ignore RuntimeWarnings:
warnings.simplefilter("ignore", RuntimeWarning)
class Chain(mp.Process):
"""
Background process. This guy evaluates the model and calculates chisq.
"""
def __init__(self, func, args, pipe, data, uncert,
params, freepars, stepsize, pmin, pmax,
walk, wlike, prior, priorlow, priorup, thinning,
fgamma, fepsilon, Z, Zsize, Zchisq, Zchain, M0,
numaccept, outbounds, ncpp,
chainsize, bestp, bestchisq, ID, nproc, **kwds):
"""
Class initializer.
Parameters
----------
func: Callable
Model fitting function.
args: List
Additional arguments for function (besides the fitting parameters).
pipe: multiprocessing.Pipe object
Pipe to communicate with mcmc.
data: 1D shared-ctypes float ndarray
Dependent data fitted by func.
uncert: 1D Shared ctypes float ndarray
Uncertainty of data.
params: 1D float array
Array of model parameters (including fixed and shared).
freepars: 2D shared-ctypes float ndarray
Current state of fitting parameters (X, as in Braak & Vrugt 2008).
stepsize: 1D float ndarray
Proposal jump scale.
pmin: 1D float ndarray
Lower boundaries of the posteriors.
pmax: 1D float ndarray
Upper boundaries of the posteriors.
walk: String
Flag to indicate which MCMC algorithm to use [mrw, demc, snooker].
wlike: Boolean
Flag to use a wavelet-based likelihood function (True) or not (False).
prior: 1D float ndarray
Parameter prior.
priorlow: 1D float ndarray
Prior lower uncertainties.
priorup: 1D float ndarray
Prior upper uncertainties.
thinning: Integer
Thinning factor of the chains.
fgamma: Float
Proposals jump scale factor for DEMC's gamma.
The code computes: gamma = fgamma * 2.38 / sqrt(2*Nfree)
fepsilon: Float
Jump scale factor for DEMC's support distribution.
The code computes: e = fepsilon * Normal(0, stepsize)
Z: 2D shared-ctype float ndarray
MCMC parameters history (Z, as in Braak & Vrugt 2008).
Zsize: Shared ctypes integer
Current number of samples in the Z array.
Zchisq: Float multiprocessing.Array
Chi square values for the Z-array samples.
Zchain: multiprocessing.Array integer
Chain ID for the given state in the Z array.
M0: Integer
Initial number of samples in the Z array.
numaccept: multiprocessing.Value integer
Number of accepted MCMC proposals
outbounds: 1D shared multiprocessing integer Array
Array to count the number of out-of-bound proposals per free parameter.
ncpp: Integer
Number of chains for this process.
chainsize: multiprocessing.Array integer
The current length of this chain.
bestp: Shared ctypes float array
The array with the current best-fitting parameter.
bestchisq: Float multiprocessing.Value
The chi-square value for bestp.
ID: Integer
Identification serial number for this chain.
nproc: Integer
The number of processes running chains.
"""
# Multiprocessing setup:
mp.Process.__init__(self, **kwds)
self.daemon = True # FINDME: Understand daemon
self.ID = ID
self.ncpp = ncpp
self.nproc = nproc
# MCMC setup:
self.walk = walk
self.thinning = thinning
self.fgamma = fgamma
self.fepsilon = fepsilon
self.Z = Z
self.Zsize = Zsize
self.Zchisq = Zchisq
self.Zchain = Zchain
self.chainsize = chainsize
self.M0 = M0
self.numaccept = numaccept
self.outbounds = outbounds
# Best values:
self.bestp = bestp
self.bestchisq = bestchisq
# Modeling function:
self.func = func
self.args = args
# Model, fitting, and shared parameters:
self.params = params
self.freepars = freepars
self.stepsize = stepsize
self.pmin = pmin
self.pmax = pmax
# Input/output Pipe:
self.pipe = pipe
# Data:
self.data = data
self.uncert = uncert
# Chisq function:
self.wlike = wlike
# Index of parameters:
self.ishare = np.where(self.stepsize < 0)[0] # Shared parameter indices
self.ifree = np.where(self.stepsize > 0)[0] # Free parameter indices
self.iprior = np.where(priorlow != 0) # Indices of prior'ed parameters
# Keep only the priors that count:
self.prior = prior [self.iprior]
self.priorlow = priorlow[self.iprior]
self.priorup = priorup [self.iprior]
# Size of variables:
self.nfree = np.sum(self.stepsize > 0) # Number of free parameters
self.nchains = np.shape(self.freepars)[0]
self.Zlen = np.shape(Z)[0]
# Length of mrw/demc chains:
self.chainlen = int((self.Zlen) / self.nchains)
def run(self):
"""
Process the requests queue until terminated.
"""
# Indices in Z-array to start these chains:
IDs = np.arange(self.ID, self.nchains, self.nproc)
self.index = self.M0 + IDs
for j in range(self.ncpp):
if np.any(self.Zchain==self.ID): # (i.e., resume=True)
# Set ID to the last iteration for this chain:
IDs[j] = self.index[j] = np.where(self.Zchain==IDs[j])[0][-1]
self.freepars[self.ID + j*self.nproc] = np.copy(self.Z[IDs[j]])
chisq = self.Zchisq[IDs]
nextp = np.copy(self.params) # Array for proposed sample
nextchisq = 0.0 # Chi-square of nextp
njump = 0 # Number of jumps since last Z-update
gamma = self.fgamma * 2.38 / np.sqrt(2*self.nfree)
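# gamma is the usual differential-evolution MC jump scale, 2.38/sqrt(2*Nfree)
# for Nfree free parameters, multiplied by the user-supplied factor fgamma
# (see the fgamma description in __init__).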
# The numpy random system must have its seed reinitialized in
# each sub-process to avoid identical 'random' steps.
# random.randint is process- and thread-safe.
np.random.seed(random.randint(0, 100000))
# Run until completing the Z array:
while True:
njump += 1
normal = np.random.normal(0, self.stepsize[self.ifree], self.nfree)
if self.walk == "demc":
b = self.pipe.recv() # Synchronization flag
for j in range(self.ncpp):
ID = self.ID + j*self.nproc
mrfactor = 1.0
# Algorithm-specific proposal jumps:
if self.walk == "snooker":
# Random sampling without replacement (0 <= iR1 != iR2 < Zsize):
iR1 = np.random.randint(0, self.Zsize.value)
iR2 = np.random.randint(1, self.Zsize.value)
if iR2 == iR1:
iR2 = 0
sjump = np.random.uniform() < 0.1
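# With probability 0.1 take a snooker update: project the difference of two
# random Z samples onto the line through the current state and a random
# anchor z. Otherwise take the regular Z update, a jump along the difference
# of two Z samples scaled by gamma plus the small fepsilon*normal term.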
if sjump: # Snooker update:
iz = np.random.randint(self.Zsize.value)
z = self.Z[iz]  # Not to be confused with Z!
if np.all(z == self.freepars[ID]): # Do not project:
jump = np.random.uniform(1.2, 2.2) * (self.Z[iR2]-self.Z[iR1])
else:
dz = self.freepars[ID] - z
zp1 = np.dot(self.Z[iR1], dz)
zp2 = np.dot(self.Z[iR2], dz)
jump = np.random.uniform(1.2, 2.2) * (zp1-zp2) * dz/np.dot(dz,dz)
else: # Z update:
jump = gamma*(self.Z[iR1] - self.Z[iR2]) + self.fepsilon*normal
elif self.walk == "mrw":
jump = normal
elif self.walk == "demc":
# Select r1, r2 such that: r1 != r2 != ID:
r1 = np.random.randint(1, self.nchains)
if r1 == ID:
r1 = 0
# Pick r2 without replacement:
r2 = (r1 + np.random.randint(2, self.nchains))%self.nchains
if r2 == ID:
r2 = (r1 + 1) % self.nchains
jump = gamma*(self.freepars[r1] - self.freepars[r2]) \
+ self.fepsilon*normal
# Propose next point:
nextp[self.ifree] = np.copy(self.freepars[ID]) + jump
# Check boundaries:
outpars = np.asarray(((nextp < self.pmin) |
(nextp > self.pmax))[self.ifree])
# If any of the parameters lies out of bounds, skip model evaluation:
if np.any(outpars):
self.outbounds[:] += outpars
else:
# Update shared parameters:
for s in self.ishare:
nextp[s] = nextp[-int(self.stepsize[s])-1]
# Evaluate model:
nextchisq = self.eval_model(nextp, ret="chisq")
# Additional factor in Metropolis ratio for Snooker jump:
if sjump:
# squared norm of current and next:
cnorm = np.dot(self.freepars[ID]-z, self.freepars[ID]-z)
nnorm = np.dot(nextp[self.ifree]-z, nextp[self.ifree]-z)
mrfactor = (nnorm/cnorm)**(0.5*(self.nfree-1))
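# The snooker proposal is not symmetric, so the Metropolis ratio gets the
# correction factor (||x_new - z|| / ||x_cur - z||)**(nfree-1); cnorm and
# nnorm are squared norms, hence the exponent 0.5*(nfree-1) above.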
# Evaluate the Metropolis ratio:
if np.exp(0.5*(chisq[j]-nextchisq)) * mrfactor > np.random.uniform():
# Update freepars[ID]:
self.freepars[ID] = np.copy(nextp[self.ifree])
chisq[j] = nextchisq
with self.numaccept.get_lock():
self.numaccept.value += 1
# Check lowest chi-square:
if chisq[j] < self.bestchisq.value:
# with self.bestchisq.get_lock(): ??
self.bestp[self.ifree] = np.copy(self.freepars[ID])
self.bestchisq.value = chisq[j]
# Update Z if necessary:
if njump == self.thinning:
# Update Z-array size:
with self.Zsize.get_lock():
# Stop when we fill Z:
if self.Zsize.value == self.Zlen:
return
if self.walk == "snooker":
self.index[j] = self.Zsize.value
self.Zsize.value += 1
# Update values:
self.Zchain[self.index[j]] = ID
self.Z [self.index[j]] = np.copy(self.freepars[ID])
self.Zchisq[self.index[j]] = chisq[j]
self.index[j] += self.nchains
self.chainsize[ID] += 1
if njump == self.thinning:
njump = 0 # Reset njump
if self.walk == "demc":
self.pipe.send(chisq[j])
# Stop when the chain is complete:
if self.walk in ["mrw","demc"] and self.chainsize[0]==self.chainlen:
return
def eval_model(self, params, ret="model"):
"""
Evaluate the model for the requested set of parameters.
Parameters
----------
params: 1D float ndarray
The set of model fitting parameters.
ret: String
Flag to indicate what to return. Valid options:
- 'model' Return the evaluated model.
- 'chisq' Return chi-square.
- 'both' Return a list with the model and chisq.
"""
if self.wlike:
model = self.func(params[0:-3], *self.args)
else:
model = self.func(params, *self.args)
# Reject proposed iteration if any model value is infinite:
if np.any(model == np.inf):
chisq = np.inf
else:
# Calculate prioroff = params-prior:
prioroff = params[self.iprior] - self.prior
# Calculate chisq:
if self.wlike:
chisq = dwt.wlikelihood(params[-3:], model, self.data, prioroff,
self.priorlow, self.priorup)
else:
chisq = cs.chisq(model, self.data, self.uncert,
prioroff, self.priorlow, self.priorup)
# Return evaluated model if requested:
if ret == "both":
return [model, chisq]
elif ret == "chisq":
return chisq
else: # ret == "model"
return model
|
manqala/erpnext | refs/heads/develop | erpnext/patches/v7_0/update_conversion_factor_in_supplier_quotation_item.py | 53 | from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc('buying', 'doctype', 'supplier_quotation_item')
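# The statement below backfills conversion_factor, stock_qty and stock_uom on
# existing Supplier Quotation Items by joining each row's UOM against the
# item's UOM Conversion Detail records (defaulting the factor to 1 when no
# conversion is defined) and taking the stock UOM from the Item master.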
frappe.db.sql("""update
`tabSupplier Quotation Item` as sqi_t,
(select sqi.item_code as item_code, sqi.uom as uom, ucd.conversion_factor as conversion_factor
from `tabSupplier Quotation Item` sqi left join `tabUOM Conversion Detail` ucd
on ucd.uom = sqi.uom and sqi.item_code = ucd.parent) as conversion_data,
`tabItem` as item
set
sqi_t.conversion_factor= ifnull(conversion_data.conversion_factor, 1),
sqi_t.stock_qty = (ifnull(conversion_data.conversion_factor, 1) * sqi_t.qty),
sqi_t.stock_uom = item.stock_uom
where
sqi_t.item_code = conversion_data.item_code and
sqi_t.uom = conversion_data.uom and sqi_t.item_code = item.name""") |
vim-scripts/Python-mode-klen | refs/heads/master | pymode/async.py | 26 | """ Python-mode async support. """
from ._compat import Queue
RESULTS = Queue()
|
daevaorn/sentry | refs/heads/master | tests/sentry/lang/javascript/test_sourcemaps.py | 28 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.lang.javascript.sourcemaps import (
SourceMap, parse_vlq, parse_sourcemap, sourcemap_to_index, find_source
)
from sentry.testutils import TestCase
from sentry.utils import json
sourcemap = """{
"version":3,
"file":"file.min.js",
"sources":["file1.js","file2.js"],
"names":["add","a","b","multiply","divide","c","e","Raven","captureException"],
"mappings":"AAAA,QAASA,KAAIC,EAAGC,GACf,YACA,OAAOD,GAAIC,ECFZ,QAASC,UAASF,EAAGC,GACpB,YACA,OAAOD,GAAIC,EAEZ,QAASE,QAAOH,EAAGC,GAClB,YACA,KACC,MAAOC,UAASH,IAAIC,EAAGC,GAAID,EAAGC,GAAKG,EAClC,MAAOC,GACRC,MAAMC,iBAAiBF",
"sourceRoot": "foo"
}"""
class ParseVlqTest(TestCase):
def test_simple(self):
assert parse_vlq('gqjG') == [100000]
assert parse_vlq('hqjG') == [-100000]
assert parse_vlq('DFLx+BhqjG') == [-1, -2, -5, -1000, -100000]
assert parse_vlq('CEKw+BgqjG') == [1, 2, 5, 1000, 100000]
assert parse_vlq('/+Z') == [-13295]
class FindSourceTest(TestCase):
def test_simple(self):
indexed_sourcemap = sourcemap_to_index(sourcemap)
result = find_source(indexed_sourcemap, 1, 56)
assert result == SourceMap(dst_line=0, dst_col=50, src='foo/file2.js', src_line=0, src_col=9, name='multiply')
class ParseSourcemapTest(TestCase):
def test_basic(self):
smap = json.loads(sourcemap)
states = list(parse_sourcemap(smap))
assert states == [
SourceMap(dst_line=0, dst_col=0, src='foo/file1.js', src_line=0, src_col=0, name=None),
SourceMap(dst_line=0, dst_col=8, src='foo/file1.js', src_line=0, src_col=9, name='add'),
SourceMap(dst_line=0, dst_col=13, src='foo/file1.js', src_line=0, src_col=13, name='a'),
SourceMap(dst_line=0, dst_col=15, src='foo/file1.js', src_line=0, src_col=16, name='b'),
SourceMap(dst_line=0, dst_col=18, src='foo/file1.js', src_line=1, src_col=1, name=None),
SourceMap(dst_line=0, dst_col=30, src='foo/file1.js', src_line=2, src_col=1, name=None),
SourceMap(dst_line=0, dst_col=37, src='foo/file1.js', src_line=2, src_col=8, name='a'),
SourceMap(dst_line=0, dst_col=40, src='foo/file1.js', src_line=2, src_col=12, name='b'),
SourceMap(dst_line=0, dst_col=42, src='foo/file2.js', src_line=0, src_col=0, name=None),
SourceMap(dst_line=0, dst_col=50, src='foo/file2.js', src_line=0, src_col=9, name='multiply'),
SourceMap(dst_line=0, dst_col=60, src='foo/file2.js', src_line=0, src_col=18, name='a'),
SourceMap(dst_line=0, dst_col=62, src='foo/file2.js', src_line=0, src_col=21, name='b'),
SourceMap(dst_line=0, dst_col=65, src='foo/file2.js', src_line=1, src_col=1, name=None),
SourceMap(dst_line=0, dst_col=77, src='foo/file2.js', src_line=2, src_col=1, name=None),
SourceMap(dst_line=0, dst_col=84, src='foo/file2.js', src_line=2, src_col=8, name='a'),
SourceMap(dst_line=0, dst_col=87, src='foo/file2.js', src_line=2, src_col=12, name='b'),
SourceMap(dst_line=0, dst_col=89, src='foo/file2.js', src_line=4, src_col=0, name=None),
SourceMap(dst_line=0, dst_col=97, src='foo/file2.js', src_line=4, src_col=9, name='divide'),
SourceMap(dst_line=0, dst_col=105, src='foo/file2.js', src_line=4, src_col=16, name='a'),
SourceMap(dst_line=0, dst_col=107, src='foo/file2.js', src_line=4, src_col=19, name='b'),
SourceMap(dst_line=0, dst_col=110, src='foo/file2.js', src_line=5, src_col=1, name=None),
SourceMap(dst_line=0, dst_col=122, src='foo/file2.js', src_line=6, src_col=1, name=None),
SourceMap(dst_line=0, dst_col=127, src='foo/file2.js', src_line=7, src_col=2, name=None),
SourceMap(dst_line=0, dst_col=133, src='foo/file2.js', src_line=7, src_col=9, name='multiply'),
SourceMap(dst_line=0, dst_col=143, src='foo/file2.js', src_line=7, src_col=18, name='add'),
SourceMap(dst_line=0, dst_col=147, src='foo/file2.js', src_line=7, src_col=22, name='a'),
SourceMap(dst_line=0, dst_col=149, src='foo/file2.js', src_line=7, src_col=25, name='b'),
SourceMap(dst_line=0, dst_col=152, src='foo/file2.js', src_line=7, src_col=29, name='a'),
SourceMap(dst_line=0, dst_col=154, src='foo/file2.js', src_line=7, src_col=32, name='b'),
SourceMap(dst_line=0, dst_col=157, src='foo/file2.js', src_line=7, src_col=37, name='c'),
SourceMap(dst_line=0, dst_col=159, src='foo/file2.js', src_line=8, src_col=3, name=None),
SourceMap(dst_line=0, dst_col=165, src='foo/file2.js', src_line=8, src_col=10, name='e'),
SourceMap(dst_line=0, dst_col=168, src='foo/file2.js', src_line=9, src_col=2, name='Raven'),
SourceMap(dst_line=0, dst_col=174, src='foo/file2.js', src_line=9, src_col=8, name='captureException'),
SourceMap(dst_line=0, dst_col=191, src='foo/file2.js', src_line=9, src_col=25, name='e'),
]
|
xzh86/scikit-learn | refs/heads/master | sklearn/preprocessing/__init__.py | 268 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
|
AMOboxTV/AMOBox.LegoBuild | refs/heads/master | script.module.unidecode/lib/unidecode/x1d7.py | 248 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'0', # 0xce
'1', # 0xcf
'2', # 0xd0
'3', # 0xd1
'4', # 0xd2
'5', # 0xd3
'6', # 0xd4
'7', # 0xd5
'8', # 0xd6
'9', # 0xd7
'0', # 0xd8
'1', # 0xd9
'2', # 0xda
'3', # 0xdb
'4', # 0xdc
'5', # 0xdd
'6', # 0xde
'7', # 0xdf
'8', # 0xe0
'9', # 0xe1
'0', # 0xe2
'1', # 0xe3
'2', # 0xe4
'3', # 0xe5
'4', # 0xe6
'5', # 0xe7
'6', # 0xe8
'7', # 0xe9
'8', # 0xea
'9', # 0xeb
'0', # 0xec
'1', # 0xed
'2', # 0xee
'3', # 0xef
'4', # 0xf0
'5', # 0xf1
'6', # 0xf2
'7', # 0xf3
'8', # 0xf4
'9', # 0xf5
'0', # 0xf6
'1', # 0xf7
'2', # 0xf8
'3', # 0xf9
'4', # 0xfa
'5', # 0xfb
'6', # 0xfc
'7', # 0xfd
'8', # 0xfe
'9', # 0xff
)
|
danielbruns-wf/rockstar | refs/heads/master | examples/scheme_rockstar.py | 22 | from rockstar import RockStar
scheme_code = '(display "Hello world")'
rock_it_bro = RockStar(days=400, file_name='helloWorld.scm', code=scheme_code)
rock_it_bro.make_me_a_rockstar()
|
ContinuumIO/numpy | refs/heads/master | numpy/testing/tests/test_doctesting.py | 224 | """ Doctests for NumPy-specific nose/doctest modifications
"""
from __future__ import division, absolute_import, print_function
# try the #random directive on the output line
def check_random_directive():
'''
>>> 2+2
<BadExample object at 0x084D05AC> #random: may vary on your system
'''
# check the implicit "import numpy as np"
def check_implicit_np():
'''
>>> np.array([1,2,3])
array([1, 2, 3])
'''
# there's some extraneous whitespace around the correct responses
def check_whitespace_enabled():
'''
# whitespace after the 3
>>> 1+2
3
# whitespace before the 7
>>> 3+4
7
'''
def check_empty_output():
""" Check that no output does not cause an error.
This is related to nose bug 445; the numpy plugin changed the
doctest-result-variable default and therefore hit this bug:
http://code.google.com/p/python-nose/issues/detail?id=445
>>> a = 10
"""
def check_skip():
""" Check skip directive
The test below should not run
>>> 1/0 #doctest: +SKIP
"""
if __name__ == '__main__':
# Run tests outside numpy test rig
import nose
from numpy.testing.noseclasses import NumpyDoctest
argv = ['', __file__, '--with-numpydoctest']
nose.core.TestProgram(argv=argv, addplugins=[NumpyDoctest()])
|
fabian-paul/PyEMMA | refs/heads/devel | pyemma/_ext/variational/util.py | 3 | """ Add convenience functions here if needed
"""
__author__ = 'noe'
import numpy as _np
class ZeroRankError(_np.linalg.LinAlgError):
"""Input matrix has rank zero."""
pass
def features_to_basis(infiles, basisset, outfiles):
"""Reads input files
basisset : BasisSet object
basis set to be used
References
---------
.. [5] Vitalini, F., Noe, F. and Keller, B. (2015):
A basis set for peptides for the variational approach to conformational kinetics. (In review).
"""
# cycle through input files
# read infile
# map to basis function values
# write outfile
pass
|
apark263/tensorflow | refs/heads/master | tensorflow/python/ops/parallel_for/math_test.py | 2 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vectorization of math kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MathTest(PForTestCase):
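# Each test below defines a loop_fn(i) operating on the i-th slice of its
# inputs; PForTestCase._test_loop_fn is expected to check that vectorizing
# loop_fn with pfor gives the same values as running it iteration by
# iteration.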
def test_unary_cwise_ops(self):
complex_ops = [
math_ops.angle,
math_ops.imag,
math_ops.complex_abs,
math_ops.real,
math_ops.conj,
]
real_ops = [
lambda x: math_ops.acosh(1 + math_ops.square(x)),
math_ops.abs,
math_ops.acos,
math_ops.asin,
math_ops.asinh,
math_ops.atan,
math_ops.atanh,
math_ops.bessel_i0e,
math_ops.bessel_i1e,
math_ops.cos,
math_ops.cosh,
math_ops.digamma,
math_ops.erf,
math_ops.erfc,
math_ops.exp,
math_ops.expm1,
math_ops.inv,
math_ops.is_finite,
math_ops.is_inf,
math_ops.lgamma,
math_ops.log,
math_ops.log1p,
math_ops.neg,
math_ops.negative,
math_ops.reciprocal,
math_ops.rint,
math_ops.round,
math_ops.rsqrt,
math_ops.sigmoid,
math_ops.sign,
math_ops.sin,
math_ops.sinh,
math_ops.sqrt,
math_ops.square,
math_ops.tan,
math_ops.tanh,
math_ops.tanh,
nn.elu,
nn.relu,
nn.relu6,
nn.selu,
nn.softplus,
nn.softsign,
]
for op in complex_ops + real_ops:
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 5])
g.watch(x)
if op in complex_ops:
y = random_ops.random_uniform([3, 5])
g.watch(y)
x = math_ops.complex(x, y)
# pylint: disable=cell-var-from-loop
output_dtypes = []
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
y1 = op(x1)
outputs = [op(x), y1]
if y1.dtype == dtypes.float32:
loss = math_ops.reduce_sum(y1 * y1)
else:
loss = None
if loss is not None:
grad = g.gradient(loss, x1)
if grad is not None:
outputs.append(grad)
del output_dtypes[:]
output_dtypes.extend([t.dtype for t in outputs])
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
def test_unary_cwise_no_grad(self):
for op in [math_ops.ceil,
math_ops.floor,
math_ops.logical_not]:
x = random_ops.random_uniform([3, 5])
if op == math_ops.logical_not:
x = x > 0
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return op(array_ops.gather(x, i))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=x.dtype)
def test_binary_cwise_ops(self):
logical_ops = [
math_ops.logical_and,
math_ops.logical_or,
math_ops.logical_xor
]
# Wrapper functions restricting the range of inputs of zeta and polygamma.
def safe_polygamma(x, y):
return math_ops.polygamma(
math_ops.round(clip_ops.clip_by_value(y, 1, 10)),
x * x + 1)
def safe_zeta(x, y):
return math_ops.zeta(x * x + 1, y * y)
float_ops = [
math_ops.add,
math_ops.add_v2,
math_ops.atan2,
math_ops.complex,
math_ops.div,
math_ops.divide,
math_ops.div_no_nan,
math_ops.equal,
math_ops.floor_div,
math_ops.floor_mod,
math_ops.greater,
math_ops.greater_equal,
math_ops.igamma,
math_ops.igammac,
math_ops.igamma_grad_a,
math_ops.less,
math_ops.less_equal,
math_ops.maximum,
math_ops.minimum,
math_ops.mod,
math_ops.multiply,
math_ops.not_equal,
math_ops.pow,
math_ops.squared_difference,
math_ops.subtract,
math_ops.truncate_mod,
safe_polygamma,
safe_zeta,
]
for op in logical_ops + float_ops:
x = random_ops.random_uniform([7, 3, 5])
y = random_ops.random_uniform([3, 5])
if op in logical_ops:
x = x > 0
y = y > 0
output_dtypes = []
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
del output_dtypes[:]
output_dtypes.extend([t.dtype for t in outputs])
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
def test_approximate_equal(self):
x = random_ops.random_uniform([3, 5])
y = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
return math_ops.approximate_equal(x1, y1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.bool])
def test_addn(self):
x = random_ops.random_uniform([2, 3, 5])
y = random_ops.random_uniform([3, 5])
z = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return math_ops.add_n([x1, y, z])
self._test_loop_fn(loop_fn, 2)
def test_matmul(self):
for tr_a in (True, False):
for tr_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (5, 3) if tr_a else (3, 5)
if stack_a:
shape_a = (2,) + shape_a
shape_b = (7, 5) if tr_b else (5, 7)
if stack_b:
shape_b = (2,) + shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_batch_matmul(self):
for tr_a in (True, False):
for tr_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (4, 5, 3) if tr_a else (4, 3, 5)
if stack_a:
shape_a = (2,) + shape_a
shape_b = (4, 7, 5) if tr_b else (4, 5, 7)
if stack_b:
shape_b = (2,) + shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_reduction(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for op in [
math_ops.reduce_sum, math_ops.reduce_prod, math_ops.reduce_max,
math_ops.reduce_min
]:
for axis in ([1], None, [0, 2]):
for keepdims in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return op(a, axis=axis, keepdims=keepdims)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_cum_sum(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for axis in (1, -2):
for exclusive in (True, False):
for reverse in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.cumsum(
a, axis=axis, exclusive=exclusive, reverse=reverse)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_cum_prod(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for axis in (1, -2):
for exclusive in (True, False):
for reverse in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.cumprod(
a, axis=axis, exclusive=exclusive, reverse=reverse)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_bias_add(self):
x_shape = [2, 3, 4, 5, 6]
x = random_ops.random_uniform(x_shape)
for data_format in ("NCHW", "NHWC"):
with backprop.GradientTape(persistent=True) as g:
bias_dim = 2 if data_format == "NCHW" else -1
bias_shape = x_shape[bias_dim]
bias = random_ops.random_uniform([bias_shape])
g.watch(bias)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
a = array_ops.gather(x, i)
y = nn.bias_add(a, bias, data_format=data_format)
loss = math_ops.reduce_sum(y * y)
return y, g.gradient(loss, bias)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(
loop_fn, 2, loop_fn_dtypes=[dtypes.float32, dtypes.float32])
def test_unsorted_segment_sum(self):
t = random_ops.random_uniform([3, 3, 2])
segment_ids = constant_op.constant([[0, 0, 2], [0, 1, 2], [2, 2, 2]])
num_segments = 3
def loop_fn(i):
data = array_ops.gather(t, i)
data_0 = array_ops.gather(t, 0)
seg_ids = array_ops.gather(segment_ids, i)
return (math_ops.unsorted_segment_sum(data, seg_ids, num_segments),
math_ops.unsorted_segment_sum(data_0, seg_ids, num_segments))
self._test_loop_fn(loop_fn, 3, [dtypes.float32] * 2)
def test_cast(self):
x = constant_op.constant([[1], [2]])
y = constant_op.constant([[1.0], [2.0]])
def loop_fn(i):
return (math_ops.cast(array_ops.gather(x, i), dtypes.float32),
math_ops.cast(array_ops.gather(y, i), dtypes.int32))
self._test_loop_fn(
loop_fn, 2, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
def test_tanh_axpy(self):
a = constant_op.constant(3.)
x = random_ops.random_uniform([4, 5])
y = random_ops.random_uniform([6, 5])
n = x.shape[0]
def loop_fn(i):
return math_ops.tanh(a * array_ops.gather(x, i) + array_ops.gather(y, i))
self._test_loop_fn(loop_fn, n)
def test_select(self):
cond = constant_op.constant([True, False])
a = random_ops.random_uniform([2, 3, 5])
b = random_ops.random_uniform([2, 3, 5])
for cond_shape in [2], [2, 3], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
if __name__ == "__main__":
test.main()
|
garwynn/L900_3.4_Experiment | refs/heads/master | Documentation/target/tcm_mod_builder.py | 4981 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
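# fabric_mod_port / fabric_mod_init_port are filled in by the per-protocol
# include builders below: "lport"/"nport" for FC, "tport"/"iport" for SAS and
# iSCSI. They drive the struct and function naming in the generated C code.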
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
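# Reading aid (not part of the original script): the '\(\*' pattern above is
# meant to match C function-pointer members of struct target_core_fabric_ops,
# i.e. header lines shaped roughly like these hypothetical examples:
#
#     int (*write_pending)(struct se_cmd *);
#     void (*close_session)(struct se_session *);
#
# Each matching line is stored verbatim in fabric_ops, and the generator below
# re-matches those strings (e.g. 'write_pending\)\(') to decide which stub
# functions and prototypes to emit.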
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
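# Reading aid: for a hypothetical fabric_mod_name of "tcm_foo", the two builders
# above would emit roughly the following files (reconstructed from the buf
# strings, shown only for illustration):
#
#   drivers/target/tcm_foo/Makefile:
#       tcm_foo-objs := tcm_foo_fabric.o \
#                       tcm_foo_configfs.o
#       obj-$(CONFIG_TCM_FOO) += tcm_foo.o
#
#   drivers/target/tcm_foo/Kconfig:
#       config TCM_FOO
#           tristate "TCM_FOO fabric module"
#           depends on TARGET_CORE && CONFIGFS_FS
#           default n
#           ---help---
#           Say Y here to enable the TCM_FOO fabric module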
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
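# Usage sketch (hypothetical module name; this is a Python 2 script, run from
# the directory it lives in, since tcm_dir is taken from os.getcwd() + "/../../"):
#
#     python tcm_mod_builder.py -m tcm_foo -p iSCSI
#
# This creates drivers/target/tcm_foo/ with generated tcm_foo_base.h,
# tcm_foo_fabric.c/.h and tcm_foo_configfs.c skeletons plus a Makefile and
# Kconfig, and optionally wires the new directory into drivers/target/Makefile
# and drivers/target/Kconfig.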
|
itsjeyd/edx-platform | refs/heads/master | common/lib/capa/capa/tests/test_customrender.py | 37 | from lxml import etree
import unittest
import xml.sax.saxutils as saxutils
from capa.tests.helpers import test_capa_system
from capa import customrender
# just a handy shortcut
lookup_tag = customrender.registry.get_class_for_tag
def extract_context(xml):
"""
Given an xml element corresponding to the output of test_capa_system.render_template, get back the
original context
"""
return eval(xml.text)
def quote_attr(s):
return saxutils.quoteattr(s)[1:-1] # don't want the outer quotes
class HelperTest(unittest.TestCase):
'''
Make sure that our helper function works!
'''
def check(self, d):
xml = etree.XML(test_capa_system().render_template('blah', d))
self.assertEqual(d, extract_context(xml))
def test_extract_context(self):
self.check({})
self.check({1: 2})
self.check({'id': 'an id'})
self.check({'with"quote': 'also"quote'})
class SolutionRenderTest(unittest.TestCase):
'''
Make sure solutions render properly.
'''
def test_rendering(self):
solution = 'To compute unicorns, count them.'
xml_str = """<solution id="solution_12">{s}</solution>""".format(s=solution)
element = etree.fromstring(xml_str)
renderer = lookup_tag('solution')(test_capa_system(), element)
self.assertEqual(renderer.id, 'solution_12')
# Our test_capa_system "renders" templates to a div with the repr of the context.
xml = renderer.get_html()
context = extract_context(xml)
self.assertEqual(context, {'id': 'solution_12'})
class MathRenderTest(unittest.TestCase):
'''
Make sure math renders properly.
'''
def check_parse(self, latex_in, mathjax_out):
xml_str = """<math>{tex}</math>""".format(tex=latex_in)
element = etree.fromstring(xml_str)
renderer = lookup_tag('math')(test_capa_system(), element)
self.assertEqual(renderer.mathstr, mathjax_out)
def test_parsing(self):
self.check_parse('$abc$', '[mathjaxinline]abc[/mathjaxinline]')
self.check_parse('$abc', '$abc')
self.check_parse(r'$\displaystyle 2+2$', '[mathjax] 2+2[/mathjax]')
# NOTE: not testing get_html yet because I don't understand why it's doing what it's doing.
|
alfredodeza/ceph-deploy | refs/heads/master | ceph_deploy/tests/unit/test_new.py | 18 | from ceph_deploy import new
from ceph_deploy.tests import util
import pytest
class TestValidateHostIp(object):
def test_for_all_subnets_all_ips_match(self):
ips = util.generate_ips("10.0.0.1", "10.0.0.40")
ips.extend(util.generate_ips("10.0.1.1", "10.0.1.40"))
subnets = ["10.0.0.1/16", "10.0.1.1/16"]
assert new.validate_host_ip(ips, subnets) is None
def test_all_subnets_have_one_matching_ip(self):
ips = util.generate_ips("10.0.0.1", "10.0.0.40")
ips.extend(util.generate_ips("10.0.1.1", "10.0.1.40"))
# regardless of extra IPs that may not match. The requirement
# is already satisfied
ips.extend(util.generate_ips("10.1.2.1", "10.1.2.40"))
subnets = ["10.0.0.1/16", "10.0.1.1/16"]
assert new.validate_host_ip(ips, subnets) is None
def test_not_all_subnets_have_one_matching_ip(self):
ips = util.generate_ips("10.0.0.1", "10.0.0.40")
ips.extend(util.generate_ips("10.0.1.1", "10.0.1.40"))
subnets = ["10.0.0.1/16", "10.1.1.1/16"]
with pytest.raises(RuntimeError):
new.validate_host_ip(ips, subnets)
|
krafczyk/spack | refs/heads/develop | lib/spack/spack/build_systems/makefile.py | 5 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import inspect
import llnl.util.tty as tty
from llnl.util.filesystem import working_dir
from spack.package import PackageBase, run_after
class MakefilePackage(PackageBase):
"""Specialized class for packages that are built using editable Makefiles
This class provides three phases that can be overridden:
1. :py:meth:`~.MakefilePackage.edit`
2. :py:meth:`~.MakefilePackage.build`
3. :py:meth:`~.MakefilePackage.install`
It is usually necessary to override the :py:meth:`~.MakefilePackage.edit`
phase, while :py:meth:`~.MakefilePackage.build` and
:py:meth:`~.MakefilePackage.install` have sensible defaults.
For a finer tuning you may override:
+-----------------------------------------------+--------------------+
| **Method** | **Purpose** |
+===============================================+====================+
| :py:attr:`~.MakefilePackage.build_targets` | Specify ``make`` |
| | targets for the |
| | build phase |
+-----------------------------------------------+--------------------+
| :py:attr:`~.MakefilePackage.install_targets` | Specify ``make`` |
| | targets for the |
| | install phase |
+-----------------------------------------------+--------------------+
| :py:meth:`~.MakefilePackage.build_directory` | Directory where the|
| | Makefile is located|
+-----------------------------------------------+--------------------+
"""
#: Phases of a package that is built with a hand-written Makefile
phases = ['edit', 'build', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'MakefilePackage'
#: Targets for ``make`` during the :py:meth:`~.MakefilePackage.build`
#: phase
build_targets = []
#: Targets for ``make`` during the :py:meth:`~.MakefilePackage.install`
#: phase
install_targets = ['install']
#: Callback names for build-time test
build_time_test_callbacks = ['check']
#: Callback names for install-time test
install_time_test_callbacks = ['installcheck']
@property
def build_directory(self):
"""Returns the directory containing the main Makefile
:return: build directory
"""
return self.stage.source_path
def edit(self, spec, prefix):
"""Edits the Makefile before calling make. This phase cannot
be defaulted.
"""
tty.msg('Using default implementation: skipping edit phase.')
def build(self, spec, prefix):
"""Calls make, passing :py:attr:`~.MakefilePackage.build_targets`
as targets.
"""
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.build_targets)
def install(self, spec, prefix):
"""Calls make, passing :py:attr:`~.MakefilePackage.install_targets`
as targets.
"""
with working_dir(self.build_directory):
inspect.getmodule(self).make(*self.install_targets)
run_after('build')(PackageBase._run_default_build_time_test_callbacks)
def check(self):
"""Searches the Makefile for targets ``test`` and ``check``
and runs them if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute('test')
self._if_make_target_execute('check')
run_after('install')(PackageBase._run_default_install_time_test_callbacks)
def installcheck(self):
"""Searches the Makefile for an ``installcheck`` target
and runs it if found.
"""
with working_dir(self.build_directory):
self._if_make_target_execute('installcheck')
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix)
|
mathewpower/djangorestframework-queryparams | refs/heads/master | rest_framework_queryparams/schema.py | 1 | from rest_framework import serializers
from rest_framework.schemas import SchemaGenerator as _SchemaGenerator
from rest_framework.compat import coreapi, urlparse
class SchemaGenerator(_SchemaGenerator):
def get_link(self, path, method, view):
"""
Return a `coreapi.Link` instance for the given endpoint.
"""
fields = self.get_path_fields(path, method, view)
fields += self.get_serializer_fields(path, method, view)
fields += self.get_pagination_fields(path, method, view)
fields += self.get_filter_fields(path, method, view)
fields += self.get_query_fields(path, method, view)
if fields and any([field.location in ('form', 'body') for field in fields]):
encoding = self.get_encoding(path, method, view)
else:
encoding = None
if self.url and path.startswith('/'):
path = path[1:]
return coreapi.Link(
url=urlparse.urljoin(self.url, path),
action=method.lower(),
encoding=encoding,
fields=fields
)
def get_query_fields(self, path, method, view):
"""
Return a list of `coreapi.Field` instances corresponding to any
request body input, as determined by the query serializer class.
"""
if hasattr(view, 'query_serializer_class'):
if view.query_serializer_class:
serializer = view.query_serializer_class()
else:
return []
else:
return []
if isinstance(serializer, serializers.ListSerializer):
return [coreapi.Field(name='data', location='query', required=True)]
if not isinstance(serializer, serializers.Serializer):
return []
fields = []
for field in serializer.fields.values():
if field.read_only or isinstance(field, serializers.HiddenField):
continue
required = field.required and method != 'PATCH'
description = coreapi.force_text(field.help_text) if field.help_text else ''
field = coreapi.Field(
name=field.source,
location='query',
required=required,
description=description
)
fields.append(field)
return fields
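# Usage sketch (assumed serializer and view names; illustration only): a view
# opts in by setting query_serializer_class, and every writable field of that
# serializer is reported as a location='query' coreapi.Field in the generated
# schema, alongside the usual path/pagination/filter fields.
#
#     class ItemQuerySerializer(serializers.Serializer):
#         q = serializers.CharField(required=False, help_text='Search term')
#
#     class ItemListView(generics.ListAPIView):
#         query_serializer_class = ItemQuerySerializer
#         ...
#
#     generator = SchemaGenerator(title='Items API')
#     schema = generator.get_schema()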
|
spaceone/pyjs | refs/heads/master | pyjswidgets/pyjslib.PyJS.py | 7 | # Copyright 2006 James Tauber and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# iteration from Bob Ippolito's Iteration in JavaScript
# must declare import _before_ importing sys
# FIXME: dynamic=1, async=False, init=True are useless here (?)
def import_module(path, parent_module, module_name, dynamic=1, async=False, init=True):
module = None
JS("""
@{{module}} = $pyjs['modules_hash'][@{{module_name}}];
if (typeof @{{module}} == 'function' && @{{module}}['__was_initialized__'] == true) {
return null;
}
if (@{{module_name}} == 'sys' || @{{module_name}} == 'pyjslib') {
@{{module}}();
return null;
}
""")
names = module_name.split(".")
importName = ''
# Import all modules in the chain (import a.b.c)
for name in names:
importName += name
JS("""@{{module}} = $pyjs['modules_hash'][@{{importName}}];""")
if isUndefined(module):
raise ImportError("No module named " + importName)
if JS("@{{module}}['__was_initialized__'] != true"):
# Module wasn't initialized
module()
importName += '.'
return None
# FIXME: dynamic=1, async=False are useless here (?). Only dynamic modules
# are loaded with load_module and it's always "async"
@noSourceTracking
def load_module(path, parent_module, module_name, dynamic=1, async=False):
"""
"""
JS("""
var cache_file;
var module = $pyjs['modules_hash'][@{{module_name}}];
if (typeof module == 'function') {
return true;
}
if (!@{{dynamic}}) {
// There's no way we can load a none dynamic module
return false;
}
if (@{{path}} == null)
{
@{{path}} = './';
}
var override_name = @{{sys}}['platform'] + "." + @{{module_name}};
if (((@{{sys}}['overrides'] != null) &&
(@{{sys}}['overrides']['has_key'](override_name))))
{
cache_file = @{{sys}}['overrides']['__getitem__'](override_name) ;
}
else
{
cache_file = @{{module_name}} ;
}
cache_file = (@{{path}} + cache_file + '.cache.js' ) ;
//alert("cache " + cache_file + " " + module_name + " " + parent_module);
var onload_fn = '';
// this one tacks the script onto the end of the DOM
pyjs_load_script(cache_file, onload_fn, @{{async}});
try {
var loaded = (typeof $pyjs['modules_hash'][@{{module_name}}] == 'function')
} catch ( e ) {
}
if (loaded) {
return true;
}
return false;
""")
@noSourceTracking
def load_module_wait(proceed_fn, parent_mod, module_list, dynamic):
module_list = module_list.getArray()
JS("""
var wait_count = 0;
//var data = '';
//var element = $doc['createElement']("div");
//element['innerHTML'] = '';
//$doc['body']['appendChild'](element);
//function write_dom(txt) {
// element['innerHTML'] += txt;
//}
var timeoutperiod = 1;
if (@{{dynamic}})
var timeoutperiod = 1;
var wait = function() {
wait_count++;
//write_dom(".");
var loaded = true;
for (var i in @{{module_list}}) {
if (typeof $pyjs['modules_hash'][@{{module_list}}[i]] != 'function') {
loaded = false;
break;
}
}
if (!loaded) {
setTimeout(wait, timeoutperiod);
} else {
if (@{{proceed_fn}}['importDone'])
@{{proceed_fn}}['importDone'](@{{proceed_fn}});
else
@{{proceed_fn}}();
//$doc['body']['removeChild'](element);
}
}
//write_dom("Loading modules ");
wait();
""")
class Modload:
# All to-be-imported module names are in app_modlist
# Since we're only _loading_ the modules here, we can do that in almost
# any order. There's one limitation: a child/sub module cannot be loaded
# unless its parent is loaded. It has to be chained in the module list.
# (1) $pyjs.modules.pyjamas
# (2) $pyjs.modules.pyjamas.ui
# (3) $pyjs.modules.pyjamas.ui.Widget
# Therefore, all modules are collected and sorted on the depth (i.e. the
# number of dots in it)
# As long as we don't move on to the next depth unless all modules of the
# previous depth are loaded, we won't trun into unchainable modules
# The execution of the module code is done when the import statement is
# reached, or after loading the modules for the main module.
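# Reading aid (hypothetical module list): ['app', 'pyjamas', 'pyjamas.ui',
# 'pyjamas.ui.Widget'] is grouped by dot-depth into
# {1: ['app', 'pyjamas'], 2: ['pyjamas.ui'], 3: ['pyjamas.ui.Widget']},
# and next() walks the depths in increasing order, so a child module is never
# requested before its parent module has been loaded.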
@noSourceTracking
def __init__(self, path, app_modlist, app_imported_fn, dynamic,
parent_mod):
self.app_modlist = app_modlist
self.app_imported_fn = app_imported_fn
self.path = path
self.dynamic = dynamic
self.parent_mod = parent_mod
self.modules = {}
for modlist in self.app_modlist:
for mod in modlist:
depth = len(mod.split('.'))
if not self.modules.has_key(depth):
self.modules[depth] = []
self.modules[depth].append(mod)
self.depths = self.modules.keys()
self.depths.sort()
self.depths.reverse()
@noSourceTracking
def next(self):
if not self.dynamic:
# All modules are static. Just start the main module.
self.app_imported_fn()
return
depth = self.depths.pop()
# Initiate the loading of the modules.
for app in self.modules[depth]:
load_module(self.path, self.parent_mod, app, self.dynamic, True);
if len(self.depths) == 0:
# This is the last depth. Start the main module after loading these
# modules.
load_module_wait(self.app_imported_fn, self.parent_mod, self.modules[depth], self.dynamic)
else:
# After loading the modules, to the next depth.
load_module_wait(getattr(self, "next"), self.parent_mod, self.modules[depth], self.dynamic)
def get_module(module_name):
ev = "__mod = %s;" % module_name
JS("pyjs_eval(@{{ev}});")
return __mod
def preload_app_modules(path, app_modnames, app_imported_fn, dynamic,
parent_mod=None):
loader = Modload(path, app_modnames, app_imported_fn, dynamic, parent_mod)
loader.next()
class BaseException:
message = ''
def __init__(self, *args):
self.args = args
if len(args) == 1:
self.message = args[0]
def __getitem__(self, index):
return self.args.__getitem__(index)
def __str__(self):
if len(self.args) is 0:
return ''
elif len(self.args) is 1:
return str(self.message)
return repr(self.args)
def __repr__(self):
return self.__name__ + repr(self.args)
def toString(self):
return str(self)
class Exception(BaseException):
pass
class StandardError(Exception):
pass
class TypeError(StandardError):
pass
class AttributeError(StandardError):
def toString(self):
return "AttributeError: %s of %s" % (self.args[1], self.args[0])
class NameError(StandardError):
pass
class ValueError(StandardError):
pass
class ImportError(StandardError):
pass
class LookupError(StandardError):
def toString(self):
return self.__name__ + ": " + self.args[0]
class KeyError(LookupError):
def __str__(self):
if len(self.args) is 0:
return ''
elif len(self.args) is 1:
return repr(self.message)
return repr(self.args)
class IndexError(LookupError):
pass
# There seems to be an bug in Chrome with accessing the message
# property, on which an error is thrown
# Hence the declaration of 'var message' and the wrapping in try..catch
def init():
JS("""
pyjslib['_errorMapping'] = function(err) {
if (err instanceof(ReferenceError) || err instanceof(TypeError)) {
var message = ''
try {
message = err['message'];
} catch ( e) {
}
return pyjslib['AttributeError'](message);
}
return err
}
pyjslib['TryElse'] = function () { };
pyjslib['TryElse']['prototype'] = new Error();
pyjslib['TryElse']['__name__'] = 'TryElse';
pyjslib['TryElse']['message'] = 'TryElse';
pyjslib['StopIteration'] = function () { };
pyjslib['StopIteration']['prototype'] = new Error();
pyjslib['StopIteration']['__name__'] = 'StopIteration';
pyjslib['StopIteration']['message'] = 'StopIteration';
pyjslib['String_find'] = function(sub, start, end) {
var pos=this['indexOf'](sub, start);
if (pyjslib['isUndefined'](end)) return pos;
if (pos + sub['length']>end) return -1;
return pos;
}
pyjslib['String_join'] = function(data) {
var text="";
if (pyjslib['isArray'](data)) {
return data['join'](this);
}
else if (pyjslib['isIteratable'](data)) {
var iter=data['__iter__']();
try {
text+=iter['next']();
while (true) {
var item=iter['next']();
text+=this + item;
}
}
catch (e) {
if (e['__name__'] != 'StopIteration') throw e;
}
}
return text;
}
pyjslib['String_isdigit'] = function() {
return (this['match'](/^\d+$/g) != null);
}
pyjslib['String_replace'] = function(old, replace, count) {
var do_max=false;
var start=0;
var new_str="";
var pos=0;
if (!pyjslib['isString'](old)) return this['__replace'](old, replace);
if (!pyjslib['isUndefined'](count)) do_max=true;
while (start<this['length']) {
if (do_max && !count--) break;
pos=this['indexOf'](old, start);
if (pos<0) break;
new_str+=this['substring'](start, pos) + replace;
start=pos+old['length'];
}
if (start<this['length']) new_str+=this['substring'](start);
return new_str;
}
pyjslib['String_split'] = function(sep, maxsplit) {
var items=new pyjslib['List']();
var do_max=false;
var subject=this;
var start=0;
var pos=0;
if (pyjslib['isUndefined'](sep) || pyjslib['isNull'](sep)) {
sep=" ";
subject=subject['strip']();
subject=subject['replace'](/\s+/g, sep);
}
else if (!pyjslib['isUndefined'](maxsplit)) do_max=true;
if (subject['length'] == 0) {
return items;
}
while (start<subject['length']) {
if (do_max && !maxsplit--) break;
pos=subject['indexOf'](sep, start);
if (pos<0) break;
items['append'](subject['substring'](start, pos));
start=pos+sep['length'];
}
if (start<=subject['length']) items['append'](subject['substring'](start));
return items;
}
pyjslib['String___iter__'] = function() {
var i = 0;
var s = this;
return {
'next': function() {
if (i >= s['length']) {
throw pyjslib['StopIteration'];
}
return s['substring'](i++, i, 1);
},
'__iter__': function() {
return this;
}
};
}
pyjslib['String_strip'] = function(chars) {
return this['lstrip'](chars)['rstrip'](chars);
}
pyjslib['String_lstrip'] = function(chars) {
if (pyjslib['isUndefined'](chars)) return this['replace'](/^\s+/, "");
return this['replace'](new RegExp("^[" + chars + "]+"), "");
}
pyjslib['String_rstrip'] = function(chars) {
if (pyjslib['isUndefined'](chars)) return this['replace'](/\s+$/, "");
return this['replace'](new RegExp("[" + chars + "]+$"), "");
}
pyjslib['String_startswith'] = function(prefix, start, end) {
// FIXME: accept tuples as suffix (since 2.5)
if (pyjslib['isUndefined'](start)) start = 0;
if (pyjslib['isUndefined'](end)) end = this['length'];
if ((end - start) < prefix['length']) return false
if (this['substr'](start, prefix['length']) == prefix) return true;
return false;
}
pyjslib['String_endswith'] = function(suffix, start, end) {
// FIXME: accept tuples as suffix (since 2.5)
if (pyjslib['isUndefined'](start)) start = 0;
if (pyjslib['isUndefined'](end)) end = this['length'];
if ((end - start) < suffix['length']) return false
if (this['substr'](end - suffix['length'], suffix['length']) == suffix) return true;
return false;
}
pyjslib['String_ljust'] = function(width, fillchar) {
if (typeof(width) != 'number' ||
parseInt(width) != width) {
throw (pyjslib['TypeError']("an integer is required"));
}
if (pyjslib['isUndefined'](fillchar)) fillchar = ' ';
if (typeof(fillchar) != 'string' ||
fillchar['length'] != 1) {
throw (pyjslib['TypeError']("ljust() argument 2 must be char, not " + typeof(fillchar)));
}
if (this['length'] >= width) return this;
return this + new Array(width+1 - this['length'])['join'](fillchar);
}
pyjslib['String_rjust'] = function(width, fillchar) {
if (typeof(width) != 'number' ||
parseInt(width) != width) {
throw (pyjslib['TypeError']("an integer is required"));
}
if (pyjslib['isUndefined'](fillchar)) fillchar = ' ';
if (typeof(fillchar) != 'string' ||
fillchar['length'] != 1) {
throw (pyjslib['TypeError']("rjust() argument 2 must be char, not " + typeof(fillchar)));
}
if (this['length'] >= width) return this;
return new Array(width + 1 - this['length'])['join'](fillchar) + this;
}
pyjslib['String_center'] = function(width, fillchar) {
if (typeof(width) != 'number' ||
parseInt(width) != width) {
throw (pyjslib['TypeError']("an integer is required"));
}
if (pyjslib['isUndefined'](fillchar)) fillchar = ' ';
if (typeof(fillchar) != 'string' ||
fillchar['length'] != 1) {
throw (pyjslib['TypeError']("center() argument 2 must be char, not " + typeof(fillchar)));
}
if (this['length'] >= width) return this;
var padlen = width - this['length'];
var right = Math['ceil'](padlen / 2);
var left = padlen - right;
return new Array(left+1)['join'](fillchar) + this + new Array(right+1)['join'](fillchar);
}
pyjslib['abs'] = Math['abs'];
""")
class Class:
def __init__(self, name):
self.name = name
def __str___(self):
return self.name
@noSourceTracking
def eq(a,b):
# All 'python' classes and types are implemented as objects/functions.
# So, for speed, do a typeof X / X.__cmp__ on a/b.
# Checking for the existance of .__cmp__ is expensive...
JS("""
if (@{{a}} === null) {
if (@{{b}} === null) return true;
return false;
}
if (@{{b}} === null) {
return false;
}
if ((typeof @{{a}} == 'object' || typeof @{{a}} == 'function') && typeof @{{a}}['__cmp__'] == 'function') {
return @{{a}}['__cmp__'](@{{b}}) == 0;
} else if ((typeof @{{b}} == 'object' || typeof @{{b}} == 'function') && typeof @{{b}}['__cmp__'] == 'function') {
return @{{b}}['__cmp__'](@{{a}}) == 0;
}
return @{{a}} == @{{b}};
""")
@noSourceTracking
def cmp(a,b):
JS("""
if (@{{a}} === null) {
if (@{{b}} === null) return 0;
return -1;
}
if (@{{b}} === null) {
return 1;
}
if ((typeof @{{a}} == 'object' || typeof @{{a}} == 'function') && typeof @{{a}}['__cmp__'] == 'function') {
return @{{a}}['__cmp__'](@{{b}});
} else if ((typeof @{{b}} == 'object' || typeof @{{b}} == 'function') && typeof @{{b}}['__cmp__'] == 'function') {
return -@{{b}}['__cmp__'](@{{a}});
}
if (@{{a}} > @{{b}}) return 1;
if (@{{b}} > @{{a}}) return -1;
return 0;
""")
# For list.sort()
__cmp = cmp
@noSourceTracking
def bool(v):
# this needs to stay in native code without any dependencies here,
# because this is used by if and while, we need to prevent
# recursion
JS("""
if (!@{{v}}) return false;
switch(typeof @{{v}}){
case 'boolean':
return @{{v}};
case 'object':
if (@{{v}}['__nonzero__']){
return @{{v}}['__nonzero__']();
}else if (@{{v}}['__len__']){
return @{{v}}['__len__']()>0;
}
return true;
}
return Boolean(@{{v}});
""")
class List:
@noSourceTracking
def __init__(self, data=None):
JS("""
this['l'] = [];
this['extend'](@{{data}});
""")
@noSourceTracking
def append(self, item):
JS(""" this['l'][this['l']['length']] = @{{item}};""")
@noSourceTracking
def extend(self, data):
JS("""
if (pyjslib['isArray'](@{{data}})) {
var n = this['l']['length'];
for (var i=0; i < @{{data}}['length']; i++) {
this['l'][n+i]=@{{data}}[i];
}
}
else if (pyjslib['isIteratable'](@{{data}})) {
var iter=@{{data}}['__iter__']();
var i=this['l']['length'];
try {
while (true) {
var item=iter['next']();
this['l'][i++]=item;
}
}
catch (e) {
if (e['__name__'] != 'StopIteration') throw e;
}
}
""")
@noSourceTracking
def remove(self, value):
JS("""
var index=this['index'](@{{value}});
if (index<0) return false;
this['l']['splice'](index, 1);
return true;
""")
@noSourceTracking
def index(self, value, start=0):
JS("""
var length=this['l']['length'];
for (var i=@{{start}}; i<length; i++) {
if (this['l'][i]==@{{value}}) {
return i;
}
}
return -1;
""")
@noSourceTracking
def insert(self, index, value):
JS(""" var a = this['l']; this['l']=a['slice'](0, @{{index}})['concat'](@{{value}}, a['slice'](@{{index}}));""")
@noSourceTracking
def pop(self, index = -1):
JS("""
if (@{{index}}<0) @{{index}} = this['l']['length'] + @{{index}};
var a = this['l'][@{{index}}];
this['l']['splice'](@{{index}}, 1);
return a;
""")
@noSourceTracking
def __cmp__(self, l):
if not isinstance(l, List):
return -1
ll = len(self) - len(l)
if ll != 0:
return ll
for x in range(len(l)):
ll = cmp(self.__getitem__(x), l[x])
if ll != 0:
return ll
return 0
@noSourceTracking
def slice(self, lower, upper):
JS("""
if (@{{upper}}==null) return pyjslib['List'](this['l']['slice'](@{{lower}}));
return pyjslib['List'](this['l']['slice'](@{{lower}}, @{{upper}}));
""")
@noSourceTracking
def __getitem__(self, index):
JS("""
if (@{{index}}<0) @{{index}} = this['l']['length'] + @{{index}};
return this['l'][@{{index}}];
""")
@noSourceTracking
def __setitem__(self, index, value):
JS(""" this['l'][@{{index}}]=@{{value}};""")
@noSourceTracking
def __delitem__(self, index):
JS(""" this['l']['splice'](@{{index}}, 1);""")
@noSourceTracking
def __len__(self):
JS(""" return this['l']['length'];""")
@noSourceTracking
def __contains__(self, value):
return self.index(value) >= 0
@noSourceTracking
def __iter__(self):
JS("""
var i = 0;
var l = this['l'];
return {
'next': function() {
if (i >= l['length']) {
throw pyjslib['StopIteration'];
}
return l[i++];
},
'__iter__': function() {
return this;
}
};
""")
@noSourceTracking
def reverse(self):
JS(""" this['l']['reverse']();""")
def sort(self, cmp=None, key=None, reverse=False):
if not cmp:
cmp = __cmp
if key and reverse:
def thisSort1(a,b):
return -cmp(key(a), key(b))
self.l.sort(thisSort1)
elif key:
def thisSort2(a,b):
return cmp(key(a), key(b))
self.l.sort(thisSort2)
elif reverse:
def thisSort3(a,b):
return -cmp(a, b)
self.l.sort(thisSort3)
else:
self.l.sort(cmp)
@noSourceTracking
def getArray(self):
"""
Access the javascript Array that is used internally by this list
"""
return self.l
@noSourceTracking
def __str__(self):
return self.__repr__()
@noSourceTracking
def toString(self):
return self.__repr__()
def __repr__(self):
#r = []
#for item in self:
# r.append(repr(item))
#return '[' + ', '.join(r) + ']'
JS("""
var s = "[";
for (var i=0; i < @{{self}}['l']['length']; i++) {
s += pyjslib['repr'](@{{self}}['l'][i]);
if (i < @{{self}}['l']['length'] - 1)
s += ", ";
};
s += "]"
return s;
""")
class Tuple:
@noSourceTracking
def __init__(self, data=None):
JS("""
this['l'] = [];
this['extend'](@{{data}});
""")
@noSourceTracking
def append(self, item):
JS(""" this['l'][this['l']['length']] = @{{item}};""")
@noSourceTracking
def extend(self, data):
JS("""
if (pyjslib['isArray'](@{{data}})) {
var n = this['l']['length'];
for (var i=0; i < @{{data}}['length']; i++) {
this['l'][n+i]=@{{data}}[i];
}
}
else if (pyjslib['isIteratable'](@{{data}})) {
var iter=@{{data}}['__iter__']();
var i=this['l']['length'];
try {
while (true) {
var item=iter['next']();
this['l'][i++]=item;
}
}
catch (e) {
if (e['__name__'] != 'StopIteration') throw e;
}
}
""")
@noSourceTracking
def remove(self, value):
JS("""
var index=this['index'](@{{value}});
if (index<0) return false;
this['l']['splice'](index, 1);
return true;
""")
@noSourceTracking
def index(self, value, start=0):
JS("""
var length=this['l']['length'];
for (var i=@{{start}}; i<length; i++) {
if (this['l'][i]==@{{value}}) {
return i;
}
}
return -1;
""")
@noSourceTracking
def insert(self, index, value):
JS(""" var a = this['l']; this['l']=a['slice'](0, @{{index}})['concat'](@{{value}}, a['slice'](@{{index}}));""")
@noSourceTracking
def pop(self, index = -1):
JS("""
if (@{{index}}<0) @{{index}} = this['l']['length'] + @{{index}};
var a = this['l'][@{{index}}];
this['l']['splice'](@{{index}}, 1);
return a;
""")
@noSourceTracking
def __cmp__(self, l):
if not isinstance(l, Tuple):
return 1
ll = len(self) - len(l)
if ll != 0:
return ll
for x in range(len(l)):
ll = cmp(self.__getitem__(x), l[x])
if ll != 0:
return ll
return 0
@noSourceTracking
def slice(self, lower, upper):
JS("""
if (@{{upper}}==null) return pyjslib['Tuple'](this['l']['slice'](@{{lower}}));
return pyjslib['Tuple'](this['l']['slice'](@{{lower}}, @{{upper}}));
""")
@noSourceTracking
def __getitem__(self, index):
JS("""
if (@{{index}}<0) @{{index}} = this['l']['length'] + @{{index}};
return this['l'][@{{index}}];
""")
@noSourceTracking
def __setitem__(self, index, value):
JS(""" this['l'][@{{index}}]=@{{value}};""")
@noSourceTracking
def __delitem__(self, index):
JS(""" this['l']['splice'](@{{index}}, 1);""")
@noSourceTracking
def __len__(self):
JS(""" return this['l']['length'];""")
@noSourceTracking
def __contains__(self, value):
return self.index(value) >= 0
@noSourceTracking
def __iter__(self):
JS("""
var i = 0;
var l = this['l'];
return {
'next': function() {
if (i >= l['length']) {
throw pyjslib['StopIteration'];
}
return l[i++];
},
'__iter__': function() {
return this;
}
};
""")
@noSourceTracking
def reverse(self):
JS(""" this['l']['reverse']();""")
def sort(self, cmp=None, key=None, reverse=False):
if not cmp:
cmp = __cmp
if key and reverse:
def thisSort1(a,b):
return -cmp(key(a), key(b))
self.l.sort(thisSort1)
elif key:
def thisSort2(a,b):
return cmp(key(a), key(b))
self.l.sort(thisSort2)
elif reverse:
def thisSort3(a,b):
return -cmp(a, b)
self.l.sort(thisSort3)
else:
self.l.sort(cmp)
@noSourceTracking
def getArray(self):
"""
Access the javascript Array that is used internally by this list
"""
return self.l
@noSourceTracking
def __str__(self):
return self.__repr__()
@noSourceTracking
def toString(self):
return self.__repr__()
def __repr__(self):
#r = []
#for item in self:
# r.append(repr(item))
#if len(r) == 1:
# return '(' + ', '.join(r) + ',)'
#return '(' + ', '.join(r) + ')'
JS("""
var s = "(";
for (var i=0; i < @{{self}}['l']['length']; i++) {
s += pyjslib['repr'](@{{self}}['l'][i]);
if (i < @{{self}}['l']['length'] - 1)
s += ", ";
};
if (@{{self}}['l']['length'] == 1)
s += ",";
s += ")"
return s;
""")
class Dict:
@noSourceTracking
def __init__(self, data=None):
JS("""
this['d'] = {};
if (pyjslib['isArray'](@{{data}})) {
for (var i in @{{data}}) {
var item=@{{data}}[i];
this['__setitem__'](item[0], item[1]);
//var sKey=pyjslib['hash'](item[0]);
//this['d'][sKey]=item[1];
}
}
else if (pyjslib['isIteratable'](@{{data}})) {
var iter=@{{data}}['__iter__']();
try {
while (true) {
var item=iter['next']();
this['__setitem__'](item['__getitem__'](0), item['__getitem__'](1));
}
}
catch (e) {
if (e['__name__'] != 'StopIteration') throw e;
}
}
else if (pyjslib['isObject'](@{{data}})) {
for (var key in @{{data}}) {
this['__setitem__'](key, @{{data}}[key]);
}
}
""")
@noSourceTracking
def __setitem__(self, key, value):
JS("""
var sKey = pyjslib['hash'](@{{key}});
this['d'][sKey]=[@{{key}}, @{{value}}];
""")
@noSourceTracking
def __getitem__(self, key):
JS("""
var sKey = pyjslib['hash'](@{{key}});
var value=this['d'][sKey];
if (pyjslib['isUndefined'](value)){
throw pyjslib['KeyError'](@{{key}});
}
return value[1];
""")
@noSourceTracking
def __nonzero__(self):
JS("""
for (var i in this['d']){
return true;
}
return false;
""")
@noSourceTracking
def __len__(self):
JS("""
var size=0;
for (var i in this['d']) size++;
return size;
""")
@noSourceTracking
def has_key(self, key):
return self.__contains__(key)
@noSourceTracking
def __delitem__(self, key):
JS("""
var sKey = pyjslib['hash'](@{{key}});
delete this['d'][sKey];
""")
@noSourceTracking
def __contains__(self, key):
JS("""
var sKey = pyjslib['hash'](@{{key}});
return (pyjslib['isUndefined'](this['d'][sKey])) ? false : true;
""")
@noSourceTracking
def keys(self):
JS("""
var keys=new pyjslib['List']();
for (var key in this['d']) {
keys['append'](this['d'][key][0]);
}
return keys;
""")
@noSourceTracking
def values(self):
JS("""
var values=new pyjslib['List']();
for (var key in this['d']) values['append'](this['d'][key][1]);
return values;
""")
@noSourceTracking
def items(self):
JS("""
var items = new pyjslib['List']();
for (var key in this['d']) {
var kv = this['d'][key];
items['append'](new pyjslib['List'](kv))
}
return items;
""")
@noSourceTracking
def __iter__(self):
return self.keys().__iter__()
@noSourceTracking
def iterkeys(self):
return self.__iter__()
@noSourceTracking
def itervalues(self):
        return self.values().__iter__()
@noSourceTracking
def iteritems(self):
        return self.items().__iter__()
@noSourceTracking
def setdefault(self, key, default_value):
if not self.has_key(key):
self[key] = default_value
return self[key]
@noSourceTracking
def get(self, key, default_value=None):
if not self.has_key(key):
return default_value
return self[key]
@noSourceTracking
def update(self, d):
for k,v in d.iteritems():
self[k] = v
@noSourceTracking
def getObject(self):
"""
Return the javascript Object which this class uses to store
dictionary keys and values
"""
return self.d
@noSourceTracking
def copy(self):
return Dict(self.items())
@noSourceTracking
def __str__(self):
return self.__repr__()
@noSourceTracking
def toString(self):
return self.__repr__()
def __repr__(self):
#r = []
#for item in self:
# r.append(repr(item) + ': ' + repr(self[item]))
#return '{' + ', '.join(r) + '}'
JS("""
var keys = new Array();
for (var key in @{{self}}['d'])
keys['push'](key);
var s = "{";
for (var i=0; i<keys['length']; i++) {
var v = @{{self}}['d'][keys[i]]
s += pyjslib['repr'](v[0]) + ": " + pyjslib['repr'](v[1]);
if (i < keys['length']-1)
s += ", "
};
s += "}";
return s;
""")
# IE6 doesn't like pyjslib.super
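# Illustrative sketch (hypothetical class names): a Python-level call such as
# super(Child, self).greet() is expected to be compiled against the _super
# helper defined below, roughly equivalent to the hand-written form here.
#
#   class Base(object):
#       def greet(self):
#           return "base"
#
#   class Child(Base):
#       def greet(self):
#           return _super(Child, self).greet() + " via child"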
@noSourceTracking
def _super(type_, object_or_type = None):
    # This is a partial implementation: only the super(type, obj) form is supported
if not _issubtype(object_or_type, type_):
raise TypeError("super(type, obj): obj must be an instance or subtype of type")
JS("""
var fn = pyjs_type('super', @{{type_}}['__mro__']['slice'](1), {})
fn['__new__'] = fn['__mro__'][1]['__new__'];
fn['__init__'] = fn['__mro__'][1]['__init__'];
if (@{{object_or_type}}['__is_instance__'] === false) {
return fn;
}
var obj = new Object();
function wrapper(obj, name) {
var fnwrap = function() {
var args = [];
for (var i = 0; i < arguments['length']; i++) {
args['push'](arguments[i]);
}
return obj[name]['apply'](@{{object_or_type}},args);
}
fnwrap['__name__'] = name;
fnwrap['parse_kwargs'] = obj['parse_kwargs'];
return fnwrap;
}
for (var m in fn) {
if (typeof fn[m] == 'function') {
obj[m] = wrapper(fn, m);
}
}
return obj;
""")
# taken from mochikit: range( [start,] stop[, step] )
@noSourceTracking
def range(start, stop = None, step = 1):
if stop is None:
stop = start
start = 0
JS("""
/*
var start = 0;
var stop = 0;
var step = 1;
if (arguments['length'] == 2) {
start = arguments[0];
stop = arguments[1];
}
else if (arguments['length'] == 3) {
start = arguments[0];
stop = arguments[1];
step = arguments[2];
}
else if (arguments['length']>0) stop = arguments[0];
*/
return {
'next': function() {
if ((@{{step}} > 0 && @{{start}} >= @{{stop}}) || (@{{step}} < 0 && @{{start}} <= @{{stop}})) throw pyjslib['StopIteration'];
var rval = @{{start}};
@{{start}} += @{{step}};
return rval;
},
'__iter__': function() {
return this;
}
}
""")
@noSourceTracking
def slice(object, lower, upper):
JS("""
if (pyjslib['isString'](object)) {
if (@{{lower}} < 0) {
@{{lower}} = object['length'] + @{{lower}};
}
if (@{{upper}} < 0) {
@{{upper}} = object['length'] + @{{upper}};
}
if (pyjslib['isNull'](@{{upper}})) @{{upper}}=object['length'];
return object['substring'](@{{lower}}, @{{upper}});
}
if (pyjslib['isObject'](object) && object['slice'])
return object['slice'](@{{lower}}, @{{upper}});
return null;
""")
@noSourceTracking
def str(text):
JS("""
if (pyjslib['hasattr'](@{{text}},"__str__")) {
return @{{text}}['__str__']();
}
return String(@{{text}});
""")
@noSourceTracking
def ord(x):
    if isString(x) and len(x) == 1:
JS("""
return @{{x}}['charCodeAt'](0);
""")
else:
JS("""
throw pyjslib['TypeError']();
""")
return None
@noSourceTracking
def chr(x):
JS("""
return String['fromCharCode'](@{{x}})
""")
@noSourceTracking
def is_basetype(x):
JS("""
var t = typeof(@{{x}});
return t == 'boolean' ||
t == 'function' ||
t == 'number' ||
t == 'string' ||
t == 'undefined'
;
""")
@noSourceTracking
def get_pyjs_classtype(x):
JS("""
if (pyjslib['hasattr'](@{{x}}, "__is_instance__")) {
var src = @{{x}}['__name__'];
return src;
}
return null;
""")
@noSourceTracking
def repr(x):
""" Return the string representation of 'x'.
"""
if hasattr(x, '__repr__'):
return x.__repr__()
JS("""
if (@{{x}} === null)
return "null";
if (@{{x}} === undefined)
return "undefined";
var t = typeof(@{{x}});
//alert("repr typeof " + t + " : " + xXXX);
if (t == "boolean")
return @{{x}}['toString']();
if (t == "function")
return "<function " + @{{x}}['toString']() + ">";
if (t == "number")
return @{{x}}['toString']();
if (t == "string") {
if (@{{x}}['indexOf']("'") == -1)
return "'" + @{{x}} + "'";
if (@{{x}}['indexOf']('"') == -1)
return '"' + @{{x}} + '"';
var s = @{{x}}['replace'](new RegExp('"', "g"), '\\\\"');
return '"' + s + '"';
};
if (t == "undefined")
return "undefined";
// If we get here, x is an object. See if it's a Pyjamas class.
if (!pyjslib['hasattr'](@{{x}}, "__init__"))
return "<" + @{{x}}['toString']() + ">";
// Handle the common Pyjamas data types.
var constructor = "UNKNOWN";
constructor = pyjslib['get_pyjs_classtype'](@{{x}});
//alert("repr constructor: " + constructor);
// If we get here, the class isn't one we know -> return the class name.
// Note that we replace underscores with dots so that the name will
// (hopefully!) look like the original Python name.
//var s = constructor['replace'](new RegExp('_', "g"), '.');
return "<" + constructor + " object>";
""")
@noSourceTracking
def float(text):
JS("""
return parseFloat(@{{text}});
""")
@noSourceTracking
def int(text, radix=0):
JS("""
var i = parseInt(@{{text}}, @{{radix}});
if (!isNaN(i)) {
return i;
}
""")
if radix == 0:
radix = 10
raise ValueError("invalid literal for int() with base %d: '%s'" % (radix, text))
@noSourceTracking
def len(object):
JS("""
if (object==null) return 0;
if (pyjslib['isObject'](object) && object['__len__']) return object['__len__']();
return object['length'];
""")
@noSourceTracking
def isinstance(object_, classinfo):
if pyjslib.isUndefined(object_):
return False
JS("""if (@{{classinfo}}['__name__'] == 'int') {
return pyjslib['isNumber'](@{{object_}}); /* XXX TODO: check rounded? */
}
""")
JS("""if (@{{classinfo}}['__name__'] == 'str') {
return pyjslib['isString'](@{{object_}});
}
""")
if not pyjslib.isObject(object_):
return False
if _isinstance(classinfo, Tuple):
for ci in classinfo:
if isinstance(object_, ci):
return True
return False
else:
return _isinstance(object_, classinfo)
@noSourceTracking
def _isinstance(object_, classinfo):
JS("""
if (@{{object_}}['__is_instance__'] !== true) {
return false;
}
for (var c in @{{object_}}['__mro__']) {
if (@{{object_}}['__mro__'][c]['__md5__'] == @{{classinfo}}['prototype']['__md5__']) return true;
}
return false;
""")
@noSourceTracking
def _issubtype(object_, classinfo):
JS("""
if (@{{object_}}['__is_instance__'] == null || @{{classinfo}}['__is_instance__'] == null) {
return false;
}
for (var c in @{{object_}}['__mro__']) {
if (@{{object_}}['__mro__'][c] == @{{classinfo}}['prototype']) return true;
}
return false;
""")
@noSourceTracking
def getattr(obj, name, default_value=None):
JS("""
if ((!pyjslib['isObject'](@{{obj}}))||(pyjslib['isUndefined'](@{{obj}}[@{{name}}]))){
if (arguments['length'] != 3){
throw pyjslib['AttributeError'](@{{obj}}, @{{name}});
}else{
return @{{default_value}};
}
}
if (!pyjslib['isFunction'](@{{obj}}[@{{name}}])) return @{{obj}}[@{{name}}];
var method = @{{obj}}[@{{name}}];
var fnwrap = function() {
var args = [];
for (var i = 0; i < arguments['length']; i++) {
args['push'](arguments[i]);
}
return method['apply'](@{{obj}},args);
}
fnwrap['__name__'] = @{{name}};
fnwrap['parse_kwargs'] = @{{obj}}['parse_kwargs'];
return fnwrap;
""")
@noSourceTracking
def delattr(obj, name):
JS("""
if (!pyjslib['isObject'](@{{obj}})) {
    throw pyjslib['AttributeError']("'"+typeof(@{{obj}})+"' object has no attribute '"+@{{name}}+"'");
}
if ((pyjslib['isUndefined'](@{{obj}}[@{{name}}])) ||(typeof(@{{obj}}[@{{name}}]) == "function") ){
throw pyjslib['AttributeError'](@{{obj}}['__name__']+" instance has no attribute '"+ @{{name}}+"'");
}
delete @{{obj}}[@{{name}}];
""")
@noSourceTracking
def setattr(obj, name, value):
JS("""
if (!pyjslib['isObject'](@{{obj}})) return null;
@{{obj}}[@{{name}}] = @{{value}};
""")
@noSourceTracking
def hasattr(obj, name):
JS("""
if (!pyjslib['isObject'](@{{obj}})) return false;
if (pyjslib['isUndefined'](@{{obj}}[@{{name}}])) return false;
return true;
""")
@noSourceTracking
def dir(obj):
JS("""
var properties=new pyjslib['List']();
for (property in @{{obj}}) properties['append'](property);
return properties;
""")
@noSourceTracking
def filter(obj, method, sequence=None):
    # object context is LOST when a method is passed, hence the object must be passed in separately;
    # to emulate Python behaviour this should be generated inline rather than as a function call
    # (see the usage sketch after this function)
items = []
if sequence is None:
sequence = method
method = obj
for item in sequence:
if method(item):
items.append(item)
else:
for item in sequence:
if method.call(obj, item):
items.append(item)
return items
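# Usage sketch (illustrative, invented names): the two calling conventions
# handled above are the plain (function, sequence) form and the
# (obj, unbound_method, sequence) form in which the method is re-bound to obj.
#
#   evens = filter(lambda n: n % 2 == 0, [1, 2, 3, 4])       # -> [2, 4]
#   kept = filter(validator, Validator.accepts, candidates)  # method called with validator as context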
@noSourceTracking
def map(obj, method, sequence=None):
items = []
if sequence is None:
sequence = method
method = obj
for item in sequence:
items.append(method(item))
else:
for item in sequence:
items.append(method.call(obj, item))
return items
def enumerate(sequence):
enumeration = []
nextIndex = 0
for item in sequence:
enumeration.append([nextIndex, item])
nextIndex = nextIndex + 1
return enumeration
def min(*sequence):
if len(sequence) == 1:
sequence = sequence[0]
minValue = None
for item in sequence:
if minValue is None:
minValue = item
elif cmp(item, minValue) == -1:
minValue = item
return minValue
def max(*sequence):
if len(sequence) == 1:
sequence = sequence[0]
maxValue = None
for item in sequence:
if maxValue is None:
maxValue = item
elif cmp(item, maxValue) == 1:
maxValue = item
return maxValue
@noSourceTracking
def hash(obj):
JS("""
if (@{{obj}} == null) return null;
if (@{{obj}}['$H']) return @{{obj}}['$H'];
if (@{{obj}}['__hash__']) return @{{obj}}['__hash__']();
if (@{{obj}}['constructor'] == String || @{{obj}}['constructor'] == Number || @{{obj}}['constructor'] == Date) return @{{obj}};
@{{obj}}['$H'] = ++pyjslib['next_hash_id'];
return @{{obj}}['$H'];
""")
# type functions from Douglas Crockford's Remedial Javascript: http://www.crockford.com/javascript/remedial.html
@noSourceTracking
def isObject(a):
JS("""
return (@{{a}} != null && (typeof @{{a}} == 'object')) || pyjslib['isFunction'](@{{a}});
""")
@noSourceTracking
def isFunction(a):
JS("""
return typeof @{{a}} == 'function';
""")
@noSourceTracking
def isString(a):
JS("""
return typeof @{{a}} == 'string';
""")
@noSourceTracking
def isNull(a):
JS("""
return typeof @{{a}} == 'object' && !@{{a}};
""")
@noSourceTracking
def isArray(a):
JS("""
return pyjslib['isObject'](@{{a}}) && @{{a}}['constructor'] == Array;
""")
@noSourceTracking
def isUndefined(a):
JS("""
return typeof @{{a}} == 'undefined';
""")
@noSourceTracking
def isIteratable(a):
JS("""
return pyjslib['isString'](@{{a}}) || (pyjslib['isObject'](@{{a}}) && @{{a}}['__iter__']);
""")
@noSourceTracking
def isNumber(a):
JS("""
return typeof @{{a}} == 'number' && isFinite(@{{a}});
""")
@noSourceTracking
def toJSObjects(x):
"""
Convert the pyjs pythonic List and Dict objects into javascript Object and Array
objects, recursively.
"""
if isArray(x):
JS("""
var result = [];
for(var k=0; k < @{{x}}['length']; k++) {
var v = @{{x}}[k];
var tv = pyjslib['toJSObjects'](v);
result['push'](tv);
}
return result;
""")
if isObject(x):
if isinstance(x, Dict):
JS("""
var o = @{{x}}['getObject']();
var result = {};
for (var i in o) {
result[o[i][0]['toString']()] = o[i][1];
}
return pyjslib['toJSObjects'](result)
""")
elif isinstance(x, List):
return toJSObjects(x.l)
elif hasattr(x, '__class__'):
# we do not have a special implementation for custom
# classes, just pass it on
return x
if isObject(x):
JS("""
var result = {};
for(var k in @{{x}}) {
var v = @{{x}}[k];
var tv = pyjslib['toJSObjects'](v)
result[k] = tv;
}
return result;
""")
return x
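# Illustrative sketch (invented values): nested pyjs containers are unwrapped
# recursively by toJSObjects above, e.g.
#
#   toJSObjects(Dict([('a', List([1, 2]))]))
#
# would yield a plain JavaScript object of the form {'a': [1, 2]}.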
@noSourceTracking
def sprintf(strng, args):
# See http://docs.python.org/library/stdtypes.html
constructor = get_pyjs_classtype(args)
JS("""
    var re_dict = /([^%]*)%[(]([^)]+)[)]([#0\x20\x2B-]*)(\d+)?(\.\d+)?[hlL]?(.)((.|\\n)*)/;
var re_list = /([^%]*)%([#0\x20\x2B-]*)(\*|(\d+))?(\.\d+)?[hlL]?(.)((.|\\n)*)/;
var re_exp = /(.*)([+-])(.*)/;
""")
strlen = len(strng)
argidx = 0
nargs = 0
result = []
remainder = strng
def next_arg():
if argidx == nargs:
raise TypeError("not enough arguments for format string")
arg = args[argidx]
argidx += 1
return arg
def formatarg(flags, minlen, precision, conversion, param):
subst = ''
numeric = True
if not minlen:
minlen=0
else:
minlen = int(minlen)
if not precision:
precision = None
else:
precision = int(precision)
left_padding = 1
if flags.find('-') >= 0:
left_padding = 0
if conversion == '%':
numeric = False
subst = '%'
elif conversion == 'c':
numeric = False
subst = chr(int(param))
elif conversion == 'd' or conversion == 'i' or conversion == 'u':
subst = str(int(param))
elif conversion == 'e':
if precision is None:
precision = 6
JS("""
@{{subst}} = @{{!re_exp}}['exec'](String(@{{param}}['toExponential'](@{{precision}})));
if (@{{subst}}[3]['length'] == 1) {
@{{subst}} = @{{subst}}[1] + @{{subst}}[2] + '0' + @{{subst}}[3];
} else {
@{{subst}} = @{{subst}}[1] + @{{subst}}[2] + @{{subst}}[3];
}""")
elif conversion == 'E':
if precision is None:
precision = 6
JS("""
@{{subst}} = @{{!re_exp}}['exec'](String(@{{param}}['toExponential'](@{{precision}}))['toUpperCase']());
if (@{{subst}}[3]['length'] == 1) {
@{{subst}} = @{{subst}}[1] + @{{subst}}[2] + '0' + @{{subst}}[3];
} else {
@{{subst}} = @{{subst}}[1] + @{{subst}}[2] + @{{subst}}[3];
}""")
elif conversion == 'f':
if precision is None:
precision = 6
JS("""
@{{subst}} = String(parseFloat(@{{param}})['toFixed'](@{{precision}}));""")
elif conversion == 'F':
if precision is None:
precision = 6
JS("""
@{{subst}} = String(parseFloat(@{{param}})['toFixed'](@{{precision}}))['toUpperCase']();""")
elif conversion == 'g':
if flags.find('#') >= 0:
if precision is None:
precision = 6
if param >= 1E6 or param < 1E-5:
JS("""
@{{subst}} = String(@{{precision}} == null ? @{{param}}['toExponential']() : @{{param}}['toExponential']()['toPrecision'](@{{precision}}));""")
else:
JS("""
@{{subst}} = String(@{{precision}} == null ? parseFloat(@{{param}}) : parseFloat(@{{param}})['toPrecision'](@{{precision}}));""")
elif conversion == 'G':
if flags.find('#') >= 0:
if precision is None:
precision = 6
if param >= 1E6 or param < 1E-5:
JS("""
@{{subst}} = String(@{{precision}} == null ? @{{param}}['toExponential']() : @{{param}}['toExponential']()['toPrecision'](@{{precision}}))['toUpperCase']();""")
else:
JS("""
    @{{subst}} = String(@{{precision}} == null ? parseFloat(@{{param}}) : parseFloat(@{{param}})['toPrecision'](@{{precision}}))['toUpperCase']();""")
elif conversion == 'r':
numeric = False
subst = repr(param)
elif conversion == 's':
numeric = False
subst = str(param)
elif conversion == 'o':
param = int(param)
JS("""
@{{subst}} = @{{param}}['toString'](8);""")
if flags.find('#') >= 0 and subst != '0':
subst = '0' + subst
elif conversion == 'x':
param = int(param)
JS("""
@{{subst}} = @{{param}}['toString'](16);""")
if flags.find('#') >= 0:
if left_padding:
subst = subst.rjust(minlen - 2, '0')
subst = '0x' + subst
elif conversion == 'X':
param = int(param)
JS("""
@{{subst}} = @{{param}}['toString'](16)['toUpperCase']();""")
if flags.find('#') >= 0:
if left_padding:
subst = subst.rjust(minlen - 2, '0')
subst = '0X' + subst
else:
            raise ValueError("unsupported format character '" + conversion + "' (" + hex(ord(conversion)) + ") at index " + str(strlen - len(remainder) - 1))
if minlen and len(subst) < minlen:
padchar = ' '
if numeric and left_padding and flags.find('0') >= 0:
padchar = '0'
if left_padding:
subst = subst.rjust(minlen, padchar)
else:
subst = subst.ljust(minlen, padchar)
return subst
def sprintf_list(strng, args):
while remainder:
JS("""
var a = @{{!re_list}}['exec'](@{{remainder}});""")
if a is None:
result.append(remainder)
break;
JS("""
var left = @{{!a}}[1], flags = @{{!a}}[2];
var minlen = @{{!a}}[3], precision = @{{!a}}[5], conversion = @{{!a}}[6];
@{{remainder}} = @{{!a}}[7];
if (typeof minlen == 'undefined') minlen = null;
if (typeof precision == 'undefined') precision = null;
if (typeof conversion == 'undefined') conversion = null;
""")
result.append(left)
if minlen == '*':
minlen = next_arg()
JS("var minlen_type = typeof(@{{minlen}});")
if minlen_type != 'number' or \
int(minlen) != minlen:
raise TypeError('* wants int')
if conversion != '%':
param = next_arg()
result.append(formatarg(flags, minlen, precision, conversion, param))
def sprintf_dict(strng, args):
arg = args
argidx += 1
while remainder:
JS("""
var a = @{{!re_dict}}['exec'](@{{remainder}});""")
if a is None:
result.append(remainder)
break;
JS("""
var left = @{{!a}}[1], key = @{{!a}}[2], flags = @{{!a}}[3];
var minlen = @{{!a}}[4], precision = @{{!a}}[5], conversion = @{{!a}}[6];
@{{remainder}} = @{{!a}}[7];
if (typeof minlen == 'undefined') minlen = null;
if (typeof precision == 'undefined') precision = null;
if (typeof conversion == 'undefined') conversion = null;
""")
result.append(left)
if not arg.has_key(key):
raise KeyError(key)
else:
param = arg[key]
result.append(formatarg(flags, minlen, precision, conversion, param))
JS("""
var a = @{{!re_dict}}['exec'](@{{strng}});
""")
if a is None:
if constructor != "Tuple":
args = (args,)
nargs = len(args)
sprintf_list(strng, args)
if argidx != nargs:
raise TypeError('not all arguments converted during string formatting')
else:
if constructor != "Dict":
raise TypeError("format requires a mapping")
sprintf_dict(strng, args)
return ''.join(result)
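# Usage sketch (illustrative values): sprintf above backs %-style formatting for
# both the positional (tuple) form and the mapping form.
#
#   sprintf("%s scored %d", ("alice", 3))        # -> 'alice scored 3'
#   sprintf("%(name)s=%(ratio).2f", some_dict)   # mapping form via the re_dict branch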
@noSourceTracking
def printFunc(objs, newline):
JS("""
if ($wnd['console']==undefined) return;
var s = "";
for(var i=0; i < @{{objs}}['length']; i++) {
if(s != "") s += " ";
s += @{{objs}}[i];
}
console['debug'](s)
""")
@noSourceTracking
def type(clsname, bases=None, methods=None):
""" creates a class, derived from bases, with methods and variables
"""
JS(" var mths = {}; ")
if methods:
for k in methods.keys():
mth = methods[k]
JS(" @{{mths}}[@{{k}}] = @{{mth}}; ")
JS(" var bss = null; ")
if bases:
JS("@{{bss}} = @{{bases}}['l'];")
JS(" return pyjs_type(@{{clsname}}, @{{bss}}, @{{mths}}); ")
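# Usage sketch (illustrative, invented names): dynamic class creation through
# the pyjs-specific type() above, which expects a List of bases and a Dict of
# methods rather than native Python containers.
#
#   methods = Dict([('describe', lambda self: 'a point')])
#   Point = type('Point', List([object]), methods)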
def pow(x, y, z = None):
JS("@{{p}} = Math['pow'](@{{x}}, @{{y}});")
if z is None:
return float(p)
return float(p % z)
def hex(x):
if int(x) != x:
raise TypeError("hex() argument can't be converted to hex")
JS("@{{r}} = '0x'+@{{x}}['toString'](16);")
return str(r)
def oct(x):
if int(x) != x:
raise TypeError("oct() argument can't be converted to oct")
JS("@{{r}} = '0'+@{{x}}['toString'](8);")
return str(r)
def round(x, n = 0):
n = pow(10, n)
JS("@{{r}} = Math['round'](@{{n}}*@{{x}})/@{{n}};")
return float(r)
def divmod(x, y):
if int(x) == x and int(y) == y:
return (int(x / y), int(x % y))
JS("@{{f}} = Math['floor'](@{{x}} / @{{y}});")
f = float(f)
return (f, x - f * y)
def all(iterable):
for element in iterable:
if not element:
return False
return True
def any(iterable):
for element in iterable:
if element:
return True
return False
|
dagwieers/ansible | refs/heads/devel | test/integration/targets/azure_rm_keyvault/lookup_plugins/azure_service_principal_attribute.py | 84 | # (c) 2018 Yunge Zhu, <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: azure_service_principal_attribute
requirements:
- azure-graphrbac
author:
- Yunge Zhu <[email protected]>
version_added: "2.7"
short_description: Look up Azure service principal attributes.
description:
- Describes object id of your Azure service principal account.
options:
azure_client_id:
description: azure service principal client id.
azure_secret:
description: azure service principal secret
azure_tenant:
description: azure tenant
azure_cloud_environment:
description: azure cloud environment
"""
EXAMPLES = """
set_fact:
object_id: "{{ lookup('azure_service_principal_attribute',
azure_client_id=azure_client_id,
azure_secret=azure_secret,
                         azure_tenant=azure_tenant) }}"
"""
RETURN = """
_raw:
description:
Returns object id of service principal.
"""
from ansible.errors import AnsibleError
from ansible.plugins import AnsiblePlugin
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_native
try:
from azure.common.credentials import ServicePrincipalCredentials
from azure.graphrbac import GraphRbacManagementClient
from msrestazure import azure_cloud
from msrestazure.azure_exceptions import CloudError
except ImportError:
raise AnsibleError(
"The lookup azure_service_principal_attribute requires azure.graphrbac, msrest")
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
self.set_options(direct=kwargs)
credentials = {}
credentials['azure_client_id'] = self.get_option('azure_client_id', None)
credentials['azure_secret'] = self.get_option('azure_secret', None)
credentials['azure_tenant'] = self.get_option('azure_tenant', 'common')
if credentials['azure_client_id'] is None or credentials['azure_secret'] is None:
raise AnsibleError("Must specify azure_client_id and azure_secret")
_cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD
if self.get_option('azure_cloud_environment', None) is not None:
            # read the option directly and assign to the variable used below
            _cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(self.get_option('azure_cloud_environment'))
try:
azure_credentials = ServicePrincipalCredentials(client_id=credentials['azure_client_id'],
secret=credentials['azure_secret'],
tenant=credentials['azure_tenant'],
resource=_cloud_environment.endpoints.active_directory_graph_resource_id)
client = GraphRbacManagementClient(azure_credentials, credentials['azure_tenant'],
base_url=_cloud_environment.endpoints.active_directory_graph_resource_id)
response = list(client.service_principals.list(filter="appId eq '{0}'".format(credentials['azure_client_id'])))
sp = response[0]
return sp.object_id.split(',')
except CloudError as ex:
raise AnsibleError("Failed to get service principal object id: %s" % to_native(ex))
return False
|
onesfreedom/pybuilder | refs/heads/master | src/main/python/pybuilder/plugins/python/distutils_plugin.py | 3 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
import subprocess
import sys
try:
from StringIO import StringIO
except ImportError as e:
from io import StringIO
from pybuilder.core import (after,
before,
use_plugin,
init,
task,
RequirementsFile,
Dependency)
from pybuilder.errors import BuildFailedException
from pybuilder.utils import as_list
from .setuptools_plugin_helper import build_dependency_version_string
use_plugin("python.core")
DATA_FILES_PROPERTY = "distutils_data_files"
SETUP_TEMPLATE = string.Template("""#!/usr/bin/env python
$remove_hardlink_capabilities_for_shared_filesystems
from $module import setup
if __name__ == '__main__':
setup(
name = '$name',
version = '$version',
description = '''$summary''',
long_description = '''$description''',
author = "$author",
author_email = "$author_email",
license = '$license',
url = '$url',
scripts = $scripts,
packages = $packages,
py_modules = $modules,
classifiers = $classifiers,
entry_points={
'console_scripts':
[$console_scripts]
},
$data_files # data files
$package_data # package data
$dependencies
$dependency_links
zip_safe=True
)
""")
def default(value, default=""):
if value is None:
return default
return value
@init
def initialize_distutils_plugin(project):
project.set_property_if_unset("distutils_commands", ["sdist", "bdist_dumb"])
# Workaround for http://bugs.python.org/issue8876 , unable to build a bdist
# on a filesystem that does not support hardlinks
project.set_property_if_unset("distutils_issue8876_workaround_enabled", False)
project.set_property_if_unset("distutils_classifiers", [
"Development Status :: 3 - Alpha",
"Programming Language :: Python"
])
project.set_property_if_unset("distutils_use_setuptools", True)
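# Illustrative build.py fragment (not part of this plugin): the properties set
# above can be overridden in a project's own @init task; names and values below
# are only an example.
#
#   @init
#   def set_distutils_properties(project):
#       project.set_property("distutils_commands", ["sdist"])
#       project.set_property("distutils_console_scripts", ["mytool = mypackage.main:run"])
#       project.set_property("distutils_classifiers", ["Programming Language :: Python :: 3"])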
@after("package")
def write_setup_script(project, logger):
setup_script = project.expand_path("$dir_dist/setup.py")
logger.info("Writing setup.py as %s", setup_script)
with open(setup_script, "w") as setup_file:
setup_file.write(render_setup_script(project))
os.chmod(setup_script, 0o755)
def render_setup_script(project):
author = ", ".join(map(lambda a: a.name, project.authors))
author_email = ", ".join(map(lambda a: a.email, project.authors))
console_scripts = project.get_property("distutils_console_scripts", [])
template_values = {
"module": "setuptools" if project.get_property("distutils_use_setuptools") else "distutils.core",
"name": project.name,
"version": project.version,
"summary": default(project.summary),
"description": default(project.description),
"author": author,
"author_email": author_email,
"license": default(project.license),
"url": default(project.url),
"scripts": build_scripts_string(project),
"packages": str([package for package in project.list_packages()]),
"modules": str([module for module in project.list_modules()]),
"classifiers": project.get_property("distutils_classifiers"),
"console_scripts": ",".join(["'%s'" % mapping for mapping in console_scripts]),
"data_files": build_data_files_string(project),
"package_data": build_package_data_string(project),
"dependencies": build_install_dependencies_string(project),
"dependency_links": build_dependency_links_string(project),
"remove_hardlink_capabilities_for_shared_filesystems": (
"import os\ndel os.link"
if project.get_property("distutils_issue8876_workaround_enabled")
else "")
}
return SETUP_TEMPLATE.substitute(template_values)
@after("package")
def write_manifest_file(project, logger):
if len(project.manifest_included_files) == 0:
logger.debug("No data to write into MANIFEST.in")
return
logger.debug("Files included in MANIFEST.in: %s" %
project.manifest_included_files)
manifest_filename = project.expand_path("$dir_dist/MANIFEST.in")
logger.info("Writing MANIFEST.in as %s", manifest_filename)
with open(manifest_filename, "w") as manifest_file:
manifest_file.write(render_manifest_file(project))
os.chmod(manifest_filename, 0o664)
def render_manifest_file(project):
manifest_content = StringIO()
for included_file in project.manifest_included_files:
manifest_content.write("include %s\n" % included_file)
return manifest_content.getvalue()
@before("publish")
def build_binary_distribution(project, logger):
logger.info("Building binary distribution in %s",
project.expand_path("$dir_dist"))
commands = as_list(project.get_property("distutils_commands"))
execute_distutils(project, logger, commands)
@task("install")
def install_distribution(project, logger):
logger.info("Installing project %s-%s", project.name, project.version)
execute_distutils(project, logger, as_list("install"))
def execute_distutils(project, logger, commands):
reports_dir = project.expand_path("$dir_reports/distutils")
if not os.path.exists(reports_dir):
os.mkdir(reports_dir)
setup_script = project.expand_path("$dir_dist/setup.py")
for command in commands:
logger.debug("Executing distutils command %s", command)
output_file_path = os.path.join(reports_dir, command.replace("/", ""))
with open(output_file_path, "w") as output_file:
            args = [sys.executable, setup_script]
            args.extend(command.split())
            process = subprocess.Popen(args,
cwd=project.expand_path("$dir_dist"),
stdout=output_file,
stderr=output_file,
shell=False)
return_code = process.wait()
if return_code != 0:
raise BuildFailedException(
"Error while executing setup command %s, see %s for details" % (command, output_file_path))
def strip_comments(requirements):
return [requirement for requirement in requirements
if not requirement.strip().startswith("#")]
def quote(requirements):
return ['"%s"' % requirement for requirement in requirements]
def is_editable_requirement(requirement):
return "-e " in requirement or "--editable " in requirement
def flatten_and_quote(requirements_file):
with open(requirements_file.name, 'r') as requirements_file:
requirements = [requirement.strip("\n") for requirement in requirements_file.readlines()]
requirements = [requirement for requirement in requirements if requirement]
return quote(strip_comments(requirements))
def format_single_dependency(dependency):
return '"%s%s"' % (dependency.name, build_dependency_version_string(dependency))
def build_install_dependencies_string(project):
dependencies = [
dependency for dependency in project.dependencies
if isinstance(dependency, Dependency) and not dependency.url]
requirements = [
requirement for requirement in project.dependencies
if isinstance(requirement, RequirementsFile)]
if not dependencies and not requirements:
return ""
dependencies = [format_single_dependency(dependency) for dependency in dependencies]
requirements = [strip_comments(flatten_and_quote(requirement)) for requirement in requirements]
flattened_requirements = [dependency for dependency_list in requirements for dependency in dependency_list]
flattened_requirements_without_editables = [
requirement for requirement in flattened_requirements if not is_editable_requirement(requirement)]
dependencies.extend(flattened_requirements_without_editables)
result = "install_requires = [ "
result += ", ".join(dependencies)
result += " ],"
return result
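# Illustrative example: a project depending on "requests>=2.0" plus a
# requirements file containing the single line "six" would render roughly as
#
#   install_requires = [ "requests>=2.0", "six" ],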
def build_dependency_links_string(project):
dependency_links = [
dependency for dependency in project.dependencies
if isinstance(dependency, Dependency) and dependency.url]
requirements = [
requirement for requirement in project.dependencies
if isinstance(requirement, RequirementsFile)]
editable_links_from_requirements = []
for requirement in requirements:
editables = [editable for editable in flatten_and_quote(requirement) if is_editable_requirement(editable)]
editable_links_from_requirements.extend(
[editable.replace("--editable ", "").replace("-e ", "") for editable in editables])
if not dependency_links and not requirements:
return ""
def format_single_dependency(dependency):
return '"%s"' % dependency.url
all_dependency_links = [link for link in map(format_single_dependency, dependency_links)]
all_dependency_links.extend(editable_links_from_requirements)
result = "dependency_links = [ "
result += ", ".join(all_dependency_links)
result += " ],"
return result
def build_scripts_string(project):
scripts = [script for script in project.list_scripts()]
scripts_dir = project.get_property("dir_dist_scripts")
if scripts_dir:
scripts = list(map(lambda s: os.path.join(scripts_dir, s), scripts))
return str(scripts)
def build_data_files_string(project):
data_files = project.files_to_install
if not len(data_files):
return ""
return "data_files = %s," % str(data_files)
def build_package_data_string(project):
package_data = project.package_data
if package_data == {}:
return ""
package_data_string = "package_data = {"
sorted_keys = sorted(package_data.keys())
last_element = sorted_keys[-1]
for key in sorted_keys:
package_data_string += "'%s': %s" % (key, str(package_data[key]))
        if key != last_element:
package_data_string += ", "
package_data_string += "},"
return package_data_string
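# Illustrative example: package_data of {"my_pkg": ["*.json"]} would render
# roughly as
#
#   package_data = {'my_pkg': ['*.json']},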
|
salguarnieri/intellij-community | refs/heads/master | python/testData/copyPaste/multiLine/IndentMulti23.dst.py | 664 | class C:
def foo(self):
<caret> y = 2
|
nismod/energy_demand | refs/heads/master | energy_demand/plotting/fig_3_weather_map.py | 1 |
import os
import numpy as np
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.patches import Circle
from matplotlib.colors import LinearSegmentedColormap
import argparse
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
from collections import defaultdict
from energy_demand.plotting import fig_p2_weather_val, result_mapping
from energy_demand.basic import basic_functions
from energy_demand.technologies import tech_related
from energy_demand.read_write import write_data
from energy_demand.basic import conversions
from energy_demand.plotting import basic_plot_functions
def plot_4_cross_map(
cmap_rgb_colors,
reclassified,
result_path,
path_shapefile_input,
threshold=None,
seperate_legend=False
):
    """Plot classified 4-cross map
"""
# --------------
    # Use Cartopy to plot geometries with reclassified facecolor
# --------------
plt.figure(figsize=basic_plot_functions.cm2inch(10, 10)) #, dpi=150)
proj = ccrs.OSGB() #'epsg:27700'
ax = plt.axes(projection=proj)
ax.outline_patch.set_visible(False)
# set up a dict to hold geometries keyed by our key
geoms_by_key = defaultdict(list)
    # for each record, pick out our key's value from the record
# and store the geometry in the relevant list under geoms_by_key
for record in shpreader.Reader(path_shapefile_input).records():
region_name = record.attributes['name']
geoms_by_key[region_name].append(record.geometry)
# now we have all the geometries in lists for each value of our key
# add them to the axis, using the relevant color as facecolor
for key, geoms in geoms_by_key.items():
region_reclassified_value = reclassified.loc[key]['reclassified']
facecolor = cmap_rgb_colors[region_reclassified_value]
ax.add_geometries(geoms, crs=proj, edgecolor='black', facecolor=facecolor, linewidth=0.1)
# --------------
# Create Legend
# --------------
legend_handles = [
mpatches.Patch(color=cmap_rgb_colors[0], label=str("+- threshold {}".format(threshold))),
mpatches.Patch(color=cmap_rgb_colors[1], label=str("a")),
mpatches.Patch(color=cmap_rgb_colors[2], label=str("b")),
mpatches.Patch(color=cmap_rgb_colors[3], label=str("c")),
mpatches.Patch(color=cmap_rgb_colors[4], label=str("d"))]
legend = plt.legend(
handles=legend_handles,
#title="test",
prop={'size': 8},
loc='upper center',
bbox_to_anchor=(0.5, -0.05),
frameon=False)
if seperate_legend:
basic_plot_functions.export_legend(
legend,
os.path.join(result_path, "{}__legend.pdf".format(result_path)))
legend.remove()
# Remove coordinates from figure
ax.set_yticklabels([])
ax.set_xticklabels([])
legend.get_title().set_fontsize(8)
# --------
# Labeling
# --------
plt.tight_layout()
plt.savefig(os.path.join(result_path))
plt.close()
def plot_4_cross_map_OLD(
cmap_rgb_colors,
reclassified,
result_path,
path_shapefile_input,
threshold=None,
seperate_legend=False
):
    """Plot classified 4-cross map (older geopandas-based variant)
"""
# Load uk shapefile
uk_shapefile = gpd.read_file(path_shapefile_input)
# Merge stats to geopanda
shp_gdp_merged = uk_shapefile.merge(reclassified, on='name')
# Assign projection
crs = {'init': 'epsg:27700'} #27700: OSGB_1936_British_National_Grid
uk_gdf = gpd.GeoDataFrame(shp_gdp_merged, crs=crs)
ax = uk_gdf.plot()
uk_gdf['facecolor'] = 'white'
for region in uk_gdf.index:
reclassified_value = uk_gdf.loc[region]['reclassified']
uk_gdf.loc[region, 'facecolor'] = cmap_rgb_colors[reclassified_value]
# plot with face color attribute
uk_gdf.plot(ax=ax, facecolor=uk_gdf['facecolor'], edgecolor='black', linewidth=0.1)
legend_handles = [
mpatches.Patch(color=cmap_rgb_colors[0], label=str("+- thr {}".format(threshold))),
mpatches.Patch(color=cmap_rgb_colors[1], label=str("a")),
mpatches.Patch(color=cmap_rgb_colors[2], label=str("b")),
mpatches.Patch(color=cmap_rgb_colors[3], label=str("c")),
mpatches.Patch(color=cmap_rgb_colors[4], label=str("d"))]
legend = plt.legend(
handles=legend_handles,
#title="test",
prop={'size': 8},
loc='upper center',
bbox_to_anchor=(0.5, -0.05),
frameon=False)
if seperate_legend:
basic_plot_functions.export_legend(
legend,
os.path.join(result_path, "{}__legend.pdf".format(result_path)))
legend.remove()
# Remove coordinates from figure
ax.set_yticklabels([])
ax.set_xticklabels([])
legend.get_title().set_fontsize(8)
# --------
# Labeling
# --------
plt.tight_layout()
plt.savefig(os.path.join(result_path))
plt.close()
def total_annual_demand(
df_data_input,
path_shapefile_input,
regions,
pop_data,
simulation_yr_to_plot,
result_path,
fig_name,
field_to_plot,
unit='GW',
seperate_legend=True,
bins=False
):
"""
"""
if unit == 'GW':
conversion_factor = 1
elif unit == 'kW':
conversion_factor = conversions.gwh_to_kwh(gwh=1) #GW to KW
elif unit == 'percentage':
conversion_factor = 1
else:
        raise Exception("Undefined unit: {}".format(unit))
df_data_input = df_data_input * conversion_factor
# Load uk shapefile
uk_shapefile = gpd.read_file(path_shapefile_input)
# Population of simulation year
pop_sim_yr = pop_data[simulation_yr_to_plot]
regions = list(df_data_input.columns)
nr_of_regions = df_data_input.shape[1]
nr_of_realisations = df_data_input.shape[0]
# Mean over all realisations
mean = df_data_input.mean(axis=0)
# Mean normalized with population
mean_norm_pop = df_data_input.mean(axis=0) / pop_sim_yr
# Standard deviation over all realisations
std_dev = df_data_input.std(axis=0)
max_entry = df_data_input.max(axis=0) #maximum entry for every hour
min_entry = df_data_input.min(axis=0) #maximum entry for every hour
#print("---- Calculate average per person")
tot_person = sum(pop_sim_yr)
#print(df_data_input.iloc[0])
tot_demand = sum(df_data_input.iloc[0])
##print("TOT PERSON: " + str(tot_person))
#print("TOT PERSON: " + str(tot_demand))
#print('AVERAGE KW per Person " '+ str(tot_demand / tot_person))
#print(df_data_input)
regional_statistics_columns = [
'name',
'mean',
'mean_norm_pop',
#'mean_norm_pop_std_dev',
'std_dev']#
#'diff_av_max',
#'mean_pp',
#'diff_av_max_pp',
#'std_dev_average_every_h',
#'std_dev_peak_h_norm_pop']
df_stats = pd.DataFrame(columns=regional_statistics_columns)
for region_name in regions:
line_entry = [[
str(region_name),
mean[region_name],
mean_norm_pop[region_name],
#mean_norm_pop_std_dev[region_name],
std_dev[region_name]
#diff_av_max,
#mean_peak_h_pp,
#diff_av_max_pp,
#std_dev_average_every_h,
#std_dev_peak_h_norm_pop
]]
line_df = pd.DataFrame(
line_entry,
columns=regional_statistics_columns)
df_stats = df_stats.append(line_df)
# ---------------
# Create spatial maps
# http://darribas.org/gds15/content/labs/lab_03.html
# http://nbviewer.jupyter.org/gist/jorisvandenbossche/57d392c085901eb4981054402b37b6b1
# ---------------
# Merge stats to geopanda
shp_gdp_merged = uk_shapefile.merge(df_stats, on='name')
# Assign projection
crs = {'init': 'epsg:27700'} #27700: OSGB_1936_British_National_Grid
uk_gdf = gpd.GeoDataFrame(shp_gdp_merged, crs=crs)
ax = uk_gdf.plot()
# Assign bin colors according to defined cmap and whether
# plot with min_max values or only min/max values
#bin_values = [0, 0.0025, 0.005, 0.0075, 0.01]
nr_of_intervals = 6
if bins:
bin_values = bins
else:
bin_values = result_mapping.get_reasonable_bin_values_II(
data_to_plot=list(uk_gdf[field_to_plot]),
nr_of_intervals=nr_of_intervals)
print("field_to_plot: {} BINS: {}".format(field_to_plot, bin_values))
uk_gdf, cmap_rgb_colors, color_zero, min_value, max_value = fig_p2_weather_val.user_defined_bin_classification(
uk_gdf,
field_to_plot,
bin_values=bin_values)
# plot with face color attribute
uk_gdf.plot(ax=ax, facecolor=uk_gdf['bin_color'], edgecolor='black', linewidth=0.1)
    # TODO IMPROVE: make this correct for the new processing
legend_handles = result_mapping.get_legend_handles(
bin_values[1:-1],
cmap_rgb_colors,
color_zero,
min_value,
max_value)
legend = plt.legend(
handles=legend_handles,
title="Unit: {} field: {}".format(unit, field_to_plot),
prop={'size': 8},
loc='upper center',
bbox_to_anchor=(0.5, -0.05),
frameon=False)
if seperate_legend:
basic_plot_functions.export_legend(
legend,
os.path.join(result_path, "{}__legend.pdf".format(fig_name)))
legend.remove()
# Remove coordinates from figure
ax.set_yticklabels([])
ax.set_xticklabels([])
legend.get_title().set_fontsize(8)
# PLot bins on plot
'''plt.text(
0,
-20,
bin_values[:-1],
fontsize=8)'''
# --------
# Labeling
# --------
#plt.title("Peak demand over time")
plt.tight_layout()
#plt.show()
plt.savefig(os.path.join(result_path, fig_name))
plt.close()
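# Illustrative call (argument values invented): plotting the regional mean of a
# realisation DataFrame on a supplied LAD shapefile.
#
#   total_annual_demand(
#       df_realisations, "lad_boundaries.shp", regions, pop_by_year,
#       simulation_yr_to_plot=2050, result_path="results/maps",
#       fig_name="total_demand_2050.pdf", field_to_plot="mean", unit="GW")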
|